cpdbench.control.CPDDatasetResult

from enum import Enum
import traceback
from typing import Optional

from cpdbench.exception.ResultSetInconsistentException import ResultSetInconsistentException
from cpdbench.task.Task import Task


class ErrorType(str, Enum):
    """Enum for all error types which can occur during the CPDBench execution"""
    DATASET_ERROR = "DATASET_ERROR"
    ALGORITHM_ERROR = "ALGORITHM_ERROR"
    METRIC_ERROR = "METRIC_ERROR"


class CPDDatasetResult:
    """Container for all results of one single dataset including algorithm and metric results"""

    def __init__(self, dataset: Task, algorithms: list[Task], metrics: list[Task]):
        """Constructs a dataset result with the basic attributes.

        :param dataset: task which created the dataset
        :param algorithms: list of all algorithm tasks which were used with this dataset
        :param metrics: list of all metric tasks which were used with this dataset
        """
        self._dataset = dataset.get_task_name()
        self._algorithms = [a.get_task_name() for a in algorithms]
        self._metrics = [m.get_task_name() for m in metrics]

        self._indexes = {}
        self._scores = {}
        self._metric_scores = {a: {} for a in self._algorithms}

        self._errors = []
        # Merge the parameter dicts of all tasks into one mapping keyed by task name.
        self._parameters = ({self._dataset: dataset.get_param_dict()}
                            | {task.get_task_name(): task.get_param_dict() for task in algorithms}
                            | {task.get_task_name(): task.get_param_dict() for task in metrics})

        self._dataset_runtime = -1  # -1 means "not recorded yet"
        self._algorithm_runtimes = {}

    def add_dataset_runtime(self, runtime: float) -> None:
        """Adds the runtime of the dataset task to the result object.
        Once a runtime has been added, the value is immutable.

        :param runtime: the runtime of the task in seconds
        """
        if self._dataset_runtime == -1:
            self._dataset_runtime = runtime

    def add_algorithm_result(self, indexes: list[int], scores: list[float], algorithm: str, runtime: float) -> None:
        """Adds an algorithm result with indexes and confidence scores to the result container.

        :param indexes: list of calculated changepoint indexes
        :param scores: list of calculated confidence scores corresponding to the indexes list
        :param algorithm: name of the executed algorithm
        :param runtime: runtime of the algorithm execution in seconds
        """
        if algorithm not in self._algorithms:
            raise ResultSetInconsistentException(f"Algorithm {algorithm} does not exist")
        self._indexes[algorithm] = indexes
        self._scores[algorithm] = scores
        self._algorithm_runtimes[algorithm] = {"runtime": runtime}

    def add_metric_score(self, metric_score: float, algorithm: str, metric: str, runtime: float) -> None:
        """Adds a metric result of an algorithm/dataset to the result container.
        The corresponding algorithm result has to be added first.

        :param metric_score: calculated metric score as float
        :param algorithm: name of the executed algorithm
        :param metric: name of the used metric
        :param runtime: runtime of the metric execution in seconds
        """
        if algorithm not in self._algorithms or metric not in self._metrics:
            raise ResultSetInconsistentException(f"Algorithm {algorithm} or metric {metric} does not exist")
        if self._indexes.get(algorithm) is None:
            raise ResultSetInconsistentException(f"No algorithm result for {algorithm} has been added yet")
        self._metric_scores[algorithm][metric] = metric_score
        self._algorithm_runtimes[algorithm][metric] = runtime

    def add_error(self, exception: Exception, error_type: ErrorType,
                  algorithm: Optional[str] = None, metric: Optional[str] = None) -> None:
        """Adds a thrown error to the result container.

        :param exception: the thrown exception object
        :param error_type: the error type of the thrown exception
        :param algorithm: name of the algorithm where the exception occurred, if applicable
        :param metric: name of the metric where the exception occurred, if applicable
        """
        trace = ''.join(traceback.format_exception(None, exception, exception.__traceback__))
        self._errors.append((type(exception).__name__, trace, error_type, algorithm, metric))

    def get_result_as_dict(self) -> dict:
        """Returns the result container formatted as dictionary.

        :returns: the complete results with indexes, scores and metric scores of one dataset as dict
        """
        return {
            self._dataset: {
                "indexes": self._indexes,
                "scores": self._scores,
                "metric_scores": self._metric_scores
            }
        }

    def get_errors_as_list(self) -> list:
        """Returns the list of errors that occurred while processing the dataset.

        :returns: all errors of the dataset as python list
        """
        return [
            {
                "dataset": self._dataset,
                "error_type": error[2],
                "algorithm": error[3],
                "metric": error[4],
                "exception_type": error[0],
                "trace_back": error[1]
            }
            for error in self._errors
        ]

    def get_parameters(self) -> dict:
        """Returns the parameters of all included tasks as dict.

        :returns: the parameters as python dict
        """
        return self._parameters

    def get_runtimes(self) -> dict:
        """Returns the runtimes of all included tasks as dict.

        :returns: the runtimes as python dict
        """
        if self._dataset_runtime == -1:
            # Dataset runtime was never recorded; return algorithm runtimes only.
            return {self._dataset: self._algorithm_runtimes}
        return {
            self._dataset: self._algorithm_runtimes | {
                "runtime": self._dataset_runtime,
            }
        }
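A typical lifecycle: construct the result from the dataset, algorithm and metric tasks, record runtimes, results and errors as they arrive, and finally serialize everything via the getters. Below is a minimal, hedged usage sketch; _task is a hypothetical stand-in for cpdbench.task.Task.Task (this class only calls get_task_name() and get_param_dict() on its tasks), and the names "apple_dataset", "binseg" and "f1_score" are purely illustrative.

from types import SimpleNamespace

from cpdbench.control.CPDDatasetResult import CPDDatasetResult, ErrorType


def _task(name, params=None):
    # Hypothetical stand-in for cpdbench.task.Task.Task; CPDDatasetResult
    # only needs get_task_name() and get_param_dict().
    return SimpleNamespace(get_task_name=lambda: name,
                           get_param_dict=lambda: dict(params or {}))


result = CPDDatasetResult(_task("apple_dataset"), [_task("binseg")], [_task("f1_score")])

result.add_dataset_runtime(0.42)
result.add_algorithm_result(indexes=[17, 42], scores=[0.9, 0.7], algorithm="binseg", runtime=1.3)
result.add_metric_score(metric_score=0.88, algorithm="binseg", metric="f1_score", runtime=0.05)

try:
    raise ValueError("broken input file")
except ValueError as error:
    result.add_error(error, ErrorType.DATASET_ERROR)

print(result.get_result_as_dict())
print(result.get_runtimes())
print(result.get_errors_as_list())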
class ErrorType(builtins.str, enum.Enum):

Enum for all error types which can occur during the CPDBench execution

DATASET_ERROR = <ErrorType.DATASET_ERROR: 'DATASET_ERROR'>
ALGORITHM_ERROR = <ErrorType.ALGORITHM_ERROR: 'ALGORITHM_ERROR'>
METRIC_ERROR = <ErrorType.METRIC_ERROR: 'METRIC_ERROR'>
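Because ErrorType mixes in str, its members compare equal to their plain string values and serialize as ordinary strings, which keeps the error dicts JSON-friendly without custom encoders:

import json

from cpdbench.control.CPDDatasetResult import ErrorType

assert ErrorType.DATASET_ERROR == "DATASET_ERROR"  # str mixin: behaves like a plain string
print(json.dumps({"error_type": ErrorType.ALGORITHM_ERROR}))
# {"error_type": "ALGORITHM_ERROR"}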
class CPDDatasetResult:

Container for all results of one single dataset including algorithm and metric results

CPDDatasetResult(dataset: cpdbench.task.Task.Task, algorithms: list[cpdbench.task.Task.Task], metrics: list[cpdbench.task.Task.Task])

Constructs a dataset result with the basic attributes.

Parameters
  • dataset: task which created the dataset
  • algorithms: list of all algorithm tasks which were used with this dataset
  • metrics: list of all metric tasks which were used with this dataset
def add_dataset_runtime(self, runtime: float) -> None:

Adds the runtime of the dataset task to the result object. Once a runtime has been added, the value is immutable.

Parameters
  • runtime: the runtime of the task in seconds
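A small sketch of the write-once behaviour; the stub task is hypothetical, as in the module-level example:

from types import SimpleNamespace

from cpdbench.control.CPDDatasetResult import CPDDatasetResult


def _task(name):  # hypothetical stand-in for the real Task
    return SimpleNamespace(get_task_name=lambda: name, get_param_dict=lambda: {})


result = CPDDatasetResult(_task("ds"), [], [])
result.add_dataset_runtime(0.5)
result.add_dataset_runtime(99.0)  # silently ignored: the first value is kept
print(result.get_runtimes())      # {'ds': {'runtime': 0.5}}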
def add_algorithm_result(self, indexes: list[int], scores: list[float], algorithm: str, runtime: float) -> None:

Adds an algorithm result with indexes and confidence scores to the result container.

Parameters
  • indexes: list of calculated changepoint indexes
  • scores: list of calculated confidence scores corresponding to the indexes list
  • algorithm: name of the executed algorithm
  • runtime: runtime of the algorithm execution in seconds
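Results can only be attached to algorithms registered at construction time; any other name raises ResultSetInconsistentException. A hedged sketch with a hypothetical stub task:

from types import SimpleNamespace

from cpdbench.control.CPDDatasetResult import CPDDatasetResult
from cpdbench.exception.ResultSetInconsistentException import ResultSetInconsistentException


def _task(name):  # hypothetical stand-in for the real Task
    return SimpleNamespace(get_task_name=lambda: name, get_param_dict=lambda: {})


result = CPDDatasetResult(_task("ds"), [_task("binseg")], [])
result.add_algorithm_result([10, 20], [0.8, 0.6], "binseg", runtime=1.2)

try:
    result.add_algorithm_result([5], [0.4], "pelt", runtime=0.3)  # "pelt" was never registered
except ResultSetInconsistentException as error:
    print(error)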
def add_metric_score(self, metric_score: float, algorithm: str, metric: str, runtime: float) -> None:

Adds a metric result of an algorithm/dataset to the result container. The corresponding algorithm result has to be added first.

Parameters
  • metric_score: calculated metric score as float
  • algorithm: name of the executed algorithm
  • metric: name of the used metric
  • runtime: runtime of the metric execution in seconds
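Note the ordering constraint visible in the source: a metric score is rejected until add_algorithm_result() has been called for that algorithm. Sketch (hypothetical stub task):

from types import SimpleNamespace

from cpdbench.control.CPDDatasetResult import CPDDatasetResult
from cpdbench.exception.ResultSetInconsistentException import ResultSetInconsistentException


def _task(name):  # hypothetical stand-in for the real Task
    return SimpleNamespace(get_task_name=lambda: name, get_param_dict=lambda: {})


result = CPDDatasetResult(_task("ds"), [_task("binseg")], [_task("f1")])

try:
    result.add_metric_score(0.9, "binseg", "f1", runtime=0.01)  # no algorithm result yet
except ResultSetInconsistentException:
    print("metric score rejected: algorithm result missing")

result.add_algorithm_result([10], [0.8], "binseg", runtime=1.0)
result.add_metric_score(0.9, "binseg", "f1", runtime=0.01)  # accepted now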
def add_error(self, exception: Exception, error_type: ErrorType, algorithm: Optional[str] = None, metric: Optional[str] = None) -> None:

Adds a thrown error to the result container.

Parameters
  • exception: the thrown exception object
  • error_type: the error type of the thrown exception
  • algorithm: name of the algorithm where the exception occurred, if applicable
  • metric: name of the metric where the exception occurred, if applicable
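Since the method stores the exception's type name and its formatted traceback, it is normally called from an except block; algorithm and metric are optional context. Sketch (hypothetical stub task):

from types import SimpleNamespace

from cpdbench.control.CPDDatasetResult import CPDDatasetResult, ErrorType


def _task(name):  # hypothetical stand-in for the real Task
    return SimpleNamespace(get_task_name=lambda: name, get_param_dict=lambda: {})


result = CPDDatasetResult(_task("ds"), [_task("binseg")], [])
try:
    raise RuntimeError("algorithm crashed")
except RuntimeError as error:
    result.add_error(error, ErrorType.ALGORITHM_ERROR, algorithm="binseg")

print(result.get_errors_as_list()[0]["exception_type"])  # RuntimeError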
def get_result_as_dict(self) -> dict:

Returns the result container formatted as a dictionary: the complete results with indexes, scores and metric scores of one dataset.
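For orientation, the returned structure looks like this (illustrative values; one dataset "ds" with algorithms "alg1"/"alg2" and one metric "f1"):

{
    "ds": {
        "indexes":       {"alg1": [17, 42], "alg2": [7]},
        "scores":        {"alg1": [0.9, 0.7], "alg2": [0.5]},
        "metric_scores": {"alg1": {"f1": 0.88}, "alg2": {"f1": 0.61}},
    }
}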

def get_errors_as_list(self) -> list:

Returns the list of errors that occurred while processing the dataset, as a python list.
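Each entry is a flat dict; an illustrative shape (all values made up):

[
    {
        "dataset": "ds",
        "error_type": ErrorType.ALGORITHM_ERROR,  # a str-valued enum member
        "algorithm": "binseg",
        "metric": None,                           # None when not applicable
        "exception_type": "RuntimeError",
        "trace_back": "Traceback (most recent call last): ...",
    }
]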

def get_parameters(self) -> dict:

Returns the parameters of all included tasks as a python dict, keyed by task name.
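The mapping is the one assembled in __init__: every task name, whether dataset, algorithm or metric, maps to that task's parameter dict. Illustrative shape (names and parameters made up):

{
    "ds":     {"file": "data.csv"},  # dataset task parameters
    "binseg": {"penalty": 10},       # algorithm task parameters
    "f1":     {"margin": 5},         # metric task parameters
}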

def get_runtimes(self) -> dict:

Returns the runtimes of all included tasks as a python dict.
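Each algorithm entry carries its own "runtime" plus one entry per metric; the dataset runtime, if it was recorded, is merged in under a top-level "runtime" key. Illustrative shape (values made up):

{
    "ds": {
        "binseg": {"runtime": 1.3, "f1": 0.05},  # algorithm runtime and per-metric runtimes
        "runtime": 0.42,                         # dataset runtime, only present if recorded
    }
}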