cpdbench.control.ValidationRunController
from cpdbench.control.CPDResult import CPDResult
from cpdbench.control.CPDValidationResult import CPDValidationResult
from cpdbench.control.ExecutionController import ExecutionController
from cpdbench.exception.ValidationException import DatasetValidationException, AlgorithmValidationException, \
    MetricValidationException
from cpdbench.utils import Logger


class ValidationRunController(ExecutionController):
    """A run configuration for validation runs.
    These runs only execute algorithms with a (user defined) subset of the datasets,
    and do not return the complete result sets.
    """

    def __init__(self):
        self._logger = Logger.get_application_logger()
        super().__init__(self._logger)

    def execute_run(self, methods: dict) -> CPDResult:
        self._logger.info('Creating tasks...')
        tasks = self._create_tasks(methods)
        self._logger.info(f"{len(tasks['datasets']) + len(tasks['algorithms']) + len(tasks['metrics'])} tasks created")

        exception_list = []

        self._logger.info('Begin validation')
        # Validate each dataset task; on failure, record the error and skip
        # the algorithm and metric validations that depend on this dataset.
        for ds_task in tasks['datasets']:
            try:
                self._logger.debug(f"Validating {ds_task.get_task_name()}")
                dataset = ds_task.validate_input()
            except DatasetValidationException as e:
                self._logger.debug(f"Error occurred when running {ds_task.get_task_name()}")
                exception_list.append(e)
                continue
            self._logger.debug(f"Validated {ds_task.get_task_name()} without error")
            data, ground_truth = dataset.get_validation_preview()
            # Validate each algorithm task on a small preview of the dataset.
            for algo_task in tasks['algorithms']:
                try:
                    self._logger.debug(f"Validating {algo_task.get_task_name()}")
                    indexes, scores = algo_task.validate_input(data)
                except AlgorithmValidationException as e:
                    self._logger.debug(f"Error occurred when running {algo_task.get_task_name()}")
                    exception_list.append(e)
                    continue
                self._logger.debug(f"Validated {algo_task.get_task_name()} without error")
                # Validate each metric task on the algorithm output and the ground truth.
                for metric_task in tasks['metrics']:
                    try:
                        self._logger.debug(f"Validating {metric_task.get_task_name()}")
                        metric_task.validate_input(indexes, scores, ground_truth)
                    except MetricValidationException as e:
                        self._logger.debug(f"Error occurred when running {metric_task.get_task_name()}")
                        exception_list.append(e)
                        continue
                    self._logger.debug(f"Validated {metric_task.get_task_name()} without error")
        self._logger.info('Finished validation')
        self._logger.info(f'{len(exception_list)} errors occurred')
        # Bundle the collected errors and all task names into the result object.
        validation_result = CPDValidationResult(exception_list,
                                                list(map(lambda x: x.get_task_name(), tasks['datasets'])),
                                                list(map(lambda x: x.get_task_name(), tasks['algorithms'])),
                                                list(map(lambda x: x.get_task_name(), tasks['metrics'])))
        # Log every collected error together with its traceback.
        for i in range(0, len(exception_list)):
            self._logger.info(f"Error {i}")
            self._logger.exception(exception_list[i], exc_info=exception_list[i])
        return validation_result
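The loops above rely on the task objects returned by _create_tasks exposing two methods: get_task_name() and validate_input(). The following minimal sketch illustrates that implied interface for the dataset case; the class name, constructor, and exception wrapping are assumptions made for illustration, not the actual cpdbench task classes.

from cpdbench.exception.ValidationException import DatasetValidationException


class IllustrativeDatasetTask:
    """Hypothetical stand-in showing the task interface execute_run expects."""

    def __init__(self, name, dataset_function):
        self._name = name  # appears in log messages and the result's task name lists
        self._dataset_function = dataset_function  # user-supplied dataset function

    def get_task_name(self) -> str:
        return self._name

    def validate_input(self):
        # Run the user function once and hand back its dataset object; wrap any
        # failure in the exception type that execute_run catches for dataset tasks.
        # (Whether the real exception accepts a message argument is an assumption.)
        try:
            return self._dataset_function()
        except Exception as e:
            raise DatasetValidationException(str(e)) from e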
A run configuration for validation runs. These runs execute the algorithms with only a (user-defined) subset of the datasets, and do not return the complete result sets.
Executes the validation run implemented by this class: each dataset, algorithm, and metric task is validated once on a small data preview, and any validation errors are collected instead of aborting the run.
Parameters
- methods: dictionary with all given input functions, grouped by function type (datasets, algorithms, and metrics).
Returns
A CPDValidationResult object containing all collected validation errors together with the names of all dataset, algorithm, and metric tasks.
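For orientation, a minimal usage sketch follows. The keys of the methods dictionary ('datasets', 'algorithms', 'metrics') are inferred from the task groups accessed in execute_run, and the three user functions are hypothetical placeholders; in practice the surrounding framework presumably assembles this dictionary from the registered user functions.

from cpdbench.control.ValidationRunController import ValidationRunController


def my_dataset():
    """Hypothetical user-defined dataset function."""
    ...


def my_algorithm(data):
    """Hypothetical user-defined algorithm function, expected to return (indexes, scores)."""
    ...


def my_metric(indexes, scores, ground_truth):
    """Hypothetical user-defined metric function."""
    ...


controller = ValidationRunController()
methods = {
    'datasets': [my_dataset],        # assumed grouping, mirroring execute_run's task groups
    'algorithms': [my_algorithm],
    'metrics': [my_metric],
}
validation_result = controller.execute_run(methods)  # returns a CPDValidationResult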