diff --git a/fitbenchmarking/core/fitting_benchmarking.py b/fitbenchmarking/core/fitting_benchmarking.py
index d064c0d04..a31f1f555 100644
--- a/fitbenchmarking/core/fitting_benchmarking.py
+++ b/fitbenchmarking/core/fitting_benchmarking.py
@@ -8,7 +8,6 @@
 import timeit
 import warnings
-from contextlib import nullcontext
 
 import numpy as np
 from codecarbon import EmissionsTracker
 from tqdm import tqdm, trange
@@ -126,6 +125,13 @@ def loop_over_benchmark_problems(problem_group, options, checkpointer):
             problems.append((p, parsed_problem))
 
     name_index = {key: 0 for key in name_count}
+
+    track_emissions = 'emissions' in options.table_type
+    if track_emissions:
+        emissions_tracker = EmissionsTracker()
+    else:
+        emissions_tracker = None
+
     LOGGER.info('Running problems')
 
     if options.pbar:
@@ -156,14 +162,19 @@
                 loop_over_starting_values(problem,
                                           options=options,
                                           grabbed_output=grabbed_output,
-                                          checkpointer=checkpointer)
+                                          checkpointer=checkpointer,
+                                          emissions_tracker=emissions_tracker)
 
         results.extend(problem_results)
         failed_problems.extend(problem_fails)
 
+    if emissions_tracker:
+        _ = emissions_tracker.stop()
+
     return results, failed_problems, unselected_minimizers
 
 
-def loop_over_starting_values(problem, options, grabbed_output, checkpointer):
+def loop_over_starting_values(problem, options, grabbed_output, checkpointer,
+                              emissions_tracker):
     """
     Loops over starting values from the fitting problem.
@@ -209,7 +220,8 @@ def loop_over_starting_values(problem, options, grabbed_output, checkpointer):
             options=options,
             start_values_index=index,
             grabbed_output=grabbed_output,
-            checkpointer=checkpointer)
+            checkpointer=checkpointer,
+            emissions_tracker=emissions_tracker)
 
     # Checks to see if all of the minimizers from every software raised an
     # exception and record the problem name if that is the case
@@ -226,7 +238,7 @@ def loop_over_starting_values(problem, options, grabbed_output, checkpointer):
 
 
 def loop_over_cost_function(problem, options, start_values_index,
-                            grabbed_output, checkpointer):
+                            grabbed_output, checkpointer, emissions_tracker):
     """
     Run benchmarking for each cost function given in options.
@@ -266,14 +278,16 @@ def loop_over_cost_function(problem, options, start_values_index,
             options=options,
             start_values_index=start_values_index,
             grabbed_output=grabbed_output,
-            checkpointer=checkpointer)
+            checkpointer=checkpointer,
+            emissions_tracker=emissions_tracker)
         problem_results.extend(individual_problem_results)
 
     return problem_results, unselected_minimizers
 
 
-def loop_over_fitting_software(cost_func, options, start_values_index,
-                               grabbed_output, checkpointer):
+def loop_over_fitting_software(cost_func, options,
+                               start_values_index, grabbed_output,
+                               checkpointer, emissions_tracker):
     """
     Loops over fitting software selected in the options
@@ -330,7 +344,8 @@ def loop_over_fitting_software(cost_func, options, start_values_index,
                 minimizers=minimizers,
                 options=options,
                 grabbed_output=grabbed_output,
-                checkpointer=checkpointer)
+                checkpointer=checkpointer,
+                emissions_tracker=emissions_tracker)
         unselected_minimizers[s] = minimizer_failed
         results.extend(problem_result)
@@ -338,7 +353,7 @@ def loop_over_fitting_software(cost_func, options, start_values_index,
 
 
 def loop_over_minimizers(controller, minimizers, options, grabbed_output,
-                         checkpointer):
+                         checkpointer, emissions_tracker):
     """
     Loops over minimizers in fitting software
@@ -404,13 +419,15 @@ def loop_over_minimizers(controller, minimizers, options, grabbed_output,
             results = loop_over_jacobians(controller,
                                           options=options,
                                           grabbed_output=grabbed_output,
-                                          checkpointer=checkpointer)
+                                          checkpointer=checkpointer,
+                                          emissions_tracker=emissions_tracker)
             results_problem.extend(results)
 
     return results_problem, minimizer_failed
 
 
-def loop_over_jacobians(controller, options, grabbed_output, checkpointer):
+def loop_over_jacobians(controller, options, grabbed_output, checkpointer,
+                        emissions_tracker):
     """
     Loops over Jacobians set from the options file
@@ -460,10 +477,13 @@ def loop_over_jacobians(controller, options, grabbed_output, checkpointer):
                 #######################
                 # Loops over Hessians #
                 #######################
-                new_result = loop_over_hessians(controller,
-                                                options=options,
-                                                grabbed_output=grabbed_output,
-                                                checkpointer=checkpointer)
+                new_result = loop_over_hessians(
+                    controller,
+                    options=options,
+                    grabbed_output=grabbed_output,
+                    checkpointer=checkpointer,
+                    emissions_tracker=emissions_tracker
+                )
                 results.extend(new_result)
 
         # For minimizers that do not accept jacobians we raise an
@@ -478,7 +498,8 @@ def loop_over_jacobians(controller, options, grabbed_output, checkpointer):
     return results
 
 
-def loop_over_hessians(controller, options, grabbed_output, checkpointer):
+def loop_over_hessians(controller, options, grabbed_output, checkpointer,
+                       emissions_tracker):
     """
     Loops over Hessians set from the options file
@@ -533,7 +554,7 @@ def loop_over_hessians(controller, options, grabbed_output, checkpointer):
         # Perform the fit a number of times specified by num_runs
         accuracy, runtimes, emissions = perform_fit(
-            controller, options, grabbed_output)
+            controller, options, grabbed_output, emissions_tracker)
         result_args = {'controller': controller,
                        'accuracy': accuracy,
                        'runtimes': runtimes,
@@ -561,7 +582,7 @@ def loop_over_hessians(controller, options, grabbed_output, checkpointer):
     return new_result
 
 
-def perform_fit(controller, options, grabbed_output):
+def perform_fit(controller, options, grabbed_output, emissions_tracker):
     """
     Performs a fit using the provided controller and its data. It
     will be run a number of times specified by num_runs.
@@ -577,24 +598,19 @@ def perform_fit(controller, options, grabbed_output):
     """
     num_runs = options.num_runs
-    track_emissions = 'emissions' in options.table_type
-    if track_emissions:
-        emissions_tracker = EmissionsTracker()
-    else:
-        emissions_tracker = nullcontext()
 
-    emissions = np.inf
+    emissions = np.nan
     try:
         with grabbed_output:
             controller.validate()
             controller.prepare()
-            with emissions_tracker:
-                # Calls timeit repeat with repeat = num_runs and number = 1
-                runtimes = timeit.Timer(
-                    stmt=controller.execute
-                ).repeat(num_runs, 1)
-            if track_emissions:
+            if emissions_tracker:
+                emissions_tracker.start_task()
+            runtimes = timeit.Timer(
+                stmt=controller.execute
+            ).repeat(num_runs, 1)
+            if emissions_tracker:
                 # stop emissions tracking after all runs have completed
-                emissions = emissions_tracker.final_emissions / num_runs
+                emissions = emissions_tracker.stop_task().emissions / num_runs
 
             controller.cleanup()
             controller.check_attributes()
@@ -650,6 +666,10 @@ def perform_fit(controller, options, grabbed_output):
     # Reset the controller timer once exceptions have been handled
     controller.timer.reset()
 
+    # ensure emissions tracker has been stopped if emissions not set
+    if np.isnan(emissions) and emissions_tracker:
+        _ = emissions_tracker.stop_task()
+
     if controller.flag in [3, 6, 7]:
         # If there was an exception, set the runtimes and
         # cost function value to be infinite
diff --git a/fitbenchmarking/core/tests/test_fitting_benchmarking_benchmark_problems.py b/fitbenchmarking/core/tests/test_fitting_benchmarking_benchmark_problems.py
index 23f9bc155..856cc874c 100644
--- a/fitbenchmarking/core/tests/test_fitting_benchmarking_benchmark_problems.py
+++ b/fitbenchmarking/core/tests/test_fitting_benchmarking_benchmark_problems.py
@@ -66,6 +66,7 @@ def setUp(self):
         """
         cost_func = make_cost_function()
         self.options = Options()
+        self.options.table_type = ['acc', 'runtime', 'compare', 'local_min']
         self.options.software = ["scipy"]
         scipy_len = len(self.options.minimizers["scipy"])
         bench_prob_dir = os.path.dirname(inspect.getfile(test_files))
diff --git a/fitbenchmarking/core/tests/test_fitting_benchmarking_hessians.py b/fitbenchmarking/core/tests/test_fitting_benchmarking_hessians.py
index be8d2d396..e2168aeb0 100644
--- a/fitbenchmarking/core/tests/test_fitting_benchmarking_hessians.py
+++ b/fitbenchmarking/core/tests/test_fitting_benchmarking_hessians.py
@@ -117,7 +117,6 @@ def setUp(self):
         self.grabbed_output = output_grabber.OutputGrabber(self.options)
         self.controller.parameter_set = 0
         self.cp = Checkpoint(self.options)
-        self.options.table_type = ['acc', 'runtime', 'compare', 'local_min']
 
     def tearDown(self) -> None:
         """
@@ -135,7 +134,8 @@ def test_single_hessian(self):
         _ = loop_over_hessians(self.controller,
                                options=self.options,
                                grabbed_output=self.grabbed_output,
-                               checkpointer=self.cp)
+                               checkpointer=self.cp,
+                               emissions_tracker=None)
         self.assertEqual(self.controller.count, 1)
 
     @patch.object(DummyController, "check_bounds_respected")
@@ -152,7 +152,8 @@ def test_bounds_respected_func_called(
         _ = loop_over_hessians(self.controller,
                                options=self.options,
                                grabbed_output=self.grabbed_output,
-                               checkpointer=self.cp)
+                               checkpointer=self.cp,
+                               emissions_tracker=None)
         check_bounds_respected.assert_called()
 
     @patch.object(DummyController, "check_bounds_respected")
@@ -169,7 +170,8 @@ def test_bounds_respected_func_not_called(
         _ = loop_over_hessians(self.controller,
                                options=self.options,
                                grabbed_output=self.grabbed_output,
-                               checkpointer=self.cp)
+                               checkpointer=self.cp,
+                               emissions_tracker=None)
         check_bounds_respected.assert_not_called()
 
     def test_max_runtime_exceeded(self):
@@ -191,7 +193,8 @@ def test_max_runtime_exceeded(self):
         results = loop_over_hessians(controller,
                                      options=options,
                                      grabbed_output=grabbed_output,
-                                     checkpointer=self.cp)
+                                     checkpointer=self.cp,
+                                     emissions_tracker=None)
         self.assertEqual(results[0].error_flag, 6)
 
     @patch.object(DummyController, "eval_chisq")
@@ -207,7 +210,8 @@ def test_eval_chisq_called(self, fit, eval_chisq):
         _ = loop_over_hessians(self.controller,
                                options=self.options,
                                grabbed_output=self.grabbed_output,
-                               checkpointer=self.cp)
+                               checkpointer=self.cp,
+                               emissions_tracker=None)
         eval_chisq.assert_called_once()
 
     @patch.object(DummyController, "eval_confidence")
@@ -224,7 +228,8 @@ def test_eval_confidence_called(self, eval_confidence):
         _ = loop_over_hessians(self.controller,
                                options=self.options,
                                grabbed_output=self.grabbed_output,
-                               checkpointer=self.cp)
+                               checkpointer=self.cp,
+                               emissions_tracker=None)
         eval_confidence.assert_called_once()
 
     @run_for_test_types(TEST_TYPE, 'all')
@@ -249,7 +254,8 @@ def test_multifit_num_results(self, perform_fit):
         results = loop_over_hessians(controller=controller,
                                      options=options,
                                      grabbed_output=grabbed_output,
-                                     checkpointer=self.cp)
+                                     checkpointer=self.cp,
+                                     emissions_tracker=None)
         self.assertTrue(len(results) == 2)
 
 
diff --git a/fitbenchmarking/core/tests/test_fitting_benchmarking_jacobians.py b/fitbenchmarking/core/tests/test_fitting_benchmarking_jacobians.py
index e5449969f..7ff33c877 100644
--- a/fitbenchmarking/core/tests/test_fitting_benchmarking_jacobians.py
+++ b/fitbenchmarking/core/tests/test_fitting_benchmarking_jacobians.py
@@ -120,7 +120,8 @@ def test_single_jacobian(self, loop_over_hessians):
         _ = loop_over_jacobians(self.controller,
                                 options=self.options,
                                 grabbed_output=self.grabbed_output,
-                                checkpointer=self.cp)
+                                checkpointer=self.cp,
+                                emissions_tracker=None)
         loop_over_hessians.assert_called_once()
 
     @patch(f'{FITTING_DIR}.loop_over_hessians')
@@ -135,7 +136,8 @@ def test_multiple_jacobian(self, loop_over_hessians):
         _ = loop_over_jacobians(self.controller,
                                 options=self.options,
                                 grabbed_output=self.grabbed_output,
-                                checkpointer=self.cp)
+                                checkpointer=self.cp,
+                                emissions_tracker=None)
         self.assertEqual(loop_over_hessians.call_count, 2)
 
 
diff --git a/fitbenchmarking/core/tests/test_fitting_benchmarking_minimizers.py b/fitbenchmarking/core/tests/test_fitting_benchmarking_minimizers.py
index b27ca1e06..76d4518f7 100644
--- a/fitbenchmarking/core/tests/test_fitting_benchmarking_minimizers.py
+++ b/fitbenchmarking/core/tests/test_fitting_benchmarking_minimizers.py
@@ -123,7 +123,8 @@ def test_run_minimzers_none_selected(self):
                               self.minimizers,
                               options=self.options,
                               grabbed_output=self.grabbed_output,
-                              checkpointer=self.cp)
+                              checkpointer=self.cp,
+                              emissions_tracker=None)
         assert results_problem == []
         assert minimizer_failed == self.minimizers
 
@@ -142,7 +143,8 @@ def test_run_minimzers_selected(self, loop_over_hessians):
                              self.minimizers,
                              options=self.options,
                              grabbed_output=self.grabbed_output,
-                             checkpointer=self.cp)
+                             checkpointer=self.cp,
+                             emissions_tracker=None)
         assert all(isinstance(x, fitbm_result.FittingResult)
                    for x in results_problem)
         assert minimizer_failed == ["deriv_free_algorithm"]
@@ -161,7 +163,8 @@ def test_run_minimzers_all(self, loop_over_hessians):
                              self.minimizers,
                              options=self.options,
                              grabbed_output=self.grabbed_output,
-                             checkpointer=self.cp)
+                             checkpointer=self.cp,
+                             emissions_tracker=None)
         assert all(isinstance(x, fitbm_result.FittingResult)
                    for x in results_problem)
         assert minimizer_failed == []
@@ -180,7 +183,8 @@
                              self.minimizers,
                              options=self.options,
                              grabbed_output=self.grabbed_output,
-                             checkpointer=self.cp)
+                             checkpointer=self.cp,
+                             emissions_tracker=None)
         assert results_problem[0].error_flag == 4
         assert minimizer_failed == []
 
diff --git a/fitbenchmarking/core/tests/test_fitting_benchmarking_software.py b/fitbenchmarking/core/tests/test_fitting_benchmarking_software.py
index e8904e633..bdd549cd8 100644
--- a/fitbenchmarking/core/tests/test_fitting_benchmarking_software.py
+++ b/fitbenchmarking/core/tests/test_fitting_benchmarking_software.py
@@ -105,7 +105,8 @@ def shared_test(self, expected_list_len, expected_minimizer_failed):
             options=self.options,
             start_values_index=self.start_values_index,
             grabbed_output=self.grabbed_output,
-            checkpointer=self.cp)
+            checkpointer=self.cp,
+            emissions_tracker=None)
         assert len(results) == expected_list_len
 
         dict_test(unselected_minimzers, expected_minimizer_failed)
@@ -207,7 +208,8 @@ def test_incorrect_software(self):
                 options=self.options,
                 start_values_index=self.start_values_index,
                 grabbed_output=self.grabbed_output,
-                checkpointer=self.cp)
+                checkpointer=self.cp,
+                emissions_tracker=None)
 
 
 if __name__ == "__main__":
diff --git a/fitbenchmarking/core/tests/test_fitting_benchmarking_starting_values.py b/fitbenchmarking/core/tests/test_fitting_benchmarking_starting_values.py
index b8cc3ae48..ffb4a3255 100644
--- a/fitbenchmarking/core/tests/test_fitting_benchmarking_starting_values.py
+++ b/fitbenchmarking/core/tests/test_fitting_benchmarking_starting_values.py
@@ -105,7 +105,8 @@ def shared_tests(self, expected_list_len, expected_problem_fails,
             = loop_over_starting_values(self.problem,
                                         options=self.options,
                                         grabbed_output=self.grabbed_output,
-                                        checkpointer=self.cp)
+                                        checkpointer=self.cp,
+                                        emissions_tracker=None)
         assert len(problem_results) == expected_list_len
         assert problem_fails == expected_problem_fails
 
diff --git a/setup.py b/setup.py
index b1abdcd76..55efd9245 100644
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@
                           'pandas>=1.3',
                           'jinja2',
                           'configparser',
-                          'codecarbon<2.2.4',
+                          'codecarbon>=2.3.4',
                           'dash'],
       extras_require={'DFO': ['DFO-LS', 'dfogn'],
                       'SAS': ['sasmodels',
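
Note on the emissions-tracking pattern adopted above: instead of building an EmissionsTracker (or a nullcontext) inside perform_fit for every fit, the patch creates one tracker per benchmarking session in loop_over_benchmark_problems, threads it down to perform_fit, wraps each timed fit in codecarbon's task API, and stops the tracker once all problems are done. The snippet below is a minimal, self-contained sketch of that usage pattern, not FitBenchmarking code; it assumes codecarbon>=2.3.4 (as pinned in setup.py), and fake_fit and num_runs are illustrative stand-ins for controller.execute and options.num_runs.

    import timeit

    from codecarbon import EmissionsTracker

    def fake_fit():
        # Stand-in for controller.execute; any repeatable workload will do.
        return sum(i * i for i in range(100_000))

    tracker = EmissionsTracker()  # one tracker for the whole session

    num_runs = 5
    tracker.start_task()  # begin measuring this fit
    # Time the fit num_runs times, one execution per timing sample.
    runtimes = timeit.Timer(stmt=fake_fit).repeat(num_runs, 1)
    # stop_task() returns an EmissionsData object; average over the runs.
    emissions_per_run = tracker.stop_task().emissions / num_runs

    _ = tracker.stop()  # shut the tracker down once all fits are finished
    print(min(runtimes), emissions_per_run)

Keeping a single long-lived tracker means the hardware detection codecarbon performs when a tracker is constructed happens once per session rather than once per fit, which appears to be the reason for hoisting it out of perform_fit.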