From a172e5213bf2f0859e0e875bde0d338a9dd9ab94 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Tue, 12 May 2020 15:08:30 +0100 Subject: [PATCH 01/28] Adding scipy minimize routine --- examples/all_softwares.ini | 25 +++++-- examples/options_template.ini | 25 +++++-- .../controllers/scipy_controller.py | 48 ++++++++----- .../controllers/scipy_ls_controller.py | 67 +++++++++++++++++++ .../controllers/tests/test_controllers.py | 26 +++++-- fitbenchmarking/utils/default_options.ini | 26 +++++-- 6 files changed, 178 insertions(+), 39 deletions(-) create mode 100644 fitbenchmarking/controllers/scipy_ls_controller.py diff --git a/examples/all_softwares.ini b/examples/all_softwares.ini index dc589f6d2..9e2ddec68 100644 --- a/examples/all_softwares.ini +++ b/examples/all_softwares.ini @@ -73,19 +73,32 @@ # de # mp -# scipy: available minimizers (lm-scipy, trf, dogbox) -# for more information see scipy.optimize.least_squares.html +# scipy: available minimizers (Nelder-Mead, Powell, CG, BFGS, Newton-CG, +# L-BFGS-B, TNC, SLSQP) +# for more information see scipy.optimize.minimize.html # https://docs.scipy.org/doc/scipy/reference/generated/ +#scipy: Nelder-Mead +# Powell +# CG +# BFGS +# Newton-CG +# L-BFGS-B +# TNC +# SLSQP + +# scipy_ls: available minimizers (lm-scipy-no-jac, lm-scipy, trf, dogbox) +# for more information see scipy.optimize.least_squares.html +# https://docs.scipy.org/doc/scipy/reference/generated/ # Note: The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation # which are significantly faster and gives different results than # using the minimizer "lm-scipy" which uses problem.eval_j for the # Jacobian evaluation. 
We do not see significant speed changes or # difference in the accuracy results when running trf or dogbox with # or without problem.eval_j for the Jacobian evaluation -#scipy: lm-scipy-no-jac -# lm-scipy -# trf -# dogbox +#scipy_ls: lm-scipy-no-jac +# lm-scipy +# trf +# dogbox ############################################################################## diff --git a/examples/options_template.ini b/examples/options_template.ini index dced07d0b..f1843f9aa 100644 --- a/examples/options_template.ini +++ b/examples/options_template.ini @@ -73,19 +73,32 @@ # de # mp -# scipy: available minimizers (lm-scipy, trf, dogbox) -# for more information see scipy.optimize.least_squares.html +# scipy: available minimizers (Nelder-Mead, Powell, CG, BFGS, Newton-CG, +# L-BFGS-B, TNC, SLSQP) +# for more information see scipy.optimize.minimize.html # https://docs.scipy.org/doc/scipy/reference/generated/ +#scipy: Nelder-Mead +# Powell +# CG +# BFGS +# Newton-CG +# L-BFGS-B +# TNC +# SLSQP + +# scipy_ls: available minimizers (lm-scipy-no-jac, lm-scipy, trf, dogbox) +# for more information see scipy.optimize.least_squares.html +# https://docs.scipy.org/doc/scipy/reference/generated/ # Note: The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation # which are significantly faster and gives different results than # using the minimizer "lm-scipy" which uses problem.eval_j for the # Jacobian evaluation. 
We do not see significant speed changes or # difference in the accuracy results when running trf or dogbox with # or without problem.eval_j for the Jacobian evaluation -#scipy: lm-scipy-no-jac -# lm-scipy -# trf -# dogbox +#scipy_ls: lm-scipy-no-jac +# lm-scipy +# trf +# dogbox ############################################################################## diff --git a/fitbenchmarking/controllers/scipy_controller.py b/fitbenchmarking/controllers/scipy_controller.py index 4899fcca8..5bb65a415 100644 --- a/fitbenchmarking/controllers/scipy_controller.py +++ b/fitbenchmarking/controllers/scipy_controller.py @@ -2,7 +2,7 @@ Implements a controller for the scipy fitting software. """ -from scipy.optimize import least_squares +from scipy.optimize import minimize from fitbenchmarking.controllers.base_controller import Controller @@ -27,27 +27,41 @@ def setup(self): if self.minimizer == "lm-scipy": self.minimizer = "lm" + self.options = {'maxiter': 500} + + def eval_jac(self, x, *args): + """ + Wrapper for problem.eval_j to form the approximate Jacobian for + problem.eval_r_norm + + :param x: The parameter values to find the Jacobian at + :type x: list + + :return: Approximation of the Jacobian + :rtype: numpy array + """ + out = self.problem.eval_j(params=x, + func=self.problem.eval_r_norm, + *args) + return out + def fit(self): """ Run problem with Scipy. """ - # The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation - # which are significantly faster and gives different results than - # using the minimizer "lm-scipy" which uses problem.eval_j for the - # Jacobian evaluation. 
We do not see significant speed changes or - # difference in the accuracy results when running trf or dogbox with - # or without problem.eval_j for the Jacobian evaluation - if self.minimizer == "lm-scipy-no-jac": - self.result = least_squares(fun=self.problem.eval_r, - x0=self.initial_params, - method="lm", - max_nfev=500) + # Neither the Nelder-Mead or Powell minimizers require a Jacobian + # so are run without that argument. + if self.minimizer == "Nelder-Mead" or self.minimizer == "Powell": + self.result = minimize(fun=self.problem.eval_r_norm, + x0=self.initial_params, + method=self.minimizer, + options=self.options) else: - self.result = least_squares(fun=self.problem.eval_r, - x0=self.initial_params, - method=self.minimizer, - jac=self.problem.eval_j, - max_nfev=500) + self.result = minimize(fun=self.problem.eval_r_norm, + x0=self.initial_params, + method=self.minimizer, + jac=self.eval_jac, + options=self.options) self._popt = self.result.x self._status = self.result.status diff --git a/fitbenchmarking/controllers/scipy_ls_controller.py b/fitbenchmarking/controllers/scipy_ls_controller.py new file mode 100644 index 000000000..9c16a2282 --- /dev/null +++ b/fitbenchmarking/controllers/scipy_ls_controller.py @@ -0,0 +1,67 @@ +""" +Implements a controller for the scipy ls fitting software. +""" + +from scipy.optimize import least_squares + +from fitbenchmarking.controllers.base_controller import Controller + + +class ScipyLSController(Controller): + """ + Controller for the Scipy Least-Squares fitting software. + """ + + def __init__(self, problem): + """ + Initialises variable used for temporary storage. + """ + super(ScipyLSController, self).__init__(problem) + + self._popt = None + + def setup(self): + """ + Setup problem ready to be run with SciPy LS + """ + if self.minimizer == "lm-scipy": + self.minimizer = "lm" + + def fit(self): + """ + Run problem with Scipy LS. 
+ """ + # The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation + # which are significantly faster and gives different results than + # using the minimizer "lm-scipy" which uses problem.eval_j for the + # Jacobian evaluation. We do not see significant speed changes or + # difference in the accuracy results when running trf or dogbox with + # or without problem.eval_j for the Jacobian evaluation + if self.minimizer == "lm-scipy-no-jac": + self.result = least_squares(fun=self.problem.eval_r, + x0=self.initial_params, + method="lm", + max_nfev=500) + else: + self.result = least_squares(fun=self.problem.eval_r, + x0=self.initial_params, + method=self.minimizer, + jac=self.problem.eval_j, + max_nfev=500) + + self._popt = self.result.x + self._status = self.result.status + + def cleanup(self): + """ + Convert the result to a numpy array and populate the variables results + will be read from. + """ + if self._status > 0: + self.flag = 0 + elif self._status == 0: + self.flag = 1 + else: + self.flag = 2 + + self.final_params = self._popt diff --git a/fitbenchmarking/controllers/tests/test_controllers.py b/fitbenchmarking/controllers/tests/test_controllers.py index 9bc42dea1..79f99c74e 100644 --- a/fitbenchmarking/controllers/tests/test_controllers.py +++ b/fitbenchmarking/controllers/tests/test_controllers.py @@ -13,6 +13,7 @@ from fitbenchmarking.controllers.ralfit_controller import RALFitController from fitbenchmarking.controllers.sasview_controller import SasviewController from fitbenchmarking.controllers.scipy_controller import ScipyController +from fitbenchmarking.controllers.scipy_ls_controller import ScipyLSController from fitbenchmarking.parsing.parser_factory import parse_problem_file from fitbenchmarking.utils import exceptions @@ -255,12 +256,27 @@ def test_sasview(self): controller._status = 1 self.check_diverged(controller) + def test_scipy_ls(self): + """ + ScipyController: Test for output shape + """ + controller = ScipyLSController(self.problem) 
+ controller.minimizer = 'lm' + self.shared_testing(controller) + + controller._status = 1 + self.check_converged(controller) + controller._status = 0 + self.check_max_iterations(controller) + controller._status = -1 + self.check_diverged(controller) + def test_scipy(self): """ ScipyController: Test for output shape """ controller = ScipyController(self.problem) - controller.minimizer = 'lm' + controller.minimizer = 'CG' self.shared_testing(controller) controller._status = 1 @@ -392,12 +408,14 @@ def test_imports(self): Test that the factory returns the correct class for inputs """ - valid = ['scipy', 'mantid', 'sasview', 'ralfit'] + valid = ['scipy_ls', 'mantid', 'sasview', 'ralfit'] + valid_names = ['scipyls', 'mantid', 'sasview', 'ralfit'] invalid = ['foo', 'bar', 'hello', 'r2d2'] - for software in valid: + for software, v in zip(valid, valid_names): controller = ControllerFactory.create_controller(software) - self.assertTrue(controller.__name__.lower().startswith(software)) + print(controller.__name__.lower()) + self.assertTrue(controller.__name__.lower().startswith(v)) for software in invalid: self.assertRaises(exceptions.NoControllerError, diff --git a/fitbenchmarking/utils/default_options.ini b/fitbenchmarking/utils/default_options.ini index 4e97f1ad9..118ffabbc 100644 --- a/fitbenchmarking/utils/default_options.ini +++ b/fitbenchmarking/utils/default_options.ini @@ -73,19 +73,32 @@ sasview: amoeba de mp -# scipy: available minimizers (lm-scipy, trf, dogbox) -# for more information see scipy.optimize.least_squares.html +# scipy: available minimizers (Nelder-Mead, Powell, CG, BFGS, Newton-CG, +# L-BFGS-B, TNC, SLSQP) +# for more information see scipy.optimize.minimize.html # https://docs.scipy.org/doc/scipy/reference/generated/ +scipy: Nelder-Mead + Powell + CG + BFGS + Newton-CG + L-BFGS-B + TNC + SLSQP + +# scipy_ls: available minimizers (lm-scipy-no-jac, lm-scipy, trf, dogbox) +# for more information see scipy.optimize.least_squares.html +# 
https://docs.scipy.org/doc/scipy/reference/generated/ # Note: The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation # which are significantly faster and gives different results than # using the minimizer "lm-scipy" which uses problem.eval_j for the # Jacobian evaluation. We do not see significant speed changes or # difference in the accuracy results when running trf or dogbox with # or without problem.eval_j for the Jacobian evaluation -scipy: lm-scipy-no-jac - lm-scipy - trf - dogbox +scipy_ls: lm-scipy-no-jac + lm-scipy + trf + dogbox ############################################################################## @@ -107,6 +120,7 @@ software: dfo # ralfit sasview scipy + scipy_ls # use_errors will switch between weighted and unweighted least squares # If no errors are supplied, then e[i] will be set to sqrt(abs(y[i])). From acb65bddd0fe60261d3720bc9bde0d67a4c75a04 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Tue, 12 May 2020 15:15:05 +0100 Subject: [PATCH 02/28] Updating system tests --- .../systests/expected_results/all_parsers.txt | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/fitbenchmarking/systests/expected_results/all_parsers.txt b/fitbenchmarking/systests/expected_results/all_parsers.txt index 83f2fa30f..34800fcde 100644 --- a/fitbenchmarking/systests/expected_results/all_parsers.txt +++ b/fitbenchmarking/systests/expected_results/all_parsers.txt @@ -1,8 +1,8 @@ - dfo gsl mantid minuit ralfit sasview scipy - dfogn lmsder BFGS minuit gn amoeba lm-scipy-no-jac -BENNETT5 1 inf (inf)[3] 1.639e-05 (1.021)[1] 0.02038 (1269)[2] 2.114e-05 (1.316) 1.606e-05 (1) 1.608e-05 (1.001) 1.905e-05 (1.186)[1] -cubic, Start 1 1 5.244e-14 (1.358e+08) 3.861e-22 (1) 1.85e-12 (4.792e+09) 3.586e-11 (9.288e+10) 6.723e-13 (1.741e+09) 1.119e-14 (2.899e+07) 3.861e-22 (1) -cubic, Start 2 1 2.424e-17 (6.278e+04) 3.861e-22 (1) 3.306e-06 (8.563e+15) 7.579e-06 (1.963e+16) 6.926e-18 (1.794e+04) 1.146e-14 (2.969e+07) 3.861e-22 (1) 
-cubic-fba 1 7.913e-19 (2049) 3.861e-22 (1) 3.306e-06 (8.563e+15) 7.579e-06 (1.963e+16) 9.768e-18 (2.53e+04) 1.146e-14 (2.969e+07) 3.861e-22 (1) -EMU 73673 1 1046 (1) 1053 (1.007)[1] 1552 (1.484)[2] 1057 (1.01) inf (inf)[3] 1.032e+05 (98.62) 1046 (1) -Problem Def 1 1 1.299e-10 (1) 0.04153 (3.199e+08) 33.36 (2.569e+11)[2] 4.214e-07 (3245) 0.1119 (8.618e+08) 6.784e+05 (5.225e+15) 0.03966 (3.055e+08) + dfo gsl mantid minuit ralfit sasview scipy scipy_ls + dfogn lmsder BFGS minuit gn amoeba Nelder-Mead lm-scipy-no-jac +BENNETT5 1 inf (inf)[3] 1.639e-05 (1.021)[1] 0.02038 (1269)[2] 2.114e-05 (1.316) 1.606e-05 (1) 1.608e-05 (1.001) 1.653e-05 (1.029) 1.905e-05 (1.186)[1] +cubic, Start 1 1 5.244e-14 (1.358e+08) 3.861e-22 (1) 1.85e-12 (4.792e+09) 3.586e-11 (9.288e+10) 6.723e-13 (1.741e+09) 1.119e-14 (2.899e+07) 6.267e-05 (1.623e+17) 3.861e-22 (1) +cubic, Start 2 1 2.424e-17 (6.278e+04) 3.861e-22 (1) 3.306e-06 (8.563e+15) 7.579e-06 (1.963e+16) 6.926e-18 (1.794e+04) 1.146e-14 (2.969e+07) 7.176e-11 (1.859e+11)[1] 3.861e-22 (1) +cubic-fba 1 7.913e-19 (2049) 3.861e-22 (1) 3.306e-06 (8.563e+15) 7.579e-06 (1.963e+16) 9.768e-18 (2.53e+04) 1.146e-14 (2.969e+07) 7.176e-11 (1.859e+11)[1] 3.861e-22 (1) +EMU 73673 1 1046 (1) 1053 (1.007)[1] 1552 (1.484)[2] 1057 (1.01) inf (inf)[3] 1.032e+05 (98.62) 1055 (1.009)[1] 1046 (1) +Problem Def 1 1 1.299e-10 (1) 0.04153 (3.199e+08) 33.36 (2.569e+11)[2] 4.214e-07 (3245) 0.1119 (8.618e+08) 6.784e+05 (5.225e+15) 3.837e-09 (29.55) 0.03966 (3.055e+08) From 16b8a61b99b140e5527efa49543814239e958796 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Tue, 12 May 2020 15:37:30 +0100 Subject: [PATCH 03/28] Fixing linting in parser factory --- fitbenchmarking/parsing/parser_factory.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fitbenchmarking/parsing/parser_factory.py b/fitbenchmarking/parsing/parser_factory.py index 947920326..b20181807 100644 --- a/fitbenchmarking/parsing/parser_factory.py +++ 
b/fitbenchmarking/parsing/parser_factory.py @@ -35,7 +35,7 @@ def create_parser(filename): """ with open(filename, 'r') as f: - line = f.readline() + lines = f.readline() # if there's a SIF file ending, use cutest extension = os.path.splitext(filename)[1] @@ -43,10 +43,10 @@ def create_parser(filename): parser_name = 'cutest' else: # Otherwise, take the first section of text parser_name = '' - for l in line.strip('#').strip(): - if not l.isalpha(): + for line in lines.strip('#').strip(): + if not line.isalpha(): break - parser_name += l + parser_name += line module_name = '{}_parser'.format(parser_name.lower()) From 162d2ee6a34cc736d0491df069ae1d3d3d7c316e Mon Sep 17 00:00:00 2001 From: Anders-Markvardsen Date: Tue, 12 May 2020 22:06:22 +0100 Subject: [PATCH 04/28] fix window feature in test_options write IO failure --- fitbenchmarking/utils/tests/test_options.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fitbenchmarking/utils/tests/test_options.py b/fitbenchmarking/utils/tests/test_options.py index acc98f5fa..3d8d5b6a8 100644 --- a/fitbenchmarking/utils/tests/test_options.py +++ b/fitbenchmarking/utils/tests/test_options.py @@ -73,14 +73,14 @@ def setUp(self): 'external_output': False}} opts_file = 'test_options_tests_{}.txt'.format( - datetime.datetime.now()) + datetime.datetime.now().strftime('%Y-%m-%d %H %M %S %f')) with open(opts_file, 'w') as f: f.write(config_str) self.options = opts self.options_file = opts_file opts_file_incorrect = 'test_incorrect_options_tests_{}.txt'.format( - datetime.datetime.now()) + datetime.datetime.now().strftime('%Y-%m-%d %H %M %S %f')) with open(opts_file_incorrect, 'w') as f: f.write(incorrect_config_str) self.options_file_incorrect = opts_file_incorrect From ef3929f7abde71b3324649e8138669776a23c2b8 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Wed, 13 May 2020 09:48:02 +0100 Subject: [PATCH 05/28] Fixing table tests --- .../results_processing/tests/expected_results/acc.html | 4 ++-- 
.../results_processing/tests/expected_results/acc.txt | 4 ++-- .../results_processing/tests/expected_results/compare.html | 4 ++-- .../results_processing/tests/expected_results/compare.txt | 4 ++-- .../results_processing/tests/expected_results/local_min.html | 4 ++-- .../results_processing/tests/expected_results/local_min.txt | 4 ++-- .../results_processing/tests/expected_results/runtime.html | 4 ++-- .../results_processing/tests/expected_results/runtime.txt | 4 ++-- fitbenchmarking/results_processing/tests/test_tables.py | 2 +- 9 files changed, 17 insertions(+), 17 deletions(-) diff --git a/fitbenchmarking/results_processing/tests/expected_results/acc.html b/fitbenchmarking/results_processing/tests/expected_results/acc.html index 603aacbac..5077b06fe 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/acc.html +++ b/fitbenchmarking/results_processing/tests/expected_results/acc.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813df_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #fef0d9; - } + }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
@@ -30,4 +30,4 @@ -
scipy_ls
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 1 1 (1)15 (5)3 1 (1)1
\ No newline at end of file + diff --git a/fitbenchmarking/results_processing/tests/expected_results/acc.txt b/fitbenchmarking/results_processing/tests/expected_results/acc.txt index adf737c31..80d41a436 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/acc.txt +++ b/fitbenchmarking/results_processing/tests/expected_results/acc.txt @@ -1,4 +1,4 @@ - scipy + scipy_ls lm-scipy-no-jac lm-scipy trf dogbox prob_0 1 1 (1)[1] 5 (5) 2 (2)[2] 1.54 (1.54) -prob_1 1 7 (7) 3 (3)[1] 5 (5)[3] 1 (1)[1] \ No newline at end of file +prob_1 1 7 (7) 3 (3)[1] 5 (5)[3] 1 (1)[1] diff --git a/fitbenchmarking/results_processing/tests/expected_results/compare.html b/fitbenchmarking/results_processing/tests/expected_results/compare.html index 807b84666..e28f3fcc5 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/compare.html +++ b/fitbenchmarking/results_processing/tests/expected_results/compare.html @@ -15,7 +15,7 @@ background-image: linear-gradient(#b30000,#b30000,#b30000,#b30000); } #T_318813de_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-image: linear-gradient(#fef0d9,#fef0d9,#b30000,#b30000); - } + }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
@@ -30,4 +30,4 @@ -
scipy_ls
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 1 1 (1)
0.01 (238.1)1
5 (5)
1e-07 (2e+06)3
1 (1)
4.3e-12 (86)1
\ No newline at end of file + diff --git a/fitbenchmarking/results_processing/tests/expected_results/compare.txt b/fitbenchmarking/results_processing/tests/expected_results/compare.txt index 2f060ad1a..81a06848f 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/compare.txt +++ b/fitbenchmarking/results_processing/tests/expected_results/compare.txt @@ -1,4 +1,4 @@ - scipy + scipy_ls lm-scipy-no-jac lm-scipy trf dogbox prob_0 1 1 (1)
0.01 (238.1)[1] 5 (5)
0.0022 (52.38) 2 (2)
4.2e-05 (1)[2] 1.54 (1.54)
0.98 (2.333e+04) -prob_1 1 7 (7)
3e-10 (6000) 3 (3)
5e-14 (1)[1] 5 (5)
1e-07 (2e+06)[3] 1 (1)
4.3e-12 (86)[1] \ No newline at end of file +prob_1 1 7 (7)
3e-10 (6000) 3 (3)
5e-14 (1)[1] 5 (5)
1e-07 (2e+06)[3] 1 (1)
4.3e-12 (86)[1] diff --git a/fitbenchmarking/results_processing/tests/expected_results/local_min.html b/fitbenchmarking/results_processing/tests/expected_results/local_min.html index d926a03b7..680cb4dc0 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/local_min.html +++ b/fitbenchmarking/results_processing/tests/expected_results/local_min.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813e1_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #b30000; - } + }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
@@ -30,4 +30,4 @@ -
scipy_ls
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 1 False (0.5174)1False (15.01)3 False (13.5)1
\ No newline at end of file + diff --git a/fitbenchmarking/results_processing/tests/expected_results/local_min.txt b/fitbenchmarking/results_processing/tests/expected_results/local_min.txt index e4b1f62ce..165018e31 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/local_min.txt +++ b/fitbenchmarking/results_processing/tests/expected_results/local_min.txt @@ -1,4 +1,4 @@ - scipy + scipy_ls lm-scipy-no-jac lm-scipy trf dogbox prob_0 1 False (0.5174)[1] False (1.48) False (2.941)[2] False (8.165) -prob_1 1 False (6.708) False (4.5)[1] False (15.01)[3] False (13.5)[1] \ No newline at end of file +prob_1 1 False (6.708) False (4.5)[1] False (15.01)[3] False (13.5)[1] diff --git a/fitbenchmarking/results_processing/tests/expected_results/runtime.html b/fitbenchmarking/results_processing/tests/expected_results/runtime.html index 981286580..4dc324e1a 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/runtime.html +++ b/fitbenchmarking/results_processing/tests/expected_results/runtime.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813e0_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #b30000; - } + }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
@@ -30,4 +30,4 @@ -
scipy_ls
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 1 0.01 (238.1)11e-07 (2e+06)3 4.3e-12 (86)1
\ No newline at end of file + diff --git a/fitbenchmarking/results_processing/tests/expected_results/runtime.txt b/fitbenchmarking/results_processing/tests/expected_results/runtime.txt index 0132442b8..57cce71c5 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/runtime.txt +++ b/fitbenchmarking/results_processing/tests/expected_results/runtime.txt @@ -1,4 +1,4 @@ - scipy + scipy_ls lm-scipy-no-jac lm-scipy trf dogbox prob_0 1 0.01 (238.1)[1] 0.0022 (52.38) 4.2e-05 (1)[2] 0.98 (2.333e+04) -prob_1 1 3e-10 (6000) 5e-14 (1)[1] 1e-07 (2e+06)[3] 4.3e-12 (86)[1] \ No newline at end of file +prob_1 1 3e-10 (6000) 5e-14 (1)[1] 1e-07 (2e+06)[3] 4.3e-12 (86)[1] diff --git a/fitbenchmarking/results_processing/tests/test_tables.py b/fitbenchmarking/results_processing/tests/test_tables.py index c8abb7c36..ab9782c4c 100644 --- a/fitbenchmarking/results_processing/tests/test_tables.py +++ b/fitbenchmarking/results_processing/tests/test_tables.py @@ -66,7 +66,7 @@ def generate_mock_results(): list of list fitting results, Options object) """ - software = 'scipy' + software = 'scipy_ls' options = Options() options.software = [software] num_min = len(options.minimizers[options.software[0]]) From 08bfa151ad05de84c68854f7543d560a97190dd8 Mon Sep 17 00:00:00 2001 From: Anders-Markvardsen Date: Wed, 13 May 2020 19:08:48 +0100 Subject: [PATCH 06/28] exception for windows in outputgrapper test --- fitbenchmarking/utils/tests/test_output_grabber.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/fitbenchmarking/utils/tests/test_output_grabber.py b/fitbenchmarking/utils/tests/test_output_grabber.py index a9700f407..8380e47c5 100644 --- a/fitbenchmarking/utils/tests/test_output_grabber.py +++ b/fitbenchmarking/utils/tests/test_output_grabber.py @@ -1,5 +1,6 @@ from __future__ import (absolute_import, division, print_function) import unittest +import platform from fitbenchmarking.utils.output_grabber import OutputGrabber from 
fitbenchmarking.utils.options import Options @@ -8,14 +9,19 @@ class OutputGrabberTests(unittest.TestCase): def setUp(self): self.options = Options() + self.plt = platform.system() def test_correct_output(self): output_string = 'This is the correct output string\nSecond line' output = OutputGrabber(self.options) with output: print(output_string) - # print adds an extra \n - assert output.capturedtext == output_string + "\n" + + # there is currently an issue with OutputGrapper on Windows + # see issues for details + if self.plt != "Windows": + # print adds an extra \n + assert output.capturedtext == output_string + "\n" def test_incorrect_output(self): output_string = 'This is the correct output string\nSecond line' From c66d17e2b7b60ae69fe844636c0d9fe991e11a92 Mon Sep 17 00:00:00 2001 From: Anders-Markvardsen Date: Wed, 13 May 2020 19:17:07 +0100 Subject: [PATCH 07/28] enable all utils tests for windows --- azure-pipelines.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0cd73a5b4..26448021e 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -26,6 +26,5 @@ steps: - script: | pip install pytest pytest-azurepipelines - pytest fitbenchmarking\utils\tests\test_logger.py -# pytest fitbenchmarking\utils\tests\test_fitbm_result.py (breaks in first azure pipeline install) + pytest fitbenchmarking\utils displayName: 'pytest utils tests' From cc4c65936545800472178979a95f7d0fda1baa88 Mon Sep 17 00:00:00 2001 From: Anders Markvardsen Date: Wed, 13 May 2020 19:35:26 +0100 Subject: [PATCH 08/28] enable core and results_processing unit tests --- azure-pipelines.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 26448021e..8cf603e6d 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,4 +27,6 @@ steps: - script: | pip install pytest pytest-azurepipelines pytest fitbenchmarking\utils + pytest fitbenchmarking\core + pytest 
fitbenchmarking\results_processing displayName: 'pytest utils tests' From 65686e85b827287ff2f0e54eba062ae721dc205c Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Fri, 15 May 2020 14:56:00 +0100 Subject: [PATCH 09/28] Updating controller docs and adding scipy to how docs --- docs/source/concept/how/Minimizers.rst | 57 ++++++++++++++++++-------- docs/source/extending/controllers.rst | 6 +++ docs/source/index.rst | 2 + 3 files changed, 47 insertions(+), 18 deletions(-) diff --git a/docs/source/concept/how/Minimizers.rst b/docs/source/concept/how/Minimizers.rst index d135fc246..02d255bb8 100644 --- a/docs/source/concept/how/Minimizers.rst +++ b/docs/source/concept/how/Minimizers.rst @@ -16,17 +16,17 @@ It came out of the University of Maryland and NIST as part of the DANSE FitBenchmarking currently supports the Bumps minimizers: * `Nelder-Mead Simplex `_ (:code:`amoeba`) - + * `Levenberg-Marquardt `_ (:code:`lm`) - + * `Quasi-Newton BFGS `_ (:code:`newton`) - + * `Differential Evolution `_ (:code:`de`) - + * `MINPACK `_ (:code:`mp`) This is a translation of `MINPACK` to Python. **Links** `GitHub - bumps `_ - + DFO --- @@ -38,7 +38,7 @@ of Oxford, in conjunction with NAG. They are particularly well suited for solvi FitBenchmarking currently supports the DFO minimizers: * `Derivative-Free Optimizer for Least Squares `_ (:code:`dfols`) - + * `Derivative-Free Gauss-Newton Solver `_ (:code:`dfogn`) **Links** `GitHub - DFO-GN `_ `GitHub - DFO-LS `_ @@ -51,7 +51,7 @@ provides a wide range of mathematical routines. We call GSL using the `pyGSL P `_. The GSL routines have a number of parameters that need to be chosen, often without default suggestions. -We have taken the values as used by Mantid. +We have taken the values as used by Mantid. 
We provide implementations for the following packages in the `multiminimize `_ and `multifit `_ sections of the library: @@ -60,19 +60,19 @@ packages in the `multiminimize `_ (:code:`lmder`) * `Levenberg-Marquardt (scaled) `_ (:code:`lmsder`) - + * `Nelder-Mead Simplex Algorithm `_ (:code:`nmsimplex`) * `Nelder-Mead Simplex Algorithm (version 2) `_ (:code:`nmsimplex2`) - + * `Polak-Ribiere Conjugate Gradient Algorithm `_ (:code:`conjugate_pr`) - + * `Fletcher-Reeves Conjugate-Gradient `_ (:code:`conjugate_fr`) - + * `The vector quasi-Newton BFGS method `_ (:code:`vector_bfgs`) - + * `The vector quasi-Newton BFGS method (version 2) `_ (:code:`vector_bfgs2`) - + * `Steepest Descent `_ (:code:`steepest_descent`) **Links** `SourceForge PyGSL `_ @@ -91,7 +91,7 @@ manipulate and analyze neutron scattering and muon spectroscopy data. It has support for a number of minimizers, most of which are from GSL. * `BFGS `_ (:code:`BFGS`) - + * `Conjugate gradient (Fletcher-Reeves) `_ (:code:`Conjugate gradient (Fletcher-Reeves imp.)`) * `Conjugate gradient (Polak-Ribiere) `_ (:code:`Conjugate gradient (Polak-Ribiere imp.)`) @@ -99,13 +99,13 @@ It has support for a number of minimizers, most of which are from GSL. * `Damped GaussNewton `_ (:code:`Damped GaussNewton`) * `Levenberg-Marquardt algorithm `_ (:code:`Levenberg-Marquardt`) - + * `Levenberg-Marquardt MD `_ (:code:`Levenberg-MarquardtMD`) - An implementation of Levenberg-Marquardt intended for MD workspaces, where work is divided into chunks to achieve a greater efficiency for a large number of data points. * `Simplex `_ (:code:`simplex`) * `SteepestDescent `_ (:code:`SteepestDescent`) - + * `Trust Region `_ (:code:`Trust Region`) - An implementation of one of the algorithms available in RALFit. **Links** `GitHub - Mantid `_ `Mantid's Fitting Docs `_ @@ -115,7 +115,7 @@ It has support for a number of minimizers, most of which are from GSL. The external package Mantid must be installed to use these minimizers. 
See :ref:`InstallMantid` - + Minuit ------ @@ -149,9 +149,30 @@ order derivatives are currently utilized in FitBenchmarking. SciPy ----- +`SciPy `_ is the standard python package for mathematical +software. In particular, we use the `minimize `_ +solver for general minimization problems from the optimization chapter the +SciPy's library. Currently we only use the algorithms that do not require +Hessian information as inputs. + +* `Nelder-Mead algorithm `_ (:code:`Nelder-Mead`) +* `Powell algorithm `_ (:code:`Powell`) +* `Conjugate gradient algorithm `_ (:code:`CG`) +* `BFGS algorithm `_ (:code:`BFGS`) +* `Newton-CG algorithm `_ (:code:`Newton-CG`) +* `L-BFGS-B algorithm `_ (:code:`L-BFGS-B`) +* `Truncated Newton (TNC) algorithm `_ (:code:`TNC`) +* `Sequential Least SQuares Programming `_ (:code:`SLSQP`) + +**Links** `Github - SciPy minimize `_ + +SciPy LS +-------- + `SciPy `_ is the standard python package for mathematical software. In particular, we use the `least_squares `_ -solver from the optimization chapter the SciPy's library. +solver for Least-Squares minimization problems from the optimization chapter +the SciPy's library. * Levenberg-Marquardt with supplied Jacobian (:code:`lm-scipy`) - a wrapper around MINPACK * Levenberg-Marquardt with no Jacobian passed (:code:`lm-scipy-no-jac`) - as above, but using MINPACK's approximate Jacobian diff --git a/docs/source/extending/controllers.rst b/docs/source/extending/controllers.rst index ced81fffd..adf67b763 100644 --- a/docs/source/extending/controllers.rst +++ b/docs/source/extending/controllers.rst @@ -54,6 +54,12 @@ In order to add a new controller, you will need to: Unless the new controller is more complicated than the currently available controllers, this can be done by following the example of the others. +6. In the :ref:`Minimizers` page of the :ref:`how` page, update with the new + software and minimzers following the structure there. + +7. 
At the bottom of :doc:`main index page <../index>`, add the logo of the + of the software package in the `Currently Benchmarking` section. + .. note:: For ease of maintenance, please add new controllers to a list of software in alphabetical order. diff --git a/docs/source/index.rst b/docs/source/index.rst index f037dbbb3..bb6727a1d 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,3 +1,5 @@ +.. _mainindex: + .. FitBenchmarking documentation master file, created by sphinx-quickstart on Wed Sep 11 09:17:28 2019. You can adapt this file completely to your liking, but it should at least From 0111e364d385f94dda2af4f34d144f99b3ab8f6d Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Fri, 15 May 2020 15:38:32 +0100 Subject: [PATCH 10/28] Adding software links to tables --- fitbenchmarking/results_processing/base_table.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fitbenchmarking/results_processing/base_table.py b/fitbenchmarking/results_processing/base_table.py index 8353d6955..1cad8f1fc 100644 --- a/fitbenchmarking/results_processing/base_table.py +++ b/fitbenchmarking/results_processing/base_table.py @@ -217,11 +217,20 @@ def to_html(self, table, colour, links, error): :return: HTLM table output :rtype: str """ + link_template = "https://fitbenchmarking.readthedocs.io/en/"\ + "latest/concept/how/Minimizers.html#{0}" table.apply(lambda x: self.enable_error(x, error, "{}"), axis=1, result_type='expand') table.apply(lambda x: self.enable_link(x, links), axis=1, result_type='expand') + header_links = [link_template.format(s) for s in self.options.software] + minimizers_list = [('{1}'.format(l, s), m) + for s, l in zip(self.options.software, header_links) + for m in self.options.minimizers[s]] + columns = pd.MultiIndex.from_tuples(minimizers_list) + table.columns = columns + index = [] for b, i in zip(self.best_results, table.index): rel_path = os.path.relpath(path=b.support_page_link, From 704693fa716abc77995075707c430fa8b6048e99 Mon Sep 
17 00:00:00 2001 From: Michael Wathen Date: Fri, 15 May 2020 15:43:22 +0100 Subject: [PATCH 11/28] Fixing table tests with new links --- .../results_processing/tests/expected_results/acc.html | 2 +- .../results_processing/tests/expected_results/compare.html | 2 +- .../results_processing/tests/expected_results/local_min.html | 2 +- .../results_processing/tests/expected_results/runtime.html | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fitbenchmarking/results_processing/tests/expected_results/acc.html b/fitbenchmarking/results_processing/tests/expected_results/acc.html index aa108c848..73c551f59 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/acc.html +++ b/fitbenchmarking/results_processing/tests/expected_results/acc.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813df_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #fef0d9; - } + }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
diff --git a/fitbenchmarking/results_processing/tests/expected_results/compare.html b/fitbenchmarking/results_processing/tests/expected_results/compare.html index 66085b52b..e0498d60b 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/compare.html +++ b/fitbenchmarking/results_processing/tests/expected_results/compare.html @@ -15,7 +15,7 @@ background-image: linear-gradient(#b30000,#b30000,#b30000,#b30000); } #T_318813de_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-image: linear-gradient(#fef0d9,#fef0d9,#b30000,#b30000); - }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 1 (1)1
+ }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
diff --git a/fitbenchmarking/results_processing/tests/expected_results/local_min.html b/fitbenchmarking/results_processing/tests/expected_results/local_min.html index 2f2d445ca..36c4c3136 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/local_min.html +++ b/fitbenchmarking/results_processing/tests/expected_results/local_min.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813e1_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #b30000; - }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 1 (1)
0.01 (238.1)1
+ }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
diff --git a/fitbenchmarking/results_processing/tests/expected_results/runtime.html b/fitbenchmarking/results_processing/tests/expected_results/runtime.html index 3c690227b..d84d0c81c 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/runtime.html +++ b/fitbenchmarking/results_processing/tests/expected_results/runtime.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813e0_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #b30000; - }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 False (0.5174)1
+ }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
From 326608af29ab37d9af59adeb43a318c4218e4438 Mon Sep 17 00:00:00 2001 From: Anders-Markvardsen Date: Sun, 17 May 2020 13:19:46 +0100 Subject: [PATCH 12/28] Fix window issue when running a table test. and tidy up description in tables doc string --- fitbenchmarking/results_processing/tables.py | 4 ++-- fitbenchmarking/results_processing/tests/test_tables.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/fitbenchmarking/results_processing/tables.py b/fitbenchmarking/results_processing/tables.py index 85da981c0..0916ae09a 100644 --- a/fitbenchmarking/results_processing/tables.py +++ b/fitbenchmarking/results_processing/tables.py @@ -115,7 +115,7 @@ def create_results_tables(options, results, best_results, group_name, def load_table(table): """ - Create a table object. + Create and return table object. :param table: The name of the table to create a table for :type table: string @@ -142,7 +142,7 @@ def load_table(table): def generate_table(results, best_results, options, group_dir, pp_locations, table_name, suffix): """ - Saves the results of the fitting to html/txt tables. + Generate html/txt tables. 
:param results: results nested array of objects :type results: list of list of diff --git a/fitbenchmarking/results_processing/tests/test_tables.py b/fitbenchmarking/results_processing/tests/test_tables.py index c8abb7c36..db60cc079 100644 --- a/fitbenchmarking/results_processing/tests/test_tables.py +++ b/fitbenchmarking/results_processing/tests/test_tables.py @@ -172,6 +172,8 @@ def compare_files(self, expected_table, table): exp_line = '' if exp_line is None else exp_line.strip('\n') act_line = '' if act_line is None else act_line.strip('\n') exp_line = exp_line.replace(html_id_expected, html_id) + # to pass on windows need to first do this before comparing + act_line = act_line.replace('href=\"..\\', 'href=\"../') if act_line != exp_line: diff.append([exp_line, act_line]) self.assertListEqual([], diff) From 63fbeb93731ef32f05e2ead2cfd601f0d33f546c Mon Sep 17 00:00:00 2001 From: Anders-Markvardsen Date: Sun, 17 May 2020 17:04:20 +0100 Subject: [PATCH 13/28] split test_controllers.py up to test basic and externals separate --- ...ntrollers.py => test_basic_controllers.py} | 126 +-------- .../tests/test_externals_controllers.py | 246 ++++++++++++++++++ 2 files changed, 248 insertions(+), 124 deletions(-) rename fitbenchmarking/controllers/tests/{test_controllers.py => test_basic_controllers.py} (65%) create mode 100644 fitbenchmarking/controllers/tests/test_externals_controllers.py diff --git a/fitbenchmarking/controllers/tests/test_controllers.py b/fitbenchmarking/controllers/tests/test_basic_controllers.py similarity index 65% rename from fitbenchmarking/controllers/tests/test_controllers.py rename to fitbenchmarking/controllers/tests/test_basic_controllers.py index 9bc42dea1..1cf2a7b79 100644 --- a/fitbenchmarking/controllers/tests/test_controllers.py +++ b/fitbenchmarking/controllers/tests/test_basic_controllers.py @@ -1,3 +1,4 @@ +# Tests for controllers available from basic fitbenchmarking install import inspect import numpy as np import os @@ -7,10 +8,7 @@ 
from fitbenchmarking.controllers.base_controller import Controller from fitbenchmarking.controllers.controller_factory import ControllerFactory from fitbenchmarking.controllers.dfo_controller import DFOController -from fitbenchmarking.controllers.gsl_controller import GSLController -from fitbenchmarking.controllers.mantid_controller import MantidController from fitbenchmarking.controllers.minuit_controller import MinuitController -from fitbenchmarking.controllers.ralfit_controller import RALFitController from fitbenchmarking.controllers.sasview_controller import SasviewController from fitbenchmarking.controllers.scipy_controller import ScipyController @@ -155,91 +153,6 @@ class ControllerTests(TestCase): def setUp(self): self.problem = make_fitting_problem() - def test_mantid(self): - """ - MantidController: Test for output shape - """ - controller = MantidController(self.problem) - controller.minimizer = 'Levenberg-Marquardt' - self.shared_testing(controller) - - controller._status = "success" - self.check_converged(controller) - controller._status = "Failed to converge" - self.check_max_iterations(controller) - controller._status = "Failed" - self.check_diverged(controller) - - def test_mantid_multifit(self): - """ - MantidController: Additional bespoke test for multifit - """ - file_path = os.path.join('multifit_set', 'multifit.txt') - problem = make_fitting_problem(file_path) - - controller = MantidController(problem) - controller.minimizer = 'Levenberg-Marquardt' - - controller.parameter_set = 0 - controller.prepare() - controller.fit() - controller.cleanup() - - self.assertEqual(len(controller.final_params), len(controller.data_x), - 'Multifit did not return a result for each data file') - - self.assertEqual(len(controller.final_params[0]), - len(controller.initial_params), - 'Incorrect number of final params.') - - def test_mantid_singlefit_chisquared(self): - """ - Test the override in Mantid conroller is working correctly for - evaluating chi_squared 
(SingleFit). - """ - m_controller = MantidController(self.problem) - b_controller = DummyController(self.problem) - params = np.array([1, 2, 3, 4]) - x = np.array([6, 2, 32, 4]) - y = np.array([1, 21, 3, 4]) - e = np.array([.5, .003, 1, 2]) - - expected = b_controller.eval_chisq(params=params, x=x, y=y, e=e) - actual = m_controller.eval_chisq(params=params, x=x, y=y, e=e) - - self.assertEqual(expected, actual, - 'Mantid controller found a different chi squared' - ' for single fit problem.') - - def test_mantid_multifit_chisquared(self): - """ - Test the override in Mantid conroller is working correctly for - evaluating chi_squared (MultiFit). - """ - m_controller = MantidController(self.problem) - b_controller = DummyController(self.problem) - params = [np.array([1, 2, 3, 4]), - np.array([1, 2, 3, 4]), - np.array([1, 2, 3, 4])] - xs = [np.array([6, 2, 32, 4]), - np.array([6, 2, 32, 4]), - np.array([6, 2, 32, 4])] - ys = [np.array([1, 21, 3, 4]), - np.array([1, 21, 3, 4]), - np.array([1, 21, 3, 4])] - es = [np.array([.5, .003, 1, 2]), - np.array([.5, .003, 1, 2]), - np.array([.5, .003, 1, 2])] - - expected = [b_controller.eval_chisq(params=p, x=x, y=y, e=e) - for x, y, e, p in zip(xs, ys, es, params)] - actual = m_controller.eval_chisq(params=params, x=xs, y=ys, e=es) - - self.assertListEqual( - expected, actual, - 'Mantid controller found a different chi squared for multi fit' - ' problem.') - def test_sasview(self): """ SasviewController: Test for output shape @@ -289,41 +202,6 @@ def test_dfo(self): controller._status = 5 self.check_diverged(controller) - def test_gsl(self): - """ - GSLController: Tests for output shape - """ - controller = GSLController(self.problem) - # test one from each class - minimizers = ['lmsder', - 'nmsimplex', - 'conjugate_pr'] - for minimizer in minimizers: - controller.minimizer = minimizer - self.shared_testing(controller) - - controller.flag = 0 - self.check_converged(controller) - controller.flag = 1 - 
self.check_max_iterations(controller) - controller.flag = 2 - self.check_diverged(controller) - - def test_ralfit(self): - """ - RALFitController: Tests for output shape - """ - controller = RALFitController(self.problem) - minimizers = ['gn', 'gn_reg', 'hybrid', 'hybrid_reg'] - for minimizer in minimizers: - controller.minimizer = minimizer - self.shared_testing(controller) - - controller._status = 0 - self.check_converged(controller) - controller._status = 2 - self.check_diverged(controller) - def test_minuit(self): """ MinuitController: Tests for output shape @@ -392,7 +270,7 @@ def test_imports(self): Test that the factory returns the correct class for inputs """ - valid = ['scipy', 'mantid', 'sasview', 'ralfit'] + valid = ['scipy', 'sasview'] invalid = ['foo', 'bar', 'hello', 'r2d2'] for software in valid: diff --git a/fitbenchmarking/controllers/tests/test_externals_controllers.py b/fitbenchmarking/controllers/tests/test_externals_controllers.py new file mode 100644 index 000000000..09ce49294 --- /dev/null +++ b/fitbenchmarking/controllers/tests/test_externals_controllers.py @@ -0,0 +1,246 @@ +# Tests for controllers for externals, i.e. 
those available in addition +# to those from a basic fitbenchmarking install +import inspect +import numpy as np +import os +from unittest import TestCase + +from fitbenchmarking import mock_problems +from fitbenchmarking.controllers.base_controller import Controller +from fitbenchmarking.controllers.controller_factory import ControllerFactory +from fitbenchmarking.controllers.gsl_controller import GSLController +from fitbenchmarking.controllers.mantid_controller import MantidController +from fitbenchmarking.controllers.ralfit_controller import RALFitController + +from fitbenchmarking.parsing.parser_factory import parse_problem_file +from fitbenchmarking.utils import exceptions +from fitbenchmarking.utils.options import Options + + +def make_fitting_problem(file_name='cubic.dat'): + """ + Helper function that returns a simple fitting problem + """ + options = Options() + + bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems)) + fname = os.path.join(bench_prob_dir, file_name) + + fitting_problem = parse_problem_file(fname, options) + fitting_problem.correct_data() + return fitting_problem + + +class DummyController(Controller): + """ + Minimal instantiatable subclass of Controller class for testing + """ + + def setup(self): + self.setup_result = 53 + + def fit(self): + raise NotImplementedError + + def cleanup(self): + raise NotImplementedError + + def error_flags(self): + raise NotImplementedError + + +class ControllerTests(TestCase): + """ + Tests for each controller class + """ + + def setUp(self): + self.problem = make_fitting_problem() + + def test_mantid(self): + """ + MantidController: Test for output shape + """ + controller = MantidController(self.problem) + controller.minimizer = 'Levenberg-Marquardt' + self.shared_testing(controller) + + controller._status = "success" + self.check_converged(controller) + controller._status = "Failed to converge" + self.check_max_iterations(controller) + controller._status = "Failed" + 
self.check_diverged(controller) + + def test_mantid_multifit(self): + """ + MantidController: Additional bespoke test for multifit + """ + file_path = os.path.join('multifit_set', 'multifit.txt') + problem = make_fitting_problem(file_path) + + controller = MantidController(problem) + controller.minimizer = 'Levenberg-Marquardt' + + controller.parameter_set = 0 + controller.prepare() + controller.fit() + controller.cleanup() + + self.assertEqual(len(controller.final_params), len(controller.data_x), + 'Multifit did not return a result for each data file') + + self.assertEqual(len(controller.final_params[0]), + len(controller.initial_params), + 'Incorrect number of final params.') + + def test_mantid_singlefit_chisquared(self): + """ + Test the override in Mantid conroller is working correctly for + evaluating chi_squared (SingleFit). + """ + m_controller = MantidController(self.problem) + b_controller = DummyController(self.problem) + params = np.array([1, 2, 3, 4]) + x = np.array([6, 2, 32, 4]) + y = np.array([1, 21, 3, 4]) + e = np.array([.5, .003, 1, 2]) + + expected = b_controller.eval_chisq(params=params, x=x, y=y, e=e) + actual = m_controller.eval_chisq(params=params, x=x, y=y, e=e) + + self.assertEqual(expected, actual, + 'Mantid controller found a different chi squared' + ' for single fit problem.') + + def test_mantid_multifit_chisquared(self): + """ + Test the override in Mantid conroller is working correctly for + evaluating chi_squared (MultiFit). 
+ """ + m_controller = MantidController(self.problem) + b_controller = DummyController(self.problem) + params = [np.array([1, 2, 3, 4]), + np.array([1, 2, 3, 4]), + np.array([1, 2, 3, 4])] + xs = [np.array([6, 2, 32, 4]), + np.array([6, 2, 32, 4]), + np.array([6, 2, 32, 4])] + ys = [np.array([1, 21, 3, 4]), + np.array([1, 21, 3, 4]), + np.array([1, 21, 3, 4])] + es = [np.array([.5, .003, 1, 2]), + np.array([.5, .003, 1, 2]), + np.array([.5, .003, 1, 2])] + + expected = [b_controller.eval_chisq(params=p, x=x, y=y, e=e) + for x, y, e, p in zip(xs, ys, es, params)] + actual = m_controller.eval_chisq(params=params, x=xs, y=ys, e=es) + + self.assertListEqual( + expected, actual, + 'Mantid controller found a different chi squared for multi fit' + ' problem.') + + def test_gsl(self): + """ + GSLController: Tests for output shape + """ + controller = GSLController(self.problem) + # test one from each class + minimizers = ['lmsder', + 'nmsimplex', + 'conjugate_pr'] + for minimizer in minimizers: + controller.minimizer = minimizer + self.shared_testing(controller) + + controller.flag = 0 + self.check_converged(controller) + controller.flag = 1 + self.check_max_iterations(controller) + controller.flag = 2 + self.check_diverged(controller) + + def test_ralfit(self): + """ + RALFitController: Tests for output shape + """ + controller = RALFitController(self.problem) + minimizers = ['gn', 'gn_reg', 'hybrid', 'hybrid_reg'] + for minimizer in minimizers: + controller.minimizer = minimizer + self.shared_testing(controller) + + controller._status = 0 + self.check_converged(controller) + controller._status = 2 + self.check_diverged(controller) + + def shared_testing(self, controller): + """ + Utility function to run controller and check output is in generic form + + :param controller: Controller to test, with setup already completed + :type controller: Object derived from BaseSoftwareController + """ + controller.parameter_set = 0 + controller.prepare() + controller.fit() + 
controller.cleanup() + + assert len(controller.final_params) == len(controller.initial_params) + + def check_converged(self, controller): + """ + Utility function to check controller.cleanup() produces a success flag + + :param controller: Controller to test, with setup already completed + :type controller: Object derived from BaseSoftwareController + """ + controller.cleanup() + assert controller.flag == 0 + + def check_max_iterations(self, controller): + """ + Utility function to check controller.cleanup() produces a maximum + iteration flag + + :param controller: Controller to test, with setup already completed + :type controller: Object derived from BaseSoftwareController + """ + controller.cleanup() + assert controller.flag == 1 + + def check_diverged(self, controller): + """ + Utility function to check controller.cleanup() produces a fail + + :param controller: Controller to test, with setup already completed + :type controller: Object derived from BaseSoftwareController + """ + controller.cleanup() + assert controller.flag == 2 + + +class FactoryTests(TestCase): + """ + Tests for the ControllerFactory + """ + + def test_imports(self): + """ + Test that the factory returns the correct class for inputs + """ + + valid = ['mantid', 'ralfit'] + invalid = ['foo', 'bar', 'hello', 'r2d2'] + + for software in valid: + controller = ControllerFactory.create_controller(software) + self.assertTrue(controller.__name__.lower().startswith(software)) + + for software in invalid: + self.assertRaises(exceptions.NoControllerError, + ControllerFactory.create_controller, + software) From 42fa84a478d375fbb956d5a76663eb4004916aee Mon Sep 17 00:00:00 2001 From: Anders-Markvardsen Date: Sun, 17 May 2020 17:42:56 +0100 Subject: [PATCH 14/28] enable windows testing for parts controllers and cli --- azure-pipelines.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8cf603e6d..5ef0bfd07 100644 --- a/azure-pipelines.yml +++ 
b/azure-pipelines.yml @@ -29,4 +29,6 @@ steps: pytest fitbenchmarking\utils pytest fitbenchmarking\core pytest fitbenchmarking\results_processing + pytest fitbenchmarking\controllers\test\test_basic_controllers.py + pytest fitbenchmarking\cli\tests\test_exception_handler.py displayName: 'pytest utils tests' From bbb6ab67a7a337dab399b02dd4fbc12c2e11e38b Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Mon, 18 May 2020 12:24:00 +0100 Subject: [PATCH 15/28] Removing test_main.py --- fitbenchmarking/cli/tests/test_main.py | 58 -------------------------- 1 file changed, 58 deletions(-) delete mode 100644 fitbenchmarking/cli/tests/test_main.py diff --git a/fitbenchmarking/cli/tests/test_main.py b/fitbenchmarking/cli/tests/test_main.py deleted file mode 100644 index c8e25b1bd..000000000 --- a/fitbenchmarking/cli/tests/test_main.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import unittest - -from fitbenchmarking.cli import main -from fitbenchmarking.utils import exceptions - - -class TestMain(unittest.TestCase): - - @classmethod - def setUpClass(self): - # get curdir and store for teardown - self.cwd = os.getcwd() - - # get rootdir of package - root_dir = os.path.join(os.path.dirname(__file__), - os.pardir, - os.pardir, - os.pardir) - - # chdir to root dir - os.chdir(root_dir) - - @classmethod - def tearDownClass(self): - os.chdir(self.cwd) - - def test_run_with_wrong_option_file_extension(self): - with self.assertRaises(exceptions.OptionsError): - main.run(['examples/benchmark_problems/simple_tests'], - options_file='README.md', - debug=True) - - def test_run_with_wrong_option_file(self): - with self.assertRaises(exceptions.OptionsError): - main.run(['examples/benchmark_problems/simple_tests'], - options_file='options_template.ini', - debug=True) - - def test_run_with_options(self): - main.run(['examples/benchmark_problems/simple_tests'], - options_file='examples/options_template.ini', - debug=True) - - def test_run_no_options(self): - 
main.run(['examples/benchmark_problems/simple_tests'], - debug=True) - - def test_arg_parse(self): - parser = main.get_parser() - - options_file = 'some_file/with_options' - problem_sets = ['problems_1', 'problems2/*'] - - args = parser.parse_args(['-o', options_file] + problem_sets) - - self.assertEqual(args.options_file, options_file) - self.assertEqual(args.problem_sets, problem_sets) From 2f33bbde9ad62d42a7fa1441db2b8dd7f5486499 Mon Sep 17 00:00:00 2001 From: Anders-Markvardsen Date: Mon, 18 May 2020 19:27:02 +0100 Subject: [PATCH 16/28] windows enable tests for parsing --- azure-pipelines.yml | 1 + fitbenchmarking/parsing/tests/test_parsers.py | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5ef0bfd07..a38c20332 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -29,6 +29,7 @@ steps: pytest fitbenchmarking\utils pytest fitbenchmarking\core pytest fitbenchmarking\results_processing + pytest fitbenchmarking\parsing pytest fitbenchmarking\controllers\test\test_basic_controllers.py pytest fitbenchmarking\cli\tests\test_exception_handler.py displayName: 'pytest utils tests' diff --git a/fitbenchmarking/parsing/tests/test_parsers.py b/fitbenchmarking/parsing/tests/test_parsers.py index baf6a47f6..8ab5874fc 100644 --- a/fitbenchmarking/parsing/tests/test_parsers.py +++ b/fitbenchmarking/parsing/tests/test_parsers.py @@ -9,6 +9,7 @@ from unittest import TestCase import numpy as np +import platform from fitbenchmarking.parsing.base_parser import Parser from fitbenchmarking.parsing.fitting_problem import FittingProblem @@ -60,6 +61,10 @@ def generate_test_cases(): raise RuntimeError( 'Could not find {}'.format(known_format)) + # Limits the tests run on Windows (to be extended with issue #534) + if platform.system() == "Windows": + formats = ['nist'] + # create list of test_cases expected_dir = os.listdir(os.path.join(test_dir, 'expected')) for file_format in formats: From 
696c304c102ddedd41d0480cf8219d705a3e139a Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Tue, 19 May 2020 14:22:02 +0100 Subject: [PATCH 17/28] Adding table link documentation --- docs/source/extending/controllers.rst | 5 +++++ fitbenchmarking/results_processing/base_table.py | 9 ++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/docs/source/extending/controllers.rst b/docs/source/extending/controllers.rst index ced81fffd..1c140d9a5 100644 --- a/docs/source/extending/controllers.rst +++ b/docs/source/extending/controllers.rst @@ -54,6 +54,11 @@ In order to add a new controller, you will need to: Unless the new controller is more complicated than the currently available controllers, this can be done by following the example of the others. +6. In the :ref:`Minimizers` page of the :ref:`how` page, update with the new + software and minimizers following the structure there. Note: make sure that + you use `` in :ref:`Minimizers` so that the software links + in the HTML tables link correctly to the documentation. + .. note:: For ease of maintenance, please add new controllers to a list of software in alphabetical order. 
diff --git a/fitbenchmarking/results_processing/base_table.py b/fitbenchmarking/results_processing/base_table.py index 1cad8f1fc..b53db0e4b 100644 --- a/fitbenchmarking/results_processing/base_table.py +++ b/fitbenchmarking/results_processing/base_table.py @@ -217,16 +217,15 @@ def to_html(self, table, colour, links, error): :return: HTLM table output :rtype: str """ - link_template = "https://fitbenchmarking.readthedocs.io/en/"\ - "latest/concept/how/Minimizers.html#{0}" + link_template = '{0}' table.apply(lambda x: self.enable_error(x, error, "{}"), axis=1, result_type='expand') table.apply(lambda x: self.enable_link(x, links), axis=1, result_type='expand') - header_links = [link_template.format(s) for s in self.options.software] - minimizers_list = [('{1}'.format(l, s), m) - for s, l in zip(self.options.software, header_links) + minimizers_list = [(link_template.format(s), m) + for s in self.options.software for m in self.options.minimizers[s]] columns = pd.MultiIndex.from_tuples(minimizers_list) table.columns = columns From a9d8be2c3488150fdbe879bf3b0e336d7d428029 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Wed, 20 May 2020 11:21:43 +0100 Subject: [PATCH 18/28] Changing name of sasview controller to bumps --- examples/all_softwares.ini | 24 ++-- examples/options_template.ini | 24 ++-- .../controllers/bumps_controller.py | 104 ++++++++++++++++++ .../controllers/tests/test_controllers.py | 10 +- .../systests/expected_results/all_parsers.txt | 16 +-- fitbenchmarking/utils/default_options.ini | 24 ++-- 6 files changed, 153 insertions(+), 49 deletions(-) create mode 100644 fitbenchmarking/controllers/bumps_controller.py diff --git a/examples/all_softwares.ini b/examples/all_softwares.ini index dc589f6d2..6e329d364 100644 --- a/examples/all_softwares.ini +++ b/examples/all_softwares.ini @@ -8,6 +8,15 @@ # entry for the software with a newline separated list of minimizers. 
# default is all available minimizers as follows: +# bumps: available minimizers (amoeba, lm-bumps, newton, de, mp) +# for more information see +# https://bumps.readthedocs.io/en/latest/guide/optimizer.html +#bumps: amoeba +# lm-bumps +# newton +# de +# mp + # dfo: available minimimizers (dfogn, dfols) # for more information see # http://people.maths.ox.ac.uk/robertsl/dfogn/ @@ -64,15 +73,6 @@ # hybrid # hybrid_reg -# sasview: available minimizers (amoeba, lm-bumps, newton, de, mp) -# for more information see -# https://bumps.readthedocs.io/en/latest/guide/optimizer.html -#sasview: amoeba -# lm-bumps -# newton -# de -# mp - # scipy: available minimizers (lm-scipy, trf, dogbox) # for more information see scipy.optimize.least_squares.html # https://docs.scipy.org/doc/scipy/reference/generated/ @@ -99,13 +99,13 @@ # software is used to select the fitting software to benchmark, this should be # a newline-separated list -# default is dfo, minuit, sasview, and scipy -software: dfo +# default is bumps, dfo, gsl, mantid, minuit, ralfit, and scipy +software: bumps + dfo gsl mantid minuit ralfit - sasview scipy # use_errors will switch between weighted and unweighted least squares diff --git a/examples/options_template.ini b/examples/options_template.ini index dced07d0b..bedf4fc58 100644 --- a/examples/options_template.ini +++ b/examples/options_template.ini @@ -8,6 +8,15 @@ # entry for the software with a newline separated list of minimizers. 
# default is all available minimizers as follows: +# bumps: available minimizers (amoeba, lm-bumps, newton, de, mp) +# for more information see +# https://bumps.readthedocs.io/en/latest/guide/optimizer.html +#bumps: amoeba +# lm-bumps +# newton +# de +# mp + # dfo: available minimimizers (dfogn, dfols) # for more information see # http://people.maths.ox.ac.uk/robertsl/dfogn/ @@ -64,15 +73,6 @@ # hybrid # hybrid_reg -# sasview: available minimizers (amoeba, lm-bumps, newton, de, mp) -# for more information see -# https://bumps.readthedocs.io/en/latest/guide/optimizer.html -#sasview: amoeba -# lm-bumps -# newton -# de -# mp - # scipy: available minimizers (lm-scipy, trf, dogbox) # for more information see scipy.optimize.least_squares.html # https://docs.scipy.org/doc/scipy/reference/generated/ @@ -99,13 +99,13 @@ # software is used to select the fitting software to benchmark, this should be # a newline-separated list -# default is dfo, minuit, sasview, and scipy -#software: dfo +# default is bumps, dfo, gsl, mantid, minuit, ralfit, and scipy +#software: bumps +# dfo # gsl # mantid # minuit # ralfit -# sasview # scipy # use_errors will switch between weighted and unweighted least squares diff --git a/fitbenchmarking/controllers/bumps_controller.py b/fitbenchmarking/controllers/bumps_controller.py new file mode 100644 index 000000000..b503c7592 --- /dev/null +++ b/fitbenchmarking/controllers/bumps_controller.py @@ -0,0 +1,104 @@ +""" +Implements a controller for the SasView fitting software. +""" + +from bumps.fitters import fit as bumpsFit +from bumps.names import Curve, FitProblem + +import numpy as np + +from fitbenchmarking.controllers.base_controller import Controller + + +class BumpsController(Controller): + """ + Controller for the Sasview fitting software. + + Sasview requires a model to fit. + Setup creates a model with the correct function. 
+ """ + + def __init__(self, problem): + """ + Extract param names for function setup + + :param problem: Problem to fit + :type problem: FittingProblem + """ + super(BumpsController, self).__init__(problem) + + self._param_names = self.problem.param_names + + self._func_wrapper = None + self._fit_problem = None + self._bumps_result = None + + def setup(self): + """ + Setup problem ready to run with SasView. + + Creates a Sasview FitProblem for calling in fit() + """ + # Bumps fails with the *args notation + param_name_str = ', '.join(self._param_names) + wrapper = "def fitFunction(x, {}):\n".format(param_name_str) + wrapper += " return func(x, {})".format(param_name_str) + + exec_dict = {'func': self.problem.function} + exec(wrapper, exec_dict) + + model = exec_dict['fitFunction'] + + # Remove any function attribute. BinWidth is the only attribute in all + # FitBenchmark (Mantid) problems. + param_dict = {name: value + for name, value + in zip(self._param_names, self.initial_params)} + + # Create a Function Wrapper for the problem function. The type of the + # Function Wrapper is acceptable by Bumps. + func_wrapper = Curve(fn=model, + x=self.data_x, + y=self.data_y, + dy=self.data_e, + **param_dict) + + # Set a range for each parameter + val_ranges = self.problem.value_ranges + for name in self._param_names: + min_val = -np.inf + max_val = np.inf + if val_ranges is not None and name in val_ranges: + min_val = val_ranges[name][0] + max_val = val_ranges[name][1] + func_wrapper.__dict__[name].range(min_val, max_val) + + # Create a Problem Wrapper. The type of the Problem Wrapper is + # acceptable by Bumps fitting. + self._func_wrapper = func_wrapper + self._fit_problem = FitProblem(func_wrapper) + if self.minimizer == "lm-bumps": + self.minimizer = "lm" + + def fit(self): + """ + Run problem with SasView. 
+ """ + result = bumpsFit(self._fit_problem, method=self.minimizer) + + self._bumps_result = result + self._status = self._bumps_result.status + + def cleanup(self): + """ + Convert the result to a numpy array and populate the variables results + will be read from. + """ + if self._status == 0: + self.flag = 0 + elif self._status == 2: + self.flag = 1 + else: + self.flag = 2 + + self.final_params = self._bumps_result.x diff --git a/fitbenchmarking/controllers/tests/test_controllers.py b/fitbenchmarking/controllers/tests/test_controllers.py index 9bc42dea1..bb6016788 100644 --- a/fitbenchmarking/controllers/tests/test_controllers.py +++ b/fitbenchmarking/controllers/tests/test_controllers.py @@ -5,13 +5,13 @@ from fitbenchmarking import mock_problems from fitbenchmarking.controllers.base_controller import Controller +from fitbenchmarking.controllers.bumps_controller import BumpsController from fitbenchmarking.controllers.controller_factory import ControllerFactory from fitbenchmarking.controllers.dfo_controller import DFOController from fitbenchmarking.controllers.gsl_controller import GSLController from fitbenchmarking.controllers.mantid_controller import MantidController from fitbenchmarking.controllers.minuit_controller import MinuitController from fitbenchmarking.controllers.ralfit_controller import RALFitController -from fitbenchmarking.controllers.sasview_controller import SasviewController from fitbenchmarking.controllers.scipy_controller import ScipyController from fitbenchmarking.parsing.parser_factory import parse_problem_file @@ -240,11 +240,11 @@ def test_mantid_multifit_chisquared(self): 'Mantid controller found a different chi squared for multi fit' ' problem.') - def test_sasview(self): + def test_bumps(self): """ - SasviewController: Test for output shape + BumpsController: Test for output shape """ - controller = SasviewController(self.problem) + controller = BumpsController(self.problem) controller.minimizer = 'amoeba' 
self.shared_testing(controller) @@ -392,7 +392,7 @@ def test_imports(self): Test that the factory returns the correct class for inputs """ - valid = ['scipy', 'mantid', 'sasview', 'ralfit'] + valid = ['scipy', 'mantid', 'bumps', 'ralfit'] invalid = ['foo', 'bar', 'hello', 'r2d2'] for software in valid: diff --git a/fitbenchmarking/systests/expected_results/all_parsers.txt b/fitbenchmarking/systests/expected_results/all_parsers.txt index 797ca2361..e3260d929 100644 --- a/fitbenchmarking/systests/expected_results/all_parsers.txt +++ b/fitbenchmarking/systests/expected_results/all_parsers.txt @@ -1,8 +1,8 @@ - dfo gsl mantid minuit ralfit sasview scipy - dfogn lmsder BFGS minuit gn amoeba lm-scipy-no-jac -BENNETT5 inf (inf)[3] 1.639e-05 (1.021)[1] 0.02038 (1269)[2] 2.114e-05 (1.316) 1.606e-05 (1) 1.608e-05 (1.001) 1.905e-05 (1.186)[1] -cubic, Start 1 5.244e-14 (1.358e+08) 3.861e-22 (1) 1.85e-12 (4.792e+09) 3.586e-11 (9.288e+10) 6.723e-13 (1.741e+09) 1.119e-14 (2.899e+07) 3.861e-22 (1) -cubic, Start 2 2.424e-17 (6.278e+04) 3.861e-22 (1) 3.306e-06 (8.563e+15) 7.579e-06 (1.963e+16) 6.926e-18 (1.794e+04) 1.146e-14 (2.969e+07) 3.861e-22 (1) -cubic-fba 7.913e-19 (2049) 3.861e-22 (1) 3.306e-06 (8.563e+15) 7.579e-06 (1.963e+16) 9.768e-18 (2.53e+04) 1.146e-14 (2.969e+07) 3.861e-22 (1) -EMU 73673 1046 (1) 1053 (1.007)[1] 1552 (1.484)[2] 1057 (1.01) inf (inf)[3] 1.032e+05 (98.62) 1046 (1) -Problem Def 1 1.299e-10 (1) 0.04153 (3.199e+08) 33.36 (2.569e+11)[2] 4.214e-07 (3245) 0.1119 (8.618e+08) 6.784e+05 (5.225e+15) 0.03966 (3.055e+08) + bumps dfo gsl mantid minuit ralfit scipy + amoeba dfogn lmsder BFGS minuit gn lm-scipy-no-jac +BENNETT5 1.608e-05 (1.001) inf (inf)[3] 1.639e-05 (1.021)[1] 0.02038 (1269)[2] 2.114e-05 (1.316) 1.606e-05 (1) 1.905e-05 (1.186)[1] +cubic, Start 1 1.119e-14 (2.899e+07) 5.244e-14 (1.358e+08) 3.861e-22 (1) 1.85e-12 (4.792e+09) 3.586e-11 (9.288e+10) 6.723e-13 (1.741e+09) 3.861e-22 (1) +cubic, Start 2 1.146e-14 (2.969e+07) 2.424e-17 (6.278e+04) 3.861e-22 
(1) 3.306e-06 (8.563e+15) 7.579e-06 (1.963e+16) 6.926e-18 (1.794e+04) 3.861e-22 (1) +cubic-fba 1.146e-14 (2.969e+07) 7.913e-19 (2049) 3.861e-22 (1) 3.306e-06 (8.563e+15) 7.579e-06 (1.963e+16) 9.768e-18 (2.53e+04) 3.861e-22 (1) +EMU 73673 1.032e+05 (98.62) 1046 (1) 1053 (1.007)[1] 1552 (1.484)[2] 1057 (1.01) inf (inf)[3] 1046 (1) +Problem Def 1 6.784e+05 (5.225e+15) 1.299e-10 (1) 0.04153 (3.199e+08) 33.36 (2.569e+11)[2] 4.214e-07 (3245) 0.1119 (8.618e+08) 0.03966 (3.055e+08) diff --git a/fitbenchmarking/utils/default_options.ini b/fitbenchmarking/utils/default_options.ini index 4e97f1ad9..720d75375 100644 --- a/fitbenchmarking/utils/default_options.ini +++ b/fitbenchmarking/utils/default_options.ini @@ -8,6 +8,15 @@ # entry for the software with a newline separated list of minimizers. # default is all available minimizers as follows: +# bumps: available minimizers (amoeba, lm-bumps, newton, de, mp) +# for more information see +# https://bumps.readthedocs.io/en/latest/guide/optimizer.html +bumps: amoeba + lm-bumps + newton + de + mp + # dfo: available minimimizers (dfogn, dfols) # for more information see # http://people.maths.ox.ac.uk/robertsl/dfogn/ @@ -64,15 +73,6 @@ ralfit: gn hybrid hybrid_reg -# sasview: available minimizers (amoeba, lm-bumps, newton, de, mp) -# for more information see -# https://bumps.readthedocs.io/en/latest/guide/optimizer.html -sasview: amoeba - lm-bumps - newton - de - mp - # scipy: available minimizers (lm-scipy, trf, dogbox) # for more information see scipy.optimize.least_squares.html # https://docs.scipy.org/doc/scipy/reference/generated/ @@ -99,13 +99,13 @@ num_runs: 5 # software is used to select the fitting software to benchmark, this should be # a newline-separated list -# default is dfo, minuit, sasview, and scipy -software: dfo +# default is bumps, dfo, minuit, and scipy +software: bumps + dfo # gsl # mantid minuit # ralfit - sasview scipy # use_errors will switch between weighted and unweighted least squares From 
384347432ecbddf5f1158ba312d7273543a54106 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Wed, 20 May 2020 14:27:53 +0100 Subject: [PATCH 19/28] Updating links to open docs in new tabs --- fitbenchmarking/results_processing/base_table.py | 2 +- .../results_processing/tests/expected_results/acc.html | 2 +- .../results_processing/tests/expected_results/compare.html | 2 +- .../tests/expected_results/local_min.html | 2 +- .../results_processing/tests/expected_results/runtime.html | 2 +- fitbenchmarking/templates/table_template.html | 7 +++++-- 6 files changed, 10 insertions(+), 7 deletions(-) diff --git a/fitbenchmarking/results_processing/base_table.py b/fitbenchmarking/results_processing/base_table.py index b53db0e4b..6decf8a02 100644 --- a/fitbenchmarking/results_processing/base_table.py +++ b/fitbenchmarking/results_processing/base_table.py @@ -218,7 +218,7 @@ def to_html(self, table, colour, links, error): :rtype: str """ link_template = '{0}' + 'latest/concept/how/Minimizers.html#{0}" target="_blank">{0}' table.apply(lambda x: self.enable_error(x, error, "{}"), axis=1, result_type='expand') table.apply(lambda x: self.enable_link(x, links), axis=1, diff --git a/fitbenchmarking/results_processing/tests/expected_results/acc.html b/fitbenchmarking/results_processing/tests/expected_results/acc.html index 73c551f59..3f19d147c 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/acc.html +++ b/fitbenchmarking/results_processing/tests/expected_results/acc.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813df_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #fef0d9; - }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 0.01 (238.1)1
+ }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
diff --git a/fitbenchmarking/results_processing/tests/expected_results/compare.html b/fitbenchmarking/results_processing/tests/expected_results/compare.html index e0498d60b..f6f974e66 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/compare.html +++ b/fitbenchmarking/results_processing/tests/expected_results/compare.html @@ -15,7 +15,7 @@ background-image: linear-gradient(#b30000,#b30000,#b30000,#b30000); } #T_318813de_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-image: linear-gradient(#fef0d9,#fef0d9,#b30000,#b30000); - }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 1 (1)1
+ }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
diff --git a/fitbenchmarking/results_processing/tests/expected_results/local_min.html b/fitbenchmarking/results_processing/tests/expected_results/local_min.html index 36c4c3136..ec217c713 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/local_min.html +++ b/fitbenchmarking/results_processing/tests/expected_results/local_min.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813e1_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #b30000; - }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 1 (1)
0.01 (238.1)1
+ }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
diff --git a/fitbenchmarking/results_processing/tests/expected_results/runtime.html b/fitbenchmarking/results_processing/tests/expected_results/runtime.html index d84d0c81c..f9c8ea496 100644 --- a/fitbenchmarking/results_processing/tests/expected_results/runtime.html +++ b/fitbenchmarking/results_processing/tests/expected_results/runtime.html @@ -15,7 +15,7 @@ background-color: #b30000; } #T_318813e0_7fe8_11ea_870a_fb713d9c56e2row1_col3 { background-color: #b30000; - }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 False (0.5174)1
+ }
scipy
lm-scipy-no-jac lm-scipy trf dogbox
diff --git a/fitbenchmarking/templates/table_template.html b/fitbenchmarking/templates/table_template.html index 08aef6b91..630aa7eae 100644 --- a/fitbenchmarking/templates/table_template.html +++ b/fitbenchmarking/templates/table_template.html @@ -6,7 +6,7 @@ - + Fitbenchmark Results Table @@ -23,7 +23,7 @@

{{ result_name }}

{% endif %}
- +

@@ -46,6 +46,9 @@

Clicking the problem names will take you to details of the best minimizer.

+

+ Clicking the software name will take you to FitBenchmarking Read the Docs documentation for the selected software. +

Errors

The superscripts in the tables denote current error handling within FitBenchmarking. From df99dc7bbd38bc7fccc22b5cc950f32bacc581f4 Mon Sep 17 00:00:00 2001 From: Anders-Markvardsen Date: Thu, 21 May 2020 07:54:38 +0100 Subject: [PATCH 20/28] change basic to default in filename and files --- azure-pipelines.yml | 2 +- .../{test_basic_controllers.py => test_default_controllers.py} | 2 +- fitbenchmarking/controllers/tests/test_externals_controllers.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename fitbenchmarking/controllers/tests/{test_basic_controllers.py => test_default_controllers.py} (99%) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a38c20332..ccb564323 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -30,6 +30,6 @@ steps: pytest fitbenchmarking\core pytest fitbenchmarking\results_processing pytest fitbenchmarking\parsing - pytest fitbenchmarking\controllers\test\test_basic_controllers.py + pytest fitbenchmarking\controllers\test\test_default_controllers.py pytest fitbenchmarking\cli\tests\test_exception_handler.py displayName: 'pytest utils tests' diff --git a/fitbenchmarking/controllers/tests/test_basic_controllers.py b/fitbenchmarking/controllers/tests/test_default_controllers.py similarity index 99% rename from fitbenchmarking/controllers/tests/test_basic_controllers.py rename to fitbenchmarking/controllers/tests/test_default_controllers.py index 1cf2a7b79..efd6729fd 100644 --- a/fitbenchmarking/controllers/tests/test_basic_controllers.py +++ b/fitbenchmarking/controllers/tests/test_default_controllers.py @@ -1,4 +1,4 @@ -# Tests for controllers available from basic fitbenchmarking install +# Tests for the controllers available from a default fitbenchmarking install import inspect import numpy as np import os diff --git a/fitbenchmarking/controllers/tests/test_externals_controllers.py b/fitbenchmarking/controllers/tests/test_externals_controllers.py index 09ce49294..1f98cb966 100644 --- 
a/fitbenchmarking/controllers/tests/test_externals_controllers.py +++ b/fitbenchmarking/controllers/tests/test_externals_controllers.py @@ -1,5 +1,5 @@ # Tests for controllers for externals, i.e. those available in addition -# to those from a basic fitbenchmarking install +# to those from a default fitbenchmarking install import inspect import numpy as np import os From 2534927f864ae2b5823d0c7359e464c12bfc5cca Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Thu, 21 May 2020 08:35:55 +0100 Subject: [PATCH 21/28] Apply suggestions from code review Co-authored-by: Anders Markvardsen --- fitbenchmarking/controllers/scipy_controller.py | 1 + fitbenchmarking/controllers/scipy_ls_controller.py | 1 + 2 files changed, 2 insertions(+) diff --git a/fitbenchmarking/controllers/scipy_controller.py b/fitbenchmarking/controllers/scipy_controller.py index 5bb65a415..27c717a1b 100644 --- a/fitbenchmarking/controllers/scipy_controller.py +++ b/fitbenchmarking/controllers/scipy_controller.py @@ -1,5 +1,6 @@ """ Implements a controller for the scipy fitting software. +In particular, here for the scipy minimize solver for general minimization problems """ from scipy.optimize import minimize diff --git a/fitbenchmarking/controllers/scipy_ls_controller.py b/fitbenchmarking/controllers/scipy_ls_controller.py index 9c16a2282..f373c1eba 100644 --- a/fitbenchmarking/controllers/scipy_ls_controller.py +++ b/fitbenchmarking/controllers/scipy_ls_controller.py @@ -1,5 +1,6 @@ """ Implements a controller for the scipy ls fitting software. +In particular, for the scipy least_squares solver. 
""" from scipy.optimize import least_squares From 9c39aa6aff80af1bb271c5dc608d5118273f1533 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Thu, 21 May 2020 12:25:09 +0100 Subject: [PATCH 22/28] Fixing unit and system tests --- examples/all_softwares.ini | 38 ++--- examples/options_template.ini | 38 ++--- .../tests/test_default_controllers.py | 145 ++++-------------- fitbenchmarking/utils/default_options.ini | 14 -- 4 files changed, 56 insertions(+), 179 deletions(-) diff --git a/examples/all_softwares.ini b/examples/all_softwares.ini index 8f504e209..37ae16707 100644 --- a/examples/all_softwares.ini +++ b/examples/all_softwares.ini @@ -77,14 +77,14 @@ # L-BFGS-B, TNC, SLSQP) # for more information see scipy.optimize.minimize.html # https://docs.scipy.org/doc/scipy/reference/generated/ -#scipy: Nelder-Mead -# Powell -# CG -# BFGS -# Newton-CG -# L-BFGS-B -# TNC -# SLSQP +scipy: Nelder-Mead + Powell + CG + BFGS + Newton-CG + L-BFGS-B + TNC + SLSQP # scipy_ls: available minimizers (lm-scipy-no-jac, lm-scipy, trf, dogbox) # for more information see scipy.optimize.least_squares.html @@ -95,24 +95,10 @@ # Jacobian evaluation. We do not see significant speed changes or # difference in the accuracy results when running trf or dogbox with # or without problem.eval_j for the Jacobian evaluation -#scipy_ls: lm-scipy-no-jac -# lm-scipy -# trf -# dogbox - -# scipy_ls: available minimizers (lm-scipy-no-jac, lm-scipy, trf, dogbox) -# for more information see scipy.optimize.least_squares.html -# https://docs.scipy.org/doc/scipy/reference/generated/ -# Note: The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation -# which are significantly faster and gives different results than -# using the minimizer "lm-scipy" which uses problem.eval_j for the -# Jacobian evaluation. 
We do not see significant speed changes or -# difference in the accuracy results when running trf or dogbox with -# or without problem.eval_j for the Jacobian evaluation -#scipy_ls: lm-scipy-no-jac -# lm-scipy -# trf -# dogbox +scipy_ls: lm-scipy-no-jac + lm-scipy + trf + dogbox ############################################################################## diff --git a/examples/options_template.ini b/examples/options_template.ini index dd188866e..36336bc0d 100644 --- a/examples/options_template.ini +++ b/examples/options_template.ini @@ -73,32 +73,18 @@ # hybrid # hybrid_reg -<<<<<<< HEAD -# scipy: available minimizers (lm-scipy, trf, dogbox) -# for more information see scipy.optimize.least_squares.html -======= -# sasview: available minimizers (amoeba, lm-bumps, newton, de, mp) -# for more information see -# https://bumps.readthedocs.io/en/latest/guide/optimizer.html -#sasview: amoeba -# lm-bumps -# newton -# de -# mp - # scipy: available minimizers (Nelder-Mead, Powell, CG, BFGS, Newton-CG, # L-BFGS-B, TNC, SLSQP) # for more information see scipy.optimize.minimize.html ->>>>>>> origin/release-0.1.x # https://docs.scipy.org/doc/scipy/reference/generated/ -#scipy: Nelder-Mead -# Powell -# CG -# BFGS -# Newton-CG -# L-BFGS-B -# TNC -# SLSQP +scipy: Nelder-Mead + Powell + CG + BFGS + Newton-CG + L-BFGS-B + TNC + SLSQP # scipy_ls: available minimizers (lm-scipy-no-jac, lm-scipy, trf, dogbox) # for more information see scipy.optimize.least_squares.html @@ -109,10 +95,10 @@ # Jacobian evaluation. 
We do not see significant speed changes or # difference in the accuracy results when running trf or dogbox with # or without problem.eval_j for the Jacobian evaluation -#scipy_ls: lm-scipy-no-jac -# lm-scipy -# trf -# dogbox +scipy_ls: lm-scipy-no-jac + lm-scipy + trf + dogbox ############################################################################## diff --git a/fitbenchmarking/controllers/tests/test_default_controllers.py b/fitbenchmarking/controllers/tests/test_default_controllers.py index d37a69145..7190ba32c 100644 --- a/fitbenchmarking/controllers/tests/test_default_controllers.py +++ b/fitbenchmarking/controllers/tests/test_default_controllers.py @@ -154,11 +154,11 @@ class ControllerTests(TestCase): def setUp(self): self.problem = make_fitting_problem() - def test_sasview(self): + def test_bumps(self): """ SasviewController: Test for output shape """ - controller = SasviewController(self.problem) + controller = BumpsController(self.problem) controller.minimizer = 'amoeba' self.shared_testing(controller) @@ -169,96 +169,35 @@ def test_sasview(self): controller._status = 1 self.check_diverged(controller) - def test_scipy_ls(self): - """ -<<<<<<< HEAD:fitbenchmarking/controllers/tests/test_controllers.py - file_path = os.path.join('multifit_set', 'multifit.txt') - problem = make_fitting_problem(file_path) - - controller = MantidController(problem) - controller.minimizer = 'Levenberg-Marquardt' - - controller.parameter_set = 0 - controller.prepare() - controller.fit() - controller.cleanup() - - self.assertEqual(len(controller.final_params), len(controller.data_x), - 'Multifit did not return a result for each data file') - - self.assertEqual(len(controller.final_params[0]), - len(controller.initial_params), - 'Incorrect number of final params.') - - def test_mantid_singlefit_chisquared(self): + def test_dfo(self): """ - Test the override in Mantid conroller is working correctly for - evaluating chi_squared(SingleFit). 
+ DFOController: Tests for output shape """ - m_controller = MantidController(self.problem) - b_controller = DummyController(self.problem) - params = np.array([1, 2, 3, 4]) - x = np.array([6, 2, 32, 4]) - y = np.array([1, 21, 3, 4]) - e = np.array([.5, .003, 1, 2]) + controller = DFOController(self.problem) + # test one from each class + minimizers = ['dfogn', + 'dfols'] + for minimizer in minimizers: + controller.minimizer = minimizer + self.shared_testing(controller) - expected = b_controller.eval_chisq(params=params, x=x, y=y, e=e) - actual = m_controller.eval_chisq(params=params, x=x, y=y, e=e) - - self.assertEqual(expected, actual, - 'Mantid controller found a different chi squared' - ' for single fit problem.') - - def test_mantid_multifit_chisquared(self): - """ - Test the override in Mantid conroller is working correctly for - evaluating chi_squared(MultiFit). - """ - m_controller = MantidController(self.problem) - b_controller = DummyController(self.problem) - params = [np.array([1, 2, 3, 4]), - np.array([1, 2, 3, 4]), - np.array([1, 2, 3, 4])] - xs = [np.array([6, 2, 32, 4]), - np.array([6, 2, 32, 4]), - np.array([6, 2, 32, 4])] - ys = [np.array([1, 21, 3, 4]), - np.array([1, 21, 3, 4]), - np.array([1, 21, 3, 4])] - es = [np.array([.5, .003, 1, 2]), - np.array([.5, .003, 1, 2]), - np.array([.5, .003, 1, 2])] - - expected = [b_controller.eval_chisq(params=p, x=x, y=y, e=e) - for x, y, e, p in zip(xs, ys, es, params)] - actual = m_controller.eval_chisq(params=params, x=xs, y=ys, e=es) - - self.assertListEqual( - expected, actual, - 'Mantid controller found a different chi squared for multi fit' - ' problem.') + controller._status = 0 + self.check_converged(controller) + controller._status = 2 + self.check_max_iterations(controller) + controller._status = 5 + self.check_diverged(controller) - def test_bumps(self): - """ - BumpsController: Test for output shape + def test_minuit(self): """ - controller = BumpsController(self.problem) - controller.minimizer = 
'amoeba' -======= - ScipyController: Test for output shape + MinuitController: Tests for output shape """ - controller = ScipyLSController(self.problem) - controller.minimizer = 'lm' - - ->>>>>> > origin / release - 0.1.x: fitbenchmarking / controllers / tests / test_default_controllers.py + controller = MinuitController(self.problem) + controller.minimizer = 'minuit' self.shared_testing(controller) - - controller._status = 1 - self.check_converged(controller) controller._status = 0 - self.check_max_iterations(controller) - controller._status = -1 + self.check_converged(controller) + controller._status = 2 self.check_diverged(controller) def test_scipy(self): @@ -276,35 +215,20 @@ def test_scipy(self): controller._status = -1 self.check_diverged(controller) - def test_dfo(self): + def test_scipy_ls(self): """ - DFOController: Tests for output shape + ScipyLSController: Test for output shape """ - controller = DFOController(self.problem) - # test one from each class - minimizers = ['dfogn', - 'dfols'] - for minimizer in minimizers: - controller.minimizer = minimizer - self.shared_testing(controller) - - controller._status = 0 - self.check_converged(controller) - controller._status = 2 - self.check_max_iterations(controller) - controller._status = 5 - self.check_diverged(controller) + controller = ScipyLSController(self.problem) + controller.minimizer = 'lm' - def test_minuit(self): - """ - MinuitController: Tests for output shape - """ - controller = MinuitController(self.problem) - controller.minimizer = 'minuit' self.shared_testing(controller) - controller._status = 0 + + controller._status = 1 self.check_converged(controller) - controller._status = 2 + controller._status = 0 + self.check_max_iterations(controller) + controller._status = -1 self.check_diverged(controller) def shared_testing(self, controller): @@ -363,17 +287,12 @@ def test_imports(self): Test that the factory returns the correct class for inputs """ -<<<<<<< 
HEAD:fitbenchmarking/controllers/tests/test_controllers.py - valid = ['scipy', 'mantid', 'bumps', 'ralfit'] -======= valid = ['scipy_ls', 'mantid', 'sasview', 'ralfit'] valid_names = ['scipyls', 'mantid', 'sasview', 'ralfit'] ->>>>>>> origin/release-0.1.x:fitbenchmarking/controllers/tests/test_default_controllers.py invalid = ['foo', 'bar', 'hello', 'r2d2'] for software, v in zip(valid, valid_names): controller = ControllerFactory.create_controller(software) - print(controller.__name__.lower()) self.assertTrue(controller.__name__.lower().startswith(v)) for software in invalid: diff --git a/fitbenchmarking/utils/default_options.ini b/fitbenchmarking/utils/default_options.ini index 3a462b6d2..85f091365 100644 --- a/fitbenchmarking/utils/default_options.ini +++ b/fitbenchmarking/utils/default_options.ini @@ -73,23 +73,9 @@ ralfit: gn hybrid hybrid_reg -<<<<<<< HEAD -# scipy: available minimizers (lm-scipy, trf, dogbox) -# for more information see scipy.optimize.least_squares.html -======= -# sasview: available minimizers (amoeba, lm-bumps, newton, de, mp) -# for more information see -# https://bumps.readthedocs.io/en/latest/guide/optimizer.html -sasview: amoeba - lm-bumps - newton - de - mp - # scipy: available minimizers (Nelder-Mead, Powell, CG, BFGS, Newton-CG, # L-BFGS-B, TNC, SLSQP) # for more information see scipy.optimize.minimize.html ->>>>>>> origin/release-0.1.x # https://docs.scipy.org/doc/scipy/reference/generated/ scipy: Nelder-Mead Powell From c64670cae541d8e99b39fbec33751078d64fbe79 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Thu, 21 May 2020 13:48:43 +0100 Subject: [PATCH 23/28] Correcting SasView to Bumps docstrings Co-authored-by: Tyrone Rees --- fitbenchmarking/controllers/bumps_controller.py | 10 +++++----- .../controllers/tests/test_default_controllers.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fitbenchmarking/controllers/bumps_controller.py b/fitbenchmarking/controllers/bumps_controller.py index 
b503c7592..ab1dfa44e 100644 --- a/fitbenchmarking/controllers/bumps_controller.py +++ b/fitbenchmarking/controllers/bumps_controller.py @@ -1,5 +1,5 @@ """ -Implements a controller for the SasView fitting software. +Implements a controller for the Bumps fitting software. """ from bumps.fitters import fit as bumpsFit @@ -12,7 +12,7 @@ class BumpsController(Controller): """ - Controller for the Sasview fitting software. + Controller for the Bumps fitting software. Sasview requires a model to fit. Setup creates a model with the correct function. @@ -35,9 +35,9 @@ def __init__(self, problem): def setup(self): """ - Setup problem ready to run with SasView. + Setup problem ready to run with Bumps. - Creates a Sasview FitProblem for calling in fit() + Creates a FitProblem for calling in the fit() function of Bumps """ # Bumps fails with the *args notation param_name_str = ', '.join(self._param_names) @@ -82,7 +82,7 @@ def setup(self): def fit(self): """ - Run problem with SasView. + Run problem with Bumps. 
""" result = bumpsFit(self._fit_problem, method=self.minimizer) diff --git a/fitbenchmarking/controllers/tests/test_default_controllers.py b/fitbenchmarking/controllers/tests/test_default_controllers.py index 7190ba32c..99e9c11af 100644 --- a/fitbenchmarking/controllers/tests/test_default_controllers.py +++ b/fitbenchmarking/controllers/tests/test_default_controllers.py @@ -156,7 +156,7 @@ def setUp(self): def test_bumps(self): """ - SasviewController: Test for output shape + BumpsController: Test for output shape """ controller = BumpsController(self.problem) controller.minimizer = 'amoeba' From 2324746c4847eef73e16ab16e325b35c5eb5b48a Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Thu, 21 May 2020 13:51:06 +0100 Subject: [PATCH 24/28] Removing unused sasview controller --- .../controllers/sasview_controller.py | 104 ------------------ .../tests/test_default_controllers.py | 4 +- 2 files changed, 2 insertions(+), 106 deletions(-) delete mode 100644 fitbenchmarking/controllers/sasview_controller.py diff --git a/fitbenchmarking/controllers/sasview_controller.py b/fitbenchmarking/controllers/sasview_controller.py deleted file mode 100644 index 1c11112d3..000000000 --- a/fitbenchmarking/controllers/sasview_controller.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Implements a controller for the SasView fitting software. -""" - -from bumps.fitters import fit as bumpsFit -from bumps.names import Curve, FitProblem - -import numpy as np - -from fitbenchmarking.controllers.base_controller import Controller - - -class SasviewController(Controller): - """ - Controller for the Sasview fitting software. - - Sasview requires a model to fit. - Setup creates a model with the correct function. 
- """ - - def __init__(self, problem): - """ - Extract param names for function setup - - :param problem: Problem to fit - :type problem: FittingProblem - """ - super(SasviewController, self).__init__(problem) - - self._param_names = self.problem.param_names - - self._func_wrapper = None - self._fit_problem = None - self._bumps_result = None - - def setup(self): - """ - Setup problem ready to run with SasView. - - Creates a Sasview FitProblem for calling in fit() - """ - # Bumps fails with the *args notation - param_name_str = ', '.join(self._param_names) - wrapper = "def fitFunction(x, {}):\n".format(param_name_str) - wrapper += " return func(x, {})".format(param_name_str) - - exec_dict = {'func': self.problem.function} - exec(wrapper, exec_dict) - - model = exec_dict['fitFunction'] - - # Remove any function attribute. BinWidth is the only attribute in all - # FitBenchmark (Mantid) problems. - param_dict = {name: value - for name, value - in zip(self._param_names, self.initial_params)} - - # Create a Function Wrapper for the problem function. The type of the - # Function Wrapper is acceptable by Bumps. - func_wrapper = Curve(fn=model, - x=self.data_x, - y=self.data_y, - dy=self.data_e, - **param_dict) - - # Set a range for each parameter - val_ranges = self.problem.value_ranges - for name in self._param_names: - min_val = -np.inf - max_val = np.inf - if val_ranges is not None and name in val_ranges: - min_val = val_ranges[name][0] - max_val = val_ranges[name][1] - func_wrapper.__dict__[name].range(min_val, max_val) - - # Create a Problem Wrapper. The type of the Problem Wrapper is - # acceptable by Bumps fitting. - self._func_wrapper = func_wrapper - self._fit_problem = FitProblem(func_wrapper) - if self.minimizer == "lm-bumps": - self.minimizer = "lm" - - def fit(self): - """ - Run problem with SasView. 
- """ - result = bumpsFit(self._fit_problem, method=self.minimizer) - - self._bumps_result = result - self._status = self._bumps_result.status - - def cleanup(self): - """ - Convert the result to a numpy array and populate the variables results - will be read from. - """ - if self._status == 0: - self.flag = 0 - elif self._status == 2: - self.flag = 1 - else: - self.flag = 2 - - self.final_params = self._bumps_result.x diff --git a/fitbenchmarking/controllers/tests/test_default_controllers.py b/fitbenchmarking/controllers/tests/test_default_controllers.py index 99e9c11af..ca6a8ce07 100644 --- a/fitbenchmarking/controllers/tests/test_default_controllers.py +++ b/fitbenchmarking/controllers/tests/test_default_controllers.py @@ -287,8 +287,8 @@ def test_imports(self): Test that the factory returns the correct class for inputs """ - valid = ['scipy_ls', 'mantid', 'sasview', 'ralfit'] - valid_names = ['scipyls', 'mantid', 'sasview', 'ralfit'] + valid = ['scipy_ls', 'mantid', 'ralfit'] + valid_names = ['scipyls', 'mantid', 'ralfit'] invalid = ['foo', 'bar', 'hello', 'r2d2'] for software, v in zip(valid, valid_names): From 149bb268166dfb5f488768a26374fc7e4ca800a2 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Fri, 22 May 2020 09:33:43 +0100 Subject: [PATCH 25/28] Fixing scipy jacobian eval to use prob.jac --- .coveragerc | 1 + examples/all_softwares.ini | 4 ++-- examples/options_template.ini | 4 ++-- fitbenchmarking/controllers/scipy_controller.py | 6 +++--- fitbenchmarking/controllers/scipy_ls_controller.py | 4 ++-- fitbenchmarking/utils/default_options.ini | 4 ++-- 6 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.coveragerc b/.coveragerc index 54eab0343..49dabe6bc 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,6 +1,7 @@ [run] omit = */tests/* + fitbenchmarking/cli/main.py [report] diff --git a/examples/all_softwares.ini b/examples/all_softwares.ini index 104bcdb4d..783362d6c 100644 --- a/examples/all_softwares.ini +++ b/examples/all_softwares.ini 
@@ -91,10 +91,10 @@ scipy: Nelder-Mead # https://docs.scipy.org/doc/scipy/reference/generated/ # Note: The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation # which are significantly faster and gives different results than -# using the minimizer "lm-scipy" which uses problem.eval_j for the +# using the minimizer "lm-scipy" which uses problem.jac.eval for the # Jacobian evaluation. We do not see significant speed changes or # difference in the accuracy results when running trf or dogbox with -# or without problem.eval_j for the Jacobian evaluation +# or without problem.jac.eval for the Jacobian evaluation scipy_ls: lm-scipy-no-jac lm-scipy trf diff --git a/examples/options_template.ini b/examples/options_template.ini index 5a17b30e7..d5ee414ac 100644 --- a/examples/options_template.ini +++ b/examples/options_template.ini @@ -91,10 +91,10 @@ scipy: Nelder-Mead # https://docs.scipy.org/doc/scipy/reference/generated/ # Note: The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation # which are significantly faster and gives different results than -# using the minimizer "lm-scipy" which uses problem.eval_j for the +# using the minimizer "lm-scipy" which uses problem.jac.eval for the # Jacobian evaluation. 
We do not see significant speed changes or # difference in the accuracy results when running trf or dogbox with -# or without problem.eval_j for the Jacobian evaluation +# or without problem.jac.eval for the Jacobian evaluation scipy_ls: lm-scipy-no-jac lm-scipy trf diff --git a/fitbenchmarking/controllers/scipy_controller.py b/fitbenchmarking/controllers/scipy_controller.py index 27c717a1b..b4ecf6138 100644 --- a/fitbenchmarking/controllers/scipy_controller.py +++ b/fitbenchmarking/controllers/scipy_controller.py @@ -41,9 +41,9 @@ def eval_jac(self, x, *args): :return: Approximation of the Jacobian :rtype: numpy array """ - out = self.problem.eval_j(params=x, - func=self.problem.eval_r_norm, - *args) + out = self.problem.jac(params=x, + func=self.problem.eval_r_norm, + *args) return out def fit(self): diff --git a/fitbenchmarking/controllers/scipy_ls_controller.py b/fitbenchmarking/controllers/scipy_ls_controller.py index f373c1eba..4a1eeb7de 100644 --- a/fitbenchmarking/controllers/scipy_ls_controller.py +++ b/fitbenchmarking/controllers/scipy_ls_controller.py @@ -37,7 +37,7 @@ def fit(self): # using the minimizer "lm-scipy" which uses problem.eval_j for the # Jacobian evaluation. 
We do not see significant speed changes or # difference in the accuracy results when running trf or dogbox with - # or without problem.eval_j for the Jacobian evaluation + # or without problem.jac.eval for the Jacobian evaluation if self.minimizer == "lm-scipy-no-jac": self.result = least_squares(fun=self.problem.eval_r, x0=self.initial_params, @@ -47,7 +47,7 @@ def fit(self): self.result = least_squares(fun=self.problem.eval_r, x0=self.initial_params, method=self.minimizer, - jac=self.problem.eval_j, + jac=self.problem.jac, max_nfev=500) self._popt = self.result.x diff --git a/fitbenchmarking/utils/default_options.ini b/fitbenchmarking/utils/default_options.ini index bf86b5b9a..14ebd1cf4 100644 --- a/fitbenchmarking/utils/default_options.ini +++ b/fitbenchmarking/utils/default_options.ini @@ -91,10 +91,10 @@ scipy: Nelder-Mead # https://docs.scipy.org/doc/scipy/reference/generated/ # Note: The minimizer "lm-scipy-no-jac" uses MINPACK's Jacobian evaluation # which are significantly faster and gives different results than -# using the minimizer "lm-scipy" which uses problem.eval_j for the +# using the minimizer "lm-scipy" which uses problem.jac.eval for the # Jacobian evaluation. 
We do not see significant speed changes or
# difference in the accuracy results when running trf or dogbox with
-# or without problem.eval_j for the Jacobian evaluation
+# or without problem.jac.eval for the Jacobian evaluation
 scipy_ls: lm-scipy-no-jac
           lm-scipy
           trf

From 705998636a93f39f72519c15904073fc408932d5 Mon Sep 17 00:00:00 2001
From: Tyrone Rees
Date: Fri, 22 May 2020 09:44:44 +0100
Subject: [PATCH 26/28] Fill out workflow docs

---
 docs/source/contributors/guidelines.rst | 25 +++++----
 docs/source/contributors/index.rst      |  1 +
 docs/source/contributors/workflow.rst   | 74 +++++++++++++++++++++++++
 3 files changed, 89 insertions(+), 11 deletions(-)
 create mode 100644 docs/source/contributors/workflow.rst

diff --git a/docs/source/contributors/guidelines.rst b/docs/source/contributors/guidelines.rst
index 4d1bda742..2cf1a3922 100644
--- a/docs/source/contributors/guidelines.rst
+++ b/docs/source/contributors/guidelines.rst
@@ -1,8 +1,16 @@
 .. _guidelines:
 
-######################
-Contributor Guidelines
-######################
+################
+Coding Standards
+################
+
+All code submitted must meet certain standards, outlined below, before
+it can be merged into the master branch. It is the contributor's
+job to ensure that the following is satisfied, and the reviewer's
+role to check that these guidelines have been followed.
+
+The workflow to be used for submitting new code/issues is described in
+:ref:`workflow`.
 
 =======
 Linting
@@ -17,15 +25,10 @@ for this.
 Documentation
 =============
 
-Any new code will be accepted only if there is relevent documentation for it in
-the corresponding pull request.
-
-===
-Git
-===
+Any new code will be accepted only if the documentation, written in sphinx and
+found in `docs/`, has been updated accordingly, and the docstrings in the code
+have been updated where necessary.
 
-Branches should be created from master, and by convention follow the pattern
-"nnn_description" where nnn is the issue number.
 =======
 Testing

diff --git a/docs/source/contributors/index.rst b/docs/source/contributors/index.rst
index 4086814e9..180a50d96 100644
--- a/docs/source/contributors/index.rst
+++ b/docs/source/contributors/index.rst
@@ -12,6 +12,7 @@ Here you will find all you need in order to get started.
    :caption: Contents:
 
    guidelines
+   workflow
    structure
 
 Module Index
diff --git a/docs/source/contributors/workflow.rst b/docs/source/contributors/workflow.rst
new file mode 100644
index 000000000..823c604d3
--- /dev/null
+++ b/docs/source/contributors/workflow.rst
@@ -0,0 +1,74 @@
+.. _workflow:
+
+############
+Git Workflow
+############
+
+
+======
+Issues
+======
+
+All new work should start with a
+`new GitHub issue `_
+being filed.
+This should clearly explain what the change to the code will do.
+There are templates for *Bug report*, *Documentation*,
+*Feature request* and *Test* issues on GitHub, and you can also
+open a blank issue if none of these work.
+
+If an issue helps meet a piece of work agreed with our funders, it
+is linked to the appropriate `Milestone `_ in GitHub.
+
+===============
+Adding new code
+===============
+
+The first step in adding new code is to create a branch, where the work
+will be done. Branches should be named according to the convention
+"<issue number>-description", where <issue number> is the issue number.
+
+Please ensure everything required in :ref:`guidelines` is included in
+the branch.
+
+When you think your new code is ready to be merged into the codebase,
+you should open a pull request. The description should contain the
+words "Fixes #<issue number>", where <issue number> is the issue number;
+this will ensure
+the issue is closed when the code is merged into master. At this point
+the automated tests will trigger, and you can see if the code passes on
+an independent system.
+
+Sometimes it is desirable to open a pull request when the code is not
+quite ready to be merged. This is a good idea, for example, if you want
+to get an early opinion on a coding decision. 
If this is the case, you
+should mark the pull request as a *draft* on GitHub.
+
+Once the work is ready to be reviewed, you may want to assign a reviewer,
+if you think someone would be well suited to review this change. It is worth
+messaging them on, for example, Slack, as well as requesting their review on
+GitHub.
+
+================
+Release branches
+================
+
+Most work should be branched off, and merged into, master.
+
+The exception is when we are near a release, and then the contributor
+must make the decsion of whether the code will enter this release, or
+wait for a future one.
+
+Branches named `release-*` are protected branches; code must be approved by
+a reviewer before being added to them, and automated tests will be run on
+pull requests to these branches. If code is to be included in the release, it
+must be pulled into this branch and not master.
+
+Release branches should have the format `release-major.minor.x`, starting from
+`release-0.1.x`. When the code is released, we will tag that commit with
+`release-0.1.0`. Any hotfixes will increment `x` by one, and a new tag will
+be created accordingly. If at some point we don't want to provide hot-fixes
+to a given minor release, then the corresponding release branch may be deleted.
+
+When a pull request is merged into a release branch, then the change should also
+be merged into master as soon as possible. As long as the tests pass, and there
+are no merge conflicts, this can be done without a detailed review. 
From 136fbb481308aa854ef99cf67b9c4cb0326aa792 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Fri, 22 May 2020 09:50:20 +0100 Subject: [PATCH 27/28] Fixing jacobians in scipy and external tests --- fitbenchmarking/controllers/scipy_controller.py | 6 +++--- fitbenchmarking/controllers/scipy_ls_controller.py | 2 +- .../controllers/tests/test_externals_controllers.py | 3 +++ 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/fitbenchmarking/controllers/scipy_controller.py b/fitbenchmarking/controllers/scipy_controller.py index b4ecf6138..0774257bd 100644 --- a/fitbenchmarking/controllers/scipy_controller.py +++ b/fitbenchmarking/controllers/scipy_controller.py @@ -41,9 +41,9 @@ def eval_jac(self, x, *args): :return: Approximation of the Jacobian :rtype: numpy array """ - out = self.problem.jac(params=x, - func=self.problem.eval_r_norm, - *args) + out = self.problem.jac.eval(params=x, + func=self.problem.eval_r_norm, + *args) return out def fit(self): diff --git a/fitbenchmarking/controllers/scipy_ls_controller.py b/fitbenchmarking/controllers/scipy_ls_controller.py index 4a1eeb7de..4adb0f235 100644 --- a/fitbenchmarking/controllers/scipy_ls_controller.py +++ b/fitbenchmarking/controllers/scipy_ls_controller.py @@ -47,7 +47,7 @@ def fit(self): self.result = least_squares(fun=self.problem.eval_r, x0=self.initial_params, method=self.minimizer, - jac=self.problem.jac, + jac=self.problem.jac.eval, max_nfev=500) self._popt = self.result.x diff --git a/fitbenchmarking/controllers/tests/test_externals_controllers.py b/fitbenchmarking/controllers/tests/test_externals_controllers.py index 1f98cb966..893aef400 100644 --- a/fitbenchmarking/controllers/tests/test_externals_controllers.py +++ b/fitbenchmarking/controllers/tests/test_externals_controllers.py @@ -15,6 +15,7 @@ from fitbenchmarking.parsing.parser_factory import parse_problem_file from fitbenchmarking.utils import exceptions from fitbenchmarking.utils.options import Options +from 
fitbenchmarking.jacobian.SciPyFD_2point_jacobian import ScipyTwoPoint def make_fitting_problem(file_name='cubic.dat'): @@ -28,6 +29,8 @@ def make_fitting_problem(file_name='cubic.dat'): fitting_problem = parse_problem_file(fname, options) fitting_problem.correct_data() + jac = ScipyTwoPoint(fitting_problem) + fitting_problem.jac = jac return fitting_problem From ec58c8b439f5ed66d60cf46104bfb32071816033 Mon Sep 17 00:00:00 2001 From: Michael Wathen Date: Fri, 22 May 2020 10:06:26 +0100 Subject: [PATCH 28/28] Update docs/source/contributors/workflow.rst --- docs/source/contributors/workflow.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/contributors/workflow.rst b/docs/source/contributors/workflow.rst index 823c604d3..606e60979 100644 --- a/docs/source/contributors/workflow.rst +++ b/docs/source/contributors/workflow.rst @@ -55,7 +55,7 @@ Release branches Most work should be branched off, and merged into, master. The exception is when we are near a release, and then the contributor -must make the decsion of whether the code will enter this release, or +must make the decision of whether the code will enter this release, or wait for a future one. Branches named `release-*` are protected branches; code must be approved by

scipy
lm-scipy-no-jac lm-scipy trf dogbox
prob_0 0.01 (238.1)1