
Commit

Merge 4493dbe into bcb0d3b
Michael Wathen committed Jul 10, 2020
2 parents bcb0d3b + 4493dbe commit d650814
Showing 7 changed files with 796 additions and 16 deletions.
3 changes: 3 additions & 0 deletions .pylintrc
@@ -9,6 +9,9 @@ max-statements=60
# Certain functions require more arguments due to design
max-args=10

# Ignore unused-argument warnings for the catch-all args and kwargs parameters
ignored-argument-names=arg|args|kwargs

[MESSAGES CONTROL]
disable=too-many-instance-attributes,
        invalid-name,
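The practical effect of the new `ignored-argument-names` pattern is that pylint stops reporting unused-argument (W0613) for catch-all parameters named arg, args or kwargs, which the mocked test helpers below rely on. A minimal illustrative sketch (hypothetical function, not from the repository):

# With ignored-argument-names=arg|args|kwargs in .pylintrc, pylint no longer
# flags these parameters as unused-argument (W0613), even though the body
# never touches them.
def fake_callback(*args, **kwargs):
    """Stand-in whose incoming arguments are deliberately ignored."""
    return None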
16 changes: 0 additions & 16 deletions fitbenchmarking/core/tests/test_fitting_benchmarking.py

This file was deleted.

173 changes: 173 additions & 0 deletions fitbenchmarking/core/tests/test_fitting_benchmarking_benchmark.py
@@ -0,0 +1,173 @@
"""
Tests for fitbenchmarking.core.fitting_benchmarking.benchmark
"""
from __future__ import (absolute_import, division, print_function)
import inspect
import copy
import os
import unittest
import mock

from fitbenchmarking import mock_problems
from fitbenchmarking.utils import fitbm_result
from fitbenchmarking.core.fitting_benchmarking import benchmark
from fitbenchmarking.parsing.parser_factory import parse_problem_file
from fitbenchmarking.utils.options import Options
from fitbenchmarking.jacobian.SciPyFD_2point_jacobian import ScipyTwoPoint
from fitbenchmarking.utils.exceptions import NoResultsError

# Defines the module which we mock out certain function calls for
FITTING_DIR = "fitbenchmarking.core.fitting_benchmarking"


def make_fitting_problem(file_name='cubic.dat', minimizers=None):
    """
    Helper function that returns a simple fitting problem
    """
    options = Options()
    if minimizers:
        options.minimizers = minimizers

    bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
    fname = os.path.join(bench_prob_dir, file_name)

    fitting_problem = parse_problem_file(fname, options)
    fitting_problem.correct_data()
    jac = ScipyTwoPoint(fitting_problem)
    fitting_problem.jac = jac
    return fitting_problem


def dict_test(expected, actual):
    """
    Test to check two dictionaries are the same

    :param expected: expected dictionary result
    :type expected: dict
    :param actual: actual dictionary result
    :type actual: dict
    """
    for key in actual.keys():
        assert key in expected.keys()
        assert sorted(actual[key]) == sorted(expected[key])


class BenchmarkTests(unittest.TestCase):
    """
    benchmark tests
    """

    def setUp(self):
        """
        Setting up problem for tests
        """
        self.problem = make_fitting_problem()
        self.options = Options()
        self.options.software = ["scipy"]
        self.scipy_len = len(self.options.minimizers["scipy"])
        bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
        self.default_parsers_dir = os.path.join(bench_prob_dir,
                                                "default_parsers")
        self.all_minimzers = copy.copy(self.options.minimizers)

    def shared_tests(self, expected_names, expected_unselected_minimzers,
                     expected_minimzers):
        """
        Shared tests for the `benchmark` function

        :param expected_names: expected sorted list of problem names
        :type expected_names: list
        :param expected_unselected_minimzers: expected unselected minimizers
        :type expected_unselected_minimzers: dict
        :param expected_minimzers: expected minimizers
        :type expected_minimzers: dict
        """
        results, failed_problems, unselected_minimzers = \
            benchmark(self.options, self.default_parsers_dir)

        assert len(results) == len(expected_names)
        for i, name in enumerate(expected_names):
            assert all(p.name == name for p in results[i])

        assert failed_problems == []
        dict_test(expected_unselected_minimzers, unselected_minimzers)
        dict_test(expected_minimzers, self.options.minimizers)

    @mock.patch('{}.loop_over_benchmark_problems'.format(FITTING_DIR))
    def test_check_no_unselected_minimizers(self,
                                            loop_over_benchmark_problems):
        """
        Checks benchmarking runs with no unselected minimizers
        """
        names = ["random_1", "random_3", "random_2"]
        expected_names = sorted(names)
        results = []
        for name in names:
            result_args = {'options': self.options,
                           'problem': self.problem,
                           'jac': self.problem.jac,
                           'initial_params': self.problem.starting_values[0],
                           'params': [],
                           'chi_sq': 1,
                           'name': name}
            list_results = [fitbm_result.FittingResult(**result_args)
                            for j in range(self.scipy_len)]
            results.extend(list_results)
        problem_fails = []
        expected_unselected_minimzers = {"scipy": []}
        expected_minimzers = copy.copy(self.all_minimzers)

        loop_over_benchmark_problems.return_value = \
            (results, problem_fails, expected_unselected_minimzers)
        self.shared_tests(expected_names, expected_unselected_minimzers,
                          expected_minimzers)

    @mock.patch('{}.loop_over_benchmark_problems'.format(FITTING_DIR))
    def test_check_unselected_minimizers(self, loop_over_benchmark_problems):
        """
        Checks benchmarking runs with a few unselected minimizers
        """
        names = ["random_1", "random_3", "random_2"]
        expected_names = sorted(names)
        results = []
        for name in names:
            result_args = {'options': self.options,
                           'problem': self.problem,
                           'jac': self.problem.jac,
                           'initial_params': self.problem.starting_values[0],
                           'params': [],
                           'chi_sq': 1,
                           'name': name}
            list_results = [fitbm_result.FittingResult(**result_args)
                            for j in range(self.scipy_len)]
            results.extend(list_results)
        problem_fails = []
        expected_unselected_minimzers = {"scipy": ['SLSQP', 'Powell', 'CG']}
        expected_minimzers = copy.copy(self.all_minimzers)
        for keys, minimzers in expected_unselected_minimzers.items():
            expected_minimzers[keys] = \
                list(set(expected_minimzers[keys]) - set(minimzers))

        loop_over_benchmark_problems.return_value = \
            (results, problem_fails, expected_unselected_minimzers)
        self.shared_tests(expected_names, expected_unselected_minimzers,
                          expected_minimzers)

    @mock.patch('{}.loop_over_benchmark_problems'.format(FITTING_DIR))
    def test_check_no_results_produced(self, loop_over_benchmark_problems):
        """
        Checks benchmarking raises an error when no results are produced
        """
        results = []
        problem_fails = []
        expected_unselected_minimzers = {"scipy": []}
        loop_over_benchmark_problems.return_value = \
            (results, problem_fails, expected_unselected_minimzers)
        with self.assertRaises(NoResultsError):
            _, _, _ = \
                benchmark(self.options, self.default_parsers_dir)


if __name__ == "__main__":
    unittest.main()
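As a quick check, the new module can be run on its own with the standard library test runner. A minimal sketch, assuming the fitbenchmarking package (and its tests directory) is importable in the current environment:

import unittest

# Load and run only the benchmark tests added in this commit.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "fitbenchmarking.core.tests.test_fitting_benchmarking_benchmark")
unittest.TextTestRunner(verbosity=2).run(suite)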
@@ -0,0 +1,134 @@
"""
Tests for
fitbenchmarking.core.fitting_benchmarking.loop_over_benchmark_problems
"""
from __future__ import (absolute_import, division, print_function)
import inspect
import os
import unittest
import mock

from fitbenchmarking import mock_problems
from fitbenchmarking.utils import fitbm_result
from fitbenchmarking.core.fitting_benchmarking import \
    loop_over_benchmark_problems
from fitbenchmarking.parsing.parser_factory import parse_problem_file
from fitbenchmarking.utils.options import Options
from fitbenchmarking.jacobian.SciPyFD_2point_jacobian import ScipyTwoPoint

# Defines the module which we mock out certain function calls for
FITTING_DIR = "fitbenchmarking.core.fitting_benchmarking"


# Due to structure of tests, some variables may not be previously defined
# in the init function.
# pylint: disable=attribute-defined-outside-init
def make_fitting_problem(file_name='cubic.dat', minimizers=None):
    """
    Helper function that returns a simple fitting problem
    """
    options = Options()
    if minimizers:
        options.minimizers = minimizers

    bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
    fname = os.path.join(bench_prob_dir, file_name)

    fitting_problem = parse_problem_file(fname, options)
    fitting_problem.correct_data()
    jac = ScipyTwoPoint(fitting_problem)
    fitting_problem.jac = jac
    return fitting_problem


class LoopOverBenchmarkProblemsTests(unittest.TestCase):
    """
    loop_over_benchmark_problems tests
    """

    def setUp(self):
        """
        Setting up problem for tests
        """
        self.problem = make_fitting_problem()
        self.options = Options()
        self.options.software = ["scipy"]
        self.scipy_len = len(self.options.minimizers["scipy"])
        bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
        self.default_parsers_dir = os.path.join(bench_prob_dir,
                                                "default_parsers")
        self.count = 0
        self.result_args = {'options': self.options,
                            'problem': self.problem,
                            'jac': self.problem.jac,
                            'initial_params': self.problem.starting_values[0],
                            'params': [],
                            'chi_sq': 1}
        self.list_results = [fitbm_result.FittingResult(**self.result_args)
                             for i in range(self.scipy_len)]
        self.individual_problem_results = [
            self.list_results, self.list_results]

    def mock_func_call(self, *args, **kwargs):
        """
        Mock function to be used instead of loop_over_starting_values
        """
        individual_problem_results = \
            self.individual_problem_results[self.count]
        problem_fails = self.problem_fails
        unselected_minimzers = {"scipy": []}
        self.count += 1
        return individual_problem_results, problem_fails, unselected_minimzers

    def shared_tests(self, list_len, expected_problem_fails):
        """
        Shared tests for the `loop_over_benchmark_problems` function

        :param list_len: expected number of fitting results
        :type list_len: int
        :param expected_problem_fails: list of problems which fail
        :type expected_problem_fails: list
        """
        results, failed_problems, unselected_minimzers = \
            loop_over_benchmark_problems(self.problem_group,
                                         self.options)
        assert len(results) == list_len
        assert failed_problems == expected_problem_fails
        for keys, values in unselected_minimzers.items():
            assert keys == "scipy"
            assert values == []

    @mock.patch('{}.loop_over_starting_values'.format(FITTING_DIR))
    def test_run_multiple_benchmark_problems(self, loop_over_starting_values):
        """
        Checks that all benchmark problems run with no failures
        """
        self.problem_fails = []
        loop_over_starting_values.side_effect = self.mock_func_call
        self.problem_group = []
        for file_name in ["cubic.dat", "prob_def_1.txt"]:
            self.problem_group.append(
                os.path.join(self.default_parsers_dir, file_name))
        expected_problem_fails = self.problem_fails
        expected_list_length = len(self.list_results) * 2
        self.shared_tests(expected_list_length, expected_problem_fails)

    @mock.patch('{}.loop_over_starting_values'.format(FITTING_DIR))
    def test_run_multiple_failed_problems(self, loop_over_starting_values):
        """
        Checks that multiple failed problems are reported correctly
        """
        self.problem_fails = ['Random_failed_problem_1',
                              'Random_failed_problem_2']
        loop_over_starting_values.side_effect = self.mock_func_call
        self.problem_group = [os.path.join(self.default_parsers_dir,
                                           "cubic.dat")]

        expected_problem_fails = self.problem_fails
        expected_list_length = len(self.list_results)
        self.shared_tests(expected_list_length, expected_problem_fails)


if __name__ == "__main__":
    unittest.main()
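mock_func_call above hands back a different canned result on each call by tracking self.count; mock can express the same successive-return behaviour with an iterable side_effect. A minimal, self-contained sketch of that pattern (the values are illustrative, not part of the fitbenchmarking API):

import mock

# Each call to the mock yields the next item from the iterable side_effect,
# mirroring the (results, failed_problems, unselected_minimizers) tuples
# returned per problem by mock_func_call.
fake_loop = mock.MagicMock(side_effect=[("results_1", [], {"scipy": []}),
                                        ("results_2", [], {"scipy": []})])

assert fake_loop() == ("results_1", [], {"scipy": []})
assert fake_loop() == ("results_2", [], {"scipy": []})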
