
Commit b148c87

Merge a62a333 into fd26bfb
AndrewLister-STFC committed Sep 25, 2019
2 parents fd26bfb + a62a333 commit b148c87
Showing 7 changed files with 321 additions and 268 deletions.
3 changes: 3 additions & 0 deletions .travis.yml
@@ -31,6 +31,9 @@ jobs:
- stage: Tests
before_script: mantidpython -m mantid.simpleapi || true
script:
# ======= Examples Tests =============== #
- travis_wait pytest example_scripts/ --cov=example_scripts/
--cov-report term-missing
# ======= Fitting Tests =============== #
- pytest fitbenchmarking/fitting/ --cov=fitbenchmarking/fitting/
--cov-report term-missing
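The new "Examples Tests" stage runs pytest with coverage over example_scripts/, mirroring the existing fitting tests. A rough local equivalent of that CI step, assuming pytest and pytest-cov are installed and the command is run from the repository root, is sketched below in Python:

    # Minimal sketch of the new "Examples Tests" CI step run locally (assumes
    # pytest and pytest-cov are installed and the working directory is the
    # repository root).
    import subprocess

    subprocess.run(
        ["pytest", "example_scripts/",
         "--cov=example_scripts/", "--cov-report", "term-missing"],
        check=True,  # fail loudly if the tests fail, as the CI job would
    )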
Empty file added example_scripts/__init__.py
Empty file.
163 changes: 83 additions & 80 deletions example_scripts/example_runScripts.py
@@ -19,84 +19,87 @@
from fitting_benchmarking import do_fitting_benchmark as fitBenchmarking
from results_output import save_results_tables as printTables

# SPECIFY THE SOFTWARE/PACKAGE CONTAINING THE MINIMIZERS YOU WANT TO BENCHMARK
software = ['scipy']
software_options = {'software': software}

# User defined minimizers
# custom_minimizers = {"mantid": ["BFGS", "Simplex"],
# "scipy": ["lm", "trf", "dogbox"]}
custom_minimizers = None


# SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
if len(sys.argv) > 1:
# Read custom minimizer options from file
software_options['minimizer_options'] = None
software_options['options_file'] = current_path + sys.argv[1]
elif custom_minimizers:
# Custom minimizer options:
software_options['minimizer_options'] = custom_minimizers
else:
# Using default minimizers from
# fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
software_options['minimizer_options'] = None

# Benchmark problem directories
benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
'benchmark_problems')


"""
Modify results_dir to specify where the results of the fit should be saved
If left as None, they will be saved in a "results" folder in the working dir
If the full path is not given results_dir is created relative to the working dir
"""
results_dir = None

# Whether to use errors in the fitting process
use_errors = True

# Parameters of how the final tables are colored
# e.g. lower than 1.1 -> light yellow, higher than 3 -> dark red
# Change these values to suit your needs
color_scale = [(1.1, 'ranking-top-1'),
(1.33, 'ranking-top-2'),
(1.75, 'ranking-med-3'),
(3, 'ranking-low-4'),
(float('nan'), 'ranking-low-5')]


# ADD WHICH PROBLEM SETS TO TEST AGAINST HERE
# Do this, in this example file, by selecting sub-folders in benchmark_probs_dir
# problem_sets = ["CUTEst", "Muon", "Neutron", "NIST/average_difficulty", "NIST/high_difficulty", "NIST/low_difficulty",
# "SAS_modelling/1D"]

problem_sets = ["simple_tests"]

for sub_dir in problem_sets:
# generate group label/name used for problem set
label = sub_dir.replace('/', '_')

# Create full path for the directory that holds a group of problem definition files
data_dir = os.path.join(benchmark_probs_dir, sub_dir)

test_data = glob.glob(data_dir + '/*.*')

if test_data == []:
print('Problem set {} not found'.format(sub_dir))
continue

print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
data_dir=data_dir,
use_errors=use_errors, results_dir=results_dir)

print('\nProducing output for the {} problem set\n'.format(label))
for idx, group_results in enumerate(results_per_group):
# Display the runtime and accuracy results in a table
printTables(software_options, group_results,
group_name=label, use_errors=use_errors,
color_scale=color_scale, results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
def main(args):
# SPECIFY THE SOFTWARE/PACKAGE CONTAINING THE MINIMIZERS YOU WANT TO BENCHMARK
software = ['scipy']
software_options = {'software': software}

# User defined minimizers
# custom_minimizers = {"mantid": ["BFGS", "Simplex"],
# "scipy": ["lm", "trf", "dogbox"]}
custom_minimizers = None

# SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
if len(args) > 1:
# Read custom minimizer options from file
software_options['minimizer_options'] = None
software_options['options_file'] = current_path + args[1]
elif custom_minimizers:
# Custom minimizer options:
software_options['minimizer_options'] = custom_minimizers
else:
# Using default minimizers from
# fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
software_options['minimizer_options'] = None

# Benchmark problem directories
benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
'benchmark_problems')

"""
Modify results_dir to specify where the results of the fit should be saved
If left as None, they will be saved in a "results" folder in the working dir
If the full path is not given results_dir is created relative to the working dir
"""
results_dir = None

# Whether to use errors in the fitting process
use_errors = True

# Parameters of how the final tables are colored
# e.g. lower than 1.1 -> light yellow, higher than 3 -> dark red
# Change these values to suit your needs
color_scale = [(1.1, 'ranking-top-1'),
(1.33, 'ranking-top-2'),
(1.75, 'ranking-med-3'),
(3, 'ranking-low-4'),
(float('nan'), 'ranking-low-5')]

# ADD WHICH PROBLEM SETS TO TEST AGAINST HERE
# Do this, in this example file, by selecting sub-folders in benchmark_probs_dir
# problem_sets = ["CUTEst", "Muon", "Neutron", "NIST/average_difficulty", "NIST/high_difficulty", "NIST/low_difficulty",
# "SAS_modelling/1D"]

problem_sets = ["simple_tests"]

for sub_dir in problem_sets:
# generate group label/name used for problem set
label = sub_dir.replace('/', '_')

# Create full path for the directory that holds a group of problem definition files
data_dir = os.path.join(benchmark_probs_dir, sub_dir)

test_data = glob.glob(data_dir + '/*.*')

if test_data == []:
print('Problem set {} not found'.format(sub_dir))
continue

print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
data_dir=data_dir,
use_errors=use_errors, results_dir=results_dir)

print('\nProducing output for the {} problem set\n'.format(label))
for _, group_results in enumerate(results_per_group):
# Display the runtime and accuracy results in a table
printTables(software_options, group_results,
group_name=label, use_errors=use_errors,
color_scale=color_scale, results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))


if __name__ == '__main__':
main(sys.argv)
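The substantive change in this file is that the module-level script body is wrapped in a main(args) function, with sys.argv handled only under the __name__ == '__main__' guard. That is what makes the example importable, so the new "Examples Tests" CI stage can collect and exercise it with pytest. A hypothetical test along those lines, not part of this commit, might look like the sketch below; it assumes the repository root is on sys.path and that the module-level path setup in example_runScripts is import-safe.

    # Hypothetical test sketch (assumed, not part of this commit): drives the
    # refactored entry point directly. Relies on example_scripts being
    # importable (hence the new __init__.py) and on module import having no
    # blocking side effects.
    from example_scripts import example_runScripts

    def test_main_runs_default_problem_set():
        # Only argv[0] is passed: no options file, so the default minimizers
        # from fitbenchmarking_default_options.json are used.
        example_runScripts.main(['example_runScripts.py'])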
