Merge 10ca91a into 99eed25
AndrewLister-STFC committed Sep 18, 2019
2 parents 99eed25 + 10ca91a commit 800a9b6
Showing 9 changed files with 256 additions and 215 deletions.
29 changes: 15 additions & 14 deletions example_scripts/example_runScripts.py
@@ -22,8 +22,8 @@
scripts_folder = os.path.join(fitbenchmarking_folder, 'fitbenchmarking')
sys.path.insert(0, scripts_folder)

from fitting_benchmarking import do_fitting_benchmark as fitBenchmarking
from results_output import save_results_tables as printTables
from fitting_benchmarking import fitbenchmark_group
from results_output import save_results_tables

# SPECIFY THE SOFTWARE/PACKAGE CONTAINING THE MINIMIZERS YOU WANT TO BENCHMARK
software = ['scipy']
@@ -89,20 +89,21 @@

test_data = glob.glob(data_dir + '/*.*')

if test_data == []:
print('Problem set {} not found'.format(sub_dir))
continue

print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
data_dir=data_dir,
use_errors=use_errors, results_dir=results_dir)
results, results_dir = \
fitbenchmark_group(group_name=label,
software_options=software_options,
data_dir=data_dir,
use_errors=use_errors,
results_dir=results_dir)

print('\nProducing output for the {} problem set\n'.format(label))
for idx, group_results in enumerate(results_per_group):
# Display the runtime and accuracy results in a table
printTables(software_options, group_results,
group_name=label, use_errors=use_errors,
color_scale=color_scale, results_dir=results_dir)
# Display the runtime and accuracy results in a table
save_results_tables(software_options=software_options,
results_per_test=results,
group_name=label,
use_errors=use_errors,
color_scale=color_scale,
results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
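
Note on the renamed entry points: the diff above replaces `do_fitting_benchmark`/`printTables` with `fitbenchmark_group` and a direct keyword-argument call to `save_results_tables`, and drops the per-group `enumerate` loop. A minimal sketch of the new calling pattern, assembled only from the calls visible in this diff (the group name, directories and option values are illustrative placeholders, not values taken from the repository):

```python
# Minimal sketch of the renamed API, using only calls visible in this diff.
# Group name, directories and option values are illustrative placeholders.
from fitting_benchmarking import fitbenchmark_group
from results_output import save_results_tables

software_options = {'software': ['scipy'], 'minimizer_options': None}

results, results_dir = fitbenchmark_group(group_name='NIST_low_difficulty',
                                          software_options=software_options,
                                          data_dir='/path/to/problem_files',
                                          use_errors=True,
                                          results_dir=None)

# The per-group loop over results is gone: the full result set is passed
# straight to save_results_tables.
save_results_tables(software_options=software_options,
                    results_per_test=results,
                    group_name='NIST_low_difficulty',
                    use_errors=True,
                    color_scale=None,
                    results_dir=results_dir)
```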
59 changes: 32 additions & 27 deletions example_scripts/example_runScripts_SasView.py
@@ -47,32 +47,32 @@
'******************************************')
sys.exit()

from fitting_benchmarking import do_fitting_benchmark as fitBenchmarking
from results_output import save_results_tables as printTables
from fitting_benchmarking import fitbenchmark_group
from results_output import save_results_tables

# SPECIFY THE SOFTWARE/PACKAGE CONTAINING THE MINIMIZERS YOU WANT TO BENCHMARK
software = ['sasview']
software_options = {'software': software}

# User defined minimizers
custom_minimizers = {"mantid": ["BFGS", "Simplex"],
"scipy": ["lm", "trf", "dogbox"],
"sasview": ["amoeba"]}
"scipy": ["lm", "trf", "dogbox"],
"sasview": ["amoeba"]}
# custom_minimizers = None
# "amoeba", "lm", "newton", "de", "pt", "mp"

# SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
if len(sys.argv) > 1:
# Read custom minimizer options from file
software_options['minimizer_options'] = None
software_options['options_file'] = current_path + sys.argv[1]
# Read custom minimizer options from file
software_options['minimizer_options'] = None
software_options['options_file'] = current_path + sys.argv[1]
elif custom_minimizers:
# Custom minimizer options:
software_options['minimizer_options'] = custom_minimizers
# Custom minimizer options:
software_options['minimizer_options'] = custom_minimizers
else:
# Using default minimizers from
# fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
software_options['minimizer_options'] = None
# Using default minimizers from
# fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
software_options['minimizer_options'] = None

# Benchmark problem directories
benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
@@ -104,22 +104,27 @@
problem_sets = ["SAS_modelling/1D"]

for sub_dir in problem_sets:
# generate group label/name used for problem set
label = sub_dir.replace('/', '_')
# generate group label/name used for problem set
label = sub_dir.replace('/', '_')

# Create full path for the directory that holds a group of problem definition files
data_dir = os.path.join(benchmark_probs_dir, sub_dir)
# Create full path for the directory that holds a group of problem definition files
data_dir = os.path.join(benchmark_probs_dir, sub_dir)

print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
data_dir=data_dir,
use_errors=use_errors, results_dir=results_dir)
print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results, results_dir = fitbenchmark_group(group_name=label,
software_options=software_options,
data_dir=data_dir,
use_errors=use_errors,
results_dir=results_dir)

print('\nProducing output for the {} problem set\n'.format(label))
for idx, group_results in enumerate(results_per_group):
# Display the runtime and accuracy results in a table
printTables(software_options, group_results,
group_name=label, use_errors=use_errors,
color_scale=color_scale, results_dir=results_dir)
print('\nProducing output for the {} problem set\n'.format(label))

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
# Display the runtime and accuracy results in a table
save_results_tables(software_options=software_options,
results_per_test=results,
group_name=label,
use_errors=use_errors,
color_scale=color_scale,
results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
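
The option-selection branches above recur across these example scripts: a file name passed on the command line takes precedence, then any in-script `custom_minimizers`, and finally the package defaults. A compact sketch of that precedence, mirroring the SasView example (the options file name is whatever the user supplies; `os.path.join` is used here for robustness, whereas the example scripts concatenate the strings directly):

```python
# Sketch of how software_options is assembled in these example scripts;
# values mirror the SasView example above.
import os
import sys

current_path = os.path.dirname(os.path.realpath(__file__))

software_options = {'software': ['sasview']}
custom_minimizers = {"mantid": ["BFGS", "Simplex"],
                     "scipy": ["lm", "trf", "dogbox"],
                     "sasview": ["amoeba"]}

if len(sys.argv) > 1:
    # A user-supplied options file overrides any in-script choices.
    software_options['minimizer_options'] = None
    software_options['options_file'] = os.path.join(current_path, sys.argv[1])
elif custom_minimizers:
    # In-script minimizer selection.
    software_options['minimizer_options'] = custom_minimizers
else:
    # Fall back to fitbenchmarking_default_options.json shipped with the package.
    software_options['minimizer_options'] = None
```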
50 changes: 24 additions & 26 deletions example_scripts/example_runScripts_expert.py
@@ -23,7 +23,7 @@
scripts_folder = os.path.join(fitbenchmarking_folder, 'fitbenchmarking')
sys.path.insert(0, scripts_folder)

from fitting_benchmarking import do_benchmarking
from fitting_benchmarking import _benchmark
from utils import misc
from utils import create_dirs
from results_output import save_tables, generate_tables, \
@@ -91,19 +91,17 @@

test_data = glob.glob(data_dir + '/*.*')

if test_data == []:
print('Problem set {} not found'.format(sub_dir))
continue

print('\nRunning the benchmarking on the {} problem set\n'.format(group_name))

# Processes software_options dictionary into Fitbenchmarking format
minimizers, software = misc.get_minimizers(software_options)

# Sets up the problem groups specified by the user by providing
# Sets up the problem group specified by the user by providing
# a respective data directory.
problem_groups = misc.setup_fitting_problems(data_dir, group_name)
problem_group = misc.setup_fitting_problems(data_dir)

# Create output dirs
results_dir = create_dirs.results(results_dir)
group_results_dir = create_dirs.group_results(results_dir, group_name)

@@ -112,34 +110,34 @@
group_results_dir, use_errors)

# Loops through the group of problems and benchmarks them
prob_results = do_benchmarking(user_input, problem_groups, group_name)
prob_results = _benchmark(user_input, problem_group)

print('\nProducing output for the {} problem set\n'.format(group_name))
for idx, group_results in enumerate(prob_results):
# Creates the results directory where the tables are located
tables_dir = create_dirs.restables_dir(results_dir, group_name)

if isinstance(software, list):
minimizers = sum(minimizers, [])
# Creates the results directory where the tables are located
tables_dir = create_dirs.restables_dir(results_dir, group_name)

# Creates the problem names with links to the visual display pages
# in rst
linked_problems = visual_pages.create_linked_probs(group_results,
group_name, results_dir)
if isinstance(software, list):
minimizers = sum(minimizers, [])

# Generates accuracy and runtime normalised tables and summary tables
norm_acc_rankings, norm_runtimes, sum_cells_acc, sum_cells_runtime = generate_tables(group_results, minimizers)
# Creates the problem names with links to the visual display pages
# in rst
linked_problems = visual_pages.create_linked_probs(prob_results,
group_name, results_dir)

# Creates an accuracy table
acc_tbl = create_acc_tbl(minimizers, linked_problems, norm_acc_rankings, use_errors, color_scale)
# Generates accuracy and runtime tables and summary tables
acc_rankings, runtimes, sum_cells_acc, sum_cells_runtime = generate_tables(prob_results, minimizers)

# Creates a runtime table
runtime_tbl = create_runtime_tbl(minimizers, linked_problems, norm_runtimes, use_errors, color_scale)
# Creates an accuracy table
acc_tbl = create_acc_tbl(minimizers, linked_problems, acc_rankings, use_errors, color_scale)

# Saves accuracy minimizer results
save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')
# Creates a runtime table
runtime_tbl = create_runtime_tbl(minimizers, linked_problems, runtimes, use_errors, color_scale)

# Saves runtime minimizer results
save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')
# Saves accuracy minimizer results
save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')

# Saves runtime minimizer results
save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
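
The expert script drives the lower-level pipeline directly, and this change removes the per-problem `enumerate` loop: the full `prob_results` set is handed to the table helpers in one go. A rough sketch of that output stage, using only the calls visible in the hunks above (the `results_output` import line is truncated in this diff and the `visual_pages` import is not shown, so those import locations are assumptions):

```python
# Sketch of the expert script's output stage after this change. Imports
# marked "assumed" are not fully visible in the hunks above.
from utils import create_dirs
from results_output import save_tables, generate_tables, \
    create_acc_tbl, create_runtime_tbl   # assumed continuation of the import
import visual_pages                      # assumed import location


def produce_tables(prob_results, minimizers, software, group_name,
                   results_dir, use_errors=True, color_scale=None):
    """Build and save the accuracy and runtime tables for one problem group."""
    # Results directory where the tables are located.
    tables_dir = create_dirs.restables_dir(results_dir, group_name)

    # Flatten per-software minimizer lists before tabulating.
    if isinstance(software, list):
        minimizers = sum(minimizers, [])

    # Problem names linked to their visual display pages (rst).
    linked_problems = visual_pages.create_linked_probs(prob_results,
                                                       group_name, results_dir)

    # Accuracy and runtime tables; summary cells are unused in this sketch.
    acc_rankings, runtimes, _, _ = generate_tables(prob_results, minimizers)

    acc_tbl = create_acc_tbl(minimizers, linked_problems, acc_rankings,
                             use_errors, color_scale)
    runtime_tbl = create_runtime_tbl(minimizers, linked_problems, runtimes,
                                     use_errors, color_scale)

    # Saves accuracy and runtime minimizer results.
    save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')
    save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')
```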
62 changes: 32 additions & 30 deletions example_scripts/example_runScripts_mantid.py
@@ -22,6 +22,7 @@
fitbenchmarking_folder = os.path.abspath(os.path.join(current_path, os.pardir))
scripts_folder = os.path.join(fitbenchmarking_folder, 'fitbenchmarking')
sys.path.insert(0, scripts_folder)
sys.path.insert(1, fitbenchmarking_folder)

try:
import mantid.simpleapi as msapi
@@ -33,31 +34,31 @@
'******************************************')
sys.exit()

from fitting_benchmarking import do_fitting_benchmark as fitBenchmarking
from results_output import save_results_tables as printTables
from fitting_benchmarking import fitbenchmark_group
from results_output import save_results_tables

# SPECIFY THE SOFTWARE/PACKAGE CONTAINING THE MINIMIZERS YOU WANT TO BENCHMARK
software = ["mantid"]
software_options = {'software': software}

# User defined minimizers
custom_minimizers = {"mantid": ["Simplex"],
"scipy": ["lm", "trf", "dogbox"]}
"scipy": ["lm", "trf", "dogbox"]}
# custom_minimizers = None


# SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
if len(sys.argv) > 1:
# Read custom minimizer options from file
software_options['minimizer_options'] = None
software_options['options_file'] = current_path + sys.argv[1]
# Read custom minimizer options from file
software_options['minimizer_options'] = None
software_options['options_file'] = current_path + sys.argv[1]
elif custom_minimizers:
# Custom minimizer options:
software_options['minimizer_options'] = custom_minimizers
# Custom minimizer options:
software_options['minimizer_options'] = custom_minimizers
else:
# Using default minimizers from
# fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
software_options['minimizer_options'] = None
# Using default minimizers from
# fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
software_options['minimizer_options'] = None

# Benchmark problem directories
benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
@@ -89,28 +90,29 @@
"SAS_modelling/1D"]

for sub_dir in problem_sets:
# generate group label/name used for problem set
label = sub_dir.replace('/', '_')
# generate group label/name used for problem set
label = sub_dir.replace('/', '_')

# Create full path for the directory that holds a group of problem definition files
data_dir = os.path.join(benchmark_probs_dir, sub_dir)
# Create full path for the directory that holds a group of problem definition files
data_dir = os.path.join(benchmark_probs_dir, sub_dir)

test_data = glob.glob(data_dir + '/*.*')
test_data = glob.glob(data_dir + '/*.*')

if test_data == []:
print('Problem set {} not found'.format(sub_dir))
continue
print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results, results_dir = fitbenchmark_group(group_name=label,
software_options=software_options,
data_dir=data_dir,
use_errors=use_errors,
results_dir=results_dir)

print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
data_dir=data_dir,
use_errors=use_errors, results_dir=results_dir)
print('\nProducing output for the {} problem set\n'.format(label))

print('\nProducing output for the {} problem set\n'.format(label))
for idx, group_results in enumerate(results_per_group):
# Display the runtime and accuracy results in a table
printTables(software_options, group_results,
group_name=label, use_errors=use_errors,
color_scale=color_scale, results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
save_results_tables(software_options=software_options,
results_per_test=results,
group_name=label,
use_errors=use_errors,
color_scale=color_scale,
results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
(The remaining 5 of the 9 changed files are not rendered on this page.)
