Commit

Merge ed921d1 into fe511d4
AndrewLister-STFC committed Sep 12, 2019
2 parents fe511d4 + ed921d1 commit 87bbf8e
Showing 9 changed files with 234 additions and 178 deletions.
example_scripts/example_runScripts.py (15 changes: 7 additions & 8 deletions)

@@ -94,15 +94,14 @@
         continue
 
     print('\nRunning the benchmarking on the {} problem set\n'.format(label))
-    results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
-                                                     data_dir=data_dir,
-                                                     use_errors=use_errors, results_dir=results_dir)
+    results, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
+                                           data_dir=data_dir,
+                                           use_errors=use_errors, results_dir=results_dir)
 
     print('\nProducing output for the {} problem set\n'.format(label))
-    for idx, group_results in enumerate(results_per_group):
-        # Display the runtime and accuracy results in a table
-        printTables(software_options, group_results,
-                    group_name=label, use_errors=use_errors,
-                    color_scale=color_scale, results_dir=results_dir)
+    # Display the runtime and accuracy results in a table
+    printTables(software_options, results,
+                group_name=label, use_errors=use_errors,
+                color_scale=color_scale, results_dir=results_dir)
 
     print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
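
The hunk above captures the commit's central change: fitBenchmarking now returns a flat list of results that is passed straight to printTables, so the enumerate loop over per-group results disappears. Below is a minimal sketch of driving a single problem set with the updated calling pattern; the option values and the problem-set path are placeholders, and the imports are assumed to match the example scripts rather than quoted from them.

# Sketch of the post-commit calling pattern; not part of the diff above.
# Assumes fitBenchmarking and printTables are already imported under these
# names, as the example scripts do (module paths omitted here).
import os

software_options = {'software': 'scipy', 'minimizer_options': None}   # placeholder options
use_errors = True
color_scale = None        # placeholder; the example scripts supply their own colour thresholds
results_dir = None        # let fitBenchmarking choose/create the output directory

label = 'NIST_low_difficulty'                                            # hypothetical problem set
data_dir = os.path.join('benchmark_problems', 'NIST', 'low_difficulty')  # hypothetical path

# fitBenchmarking now returns a flat list of fit results plus the results directory.
results, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
                                       data_dir=data_dir,
                                       use_errors=use_errors, results_dir=results_dir)

# The flat results list goes straight into printTables; no per-group loop is needed.
printTables(software_options, results,
            group_name=label, use_errors=use_errors,
            color_scale=color_scale, results_dir=results_dir)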
example_scripts/example_runScripts_SasView.py (44 changes: 22 additions & 22 deletions)

@@ -56,23 +56,23 @@
 
 # User defined minimizers
 custom_minimizers = {"mantid": ["BFGS", "Simplex"],
-                     "scipy": ["lm", "trf", "dogbox"],
-                     "sasview": ["amoeba"]}
+                     "scipy": ["lm", "trf", "dogbox"],
+                     "sasview": ["amoeba"]}
 # custom_minimizers = None
 # "amoeba", "lm", "newton", "de", "pt", "mp"
 
 # SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
 if len(sys.argv) > 1:
-    # Read custom minimizer options from file
-    software_options['minimizer_options'] = None
-    software_options['options_file'] = current_path + sys.argv[1]
+    # Read custom minimizer options from file
+    software_options['minimizer_options'] = None
+    software_options['options_file'] = current_path + sys.argv[1]
 elif custom_minimizers:
-    # Custom minimizer options:
-    software_options['minimizer_options'] = custom_minimizers
+    # Custom minimizer options:
+    software_options['minimizer_options'] = custom_minimizers
 else:
-    # Using default minimizers from
-    # fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
-    software_options['minimizer_options'] = None
+    # Using default minimizers from
+    # fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
+    software_options['minimizer_options'] = None
 
 # Benchmark problem directories
 benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
@@ -104,22 +104,22 @@
 problem_sets = ["SAS_modelling/1D"]
 
 for sub_dir in problem_sets:
-    # generate group label/name used for problem set
-    label = sub_dir.replace('/', '_')
+    # generate group label/name used for problem set
+    label = sub_dir.replace('/', '_')
 
-    # Create full path for the directory that holds a group of problem definition files
-    data_dir = os.path.join(benchmark_probs_dir, sub_dir)
+    # Create full path for the directory that holds a group of problem definition files
+    data_dir = os.path.join(benchmark_probs_dir, sub_dir)
 
-    print('\nRunning the benchmarking on the {} problem set\n'.format(label))
-    results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
-                                                     data_dir=data_dir,
-                                                     use_errors=use_errors, results_dir=results_dir)
+    print('\nRunning the benchmarking on the {} problem set\n'.format(label))
+    results, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
+                                           data_dir=data_dir,
+                                           use_errors=use_errors, results_dir=results_dir)
 
-    print('\nProducing output for the {} problem set\n'.format(label))
-    for idx, group_results in enumerate(results_per_group):
-        # Display the runtime and accuracy results in a table
-        printTables(software_options, group_results,
+    print('\nProducing output for the {} problem set\n'.format(label))
+    # Display the runtime and accuracy results in a table
+    printTables(software_options, results,
                 group_name=label, use_errors=use_errors,
                 color_scale=color_scale, results_dir=results_dir)
 
-    print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
+    print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
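
The SasView runner keeps the same three-way selection of minimizer options used by the other example scripts: an options file named on the command line takes priority, then the custom_minimizers dictionary, then the packaged defaults. The sketch below wraps that if/elif/else in a helper; the function name select_minimizer_options is invented here for illustration, while the keys and fallbacks come from the hunk above.

import sys

def select_minimizer_options(software_options, custom_minimizers, current_path):
    # Illustrative helper only: the example scripts perform this selection inline.
    if len(sys.argv) > 1:
        # An options file passed on the command line overrides everything else
        software_options['minimizer_options'] = None
        software_options['options_file'] = current_path + sys.argv[1]
    elif custom_minimizers:
        # Otherwise use the user-defined minimizers declared at the top of the script
        software_options['minimizer_options'] = custom_minimizers
    else:
        # Otherwise fall back to fitbenchmarking_default_options.json shipped with the package
        software_options['minimizer_options'] = None
    return software_options

# Example usage; the exact software_options contents are assumed and
# current_path is left as a placeholder.
software_options = {'software': 'sasview'}
custom_minimizers = {"mantid": ["BFGS", "Simplex"],
                     "scipy": ["lm", "trf", "dogbox"],
                     "sasview": ["amoeba"]}
software_options = select_minimizer_options(software_options, custom_minimizers, current_path='')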
example_scripts/example_runScripts_expert.py (45 changes: 23 additions & 22 deletions)

@@ -100,10 +100,11 @@
 # Processes software_options dictionary into Fitbenchmarking format
 minimizers, software = misc.get_minimizers(software_options)
 
-# Sets up the problem groups specified by the user by providing
+# Sets up the problem group specified by the user by providing
 # a respective data directory.
-problem_groups = misc.setup_fitting_problems(data_dir, group_name)
+problem_group = misc.setup_fitting_problems(data_dir)
 
+# Create output dirs
 results_dir = create_dirs.results(results_dir)
 group_results_dir = create_dirs.group_results(results_dir, group_name)
 
@@ -112,34 +112,34 @@
                          group_results_dir, use_errors)
 
 # Loops through group of problems and benchmark them
-prob_results = do_benchmarking(user_input, problem_groups, group_name)
+prob_results = do_benchmarking(user_input, problem_group)
 
 print('\nProducing output for the {} problem set\n'.format(group_name))
-for idx, group_results in enumerate(prob_results):
-    # Creates the results directory where the tables are located
-    tables_dir = create_dirs.restables_dir(results_dir, group_name)
+# Creates the results directory where the tables are located
+tables_dir = create_dirs.restables_dir(results_dir, group_name)
 
-    if isinstance(software, list):
-        minimizers = sum(minimizers, [])
+if isinstance(software, list):
+    minimizers = sum(minimizers, [])
 
-    # Creates the problem names with links to the visual display pages
-    # in rst
-    linked_problems = visual_pages.create_linked_probs(group_results,
-                                                       group_name, results_dir)
+# Creates the problem names with links to the visual display pages
+# in rst
+linked_problems = visual_pages.create_linked_probs(prob_results,
                                                    group_name, results_dir)
 
-    # Generates accuracy and runtime normalised tables and summary tables
-    norm_acc_rankings, norm_runtimes, sum_cells_acc, sum_cells_runtime = generate_tables(group_results, minimizers)
+# Generates accuracy and runtime normalised tables and summary tables
+norm_acc_rankings, norm_runtimes, sum_cells_acc, sum_cells_runtime = generate_tables(prob_results, minimizers)
 
-    # Creates an accuracy table
-    acc_tbl = create_acc_tbl(minimizers, linked_problems, norm_acc_rankings, use_errors, color_scale)
+# Creates an accuracy table
+acc_tbl = create_acc_tbl(minimizers, linked_problems, norm_acc_rankings, use_errors, color_scale)
 
-    # Creates an runtime table
-    runtime_tbl = create_runtime_tbl(minimizers, linked_problems, norm_runtimes, use_errors, color_scale)
+# Creates an runtime table
+runtime_tbl = create_runtime_tbl(minimizers, linked_problems, norm_runtimes, use_errors, color_scale)
 
-    # Saves accuracy minimizer results
-    save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')
+# Saves accuracy minimizer results
+save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')
 
-    # Saves runtime minimizer results
-    save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')
+# Saves runtime minimizer results
+save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')
 
 print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
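
With the per-group loop gone, the expert script now runs a single problem group end to end: set up the group, benchmark it, then build and save the accuracy and runtime tables. The sketch below strings those same calls into one helper so the data flow is explicit; the wrapper run_group is invented for illustration, the imports are assumed to match example_runScripts_expert.py, and the call names and order come from the hunk above.

# Illustrative wrapper only; assumes the same imports as example_runScripts_expert.py
# (misc, create_dirs, visual_pages, do_benchmarking, generate_tables,
#  create_acc_tbl, create_runtime_tbl, save_tables).
def run_group(user_input, data_dir, group_name, results_dir,
              minimizers, software, use_errors, color_scale):
    # One problem group, set up from its data directory
    problem_group = misc.setup_fitting_problems(data_dir)

    # Benchmark the whole group in one call
    prob_results = do_benchmarking(user_input, problem_group)

    # Directory that will hold the result tables
    tables_dir = create_dirs.restables_dir(results_dir, group_name)

    if isinstance(software, list):
        minimizers = sum(minimizers, [])

    # Problem names linked to their visual display pages (rst)
    linked_problems = visual_pages.create_linked_probs(prob_results,
                                                       group_name, results_dir)

    # Normalised accuracy/runtime tables plus summary cells
    norm_acc_rankings, norm_runtimes, sum_cells_acc, sum_cells_runtime = \
        generate_tables(prob_results, minimizers)

    acc_tbl = create_acc_tbl(minimizers, linked_problems, norm_acc_rankings,
                             use_errors, color_scale)
    runtime_tbl = create_runtime_tbl(minimizers, linked_problems, norm_runtimes,
                                     use_errors, color_scale)

    save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')
    save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')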
example_scripts/example_runScripts_mantid.py (51 changes: 26 additions & 25 deletions)

@@ -22,6 +22,7 @@
 fitbenchmarking_folder = os.path.abspath(os.path.join(current_path, os.pardir))
 scripts_folder = os.path.join(fitbenchmarking_folder, 'fitbenchmarking')
 sys.path.insert(0, scripts_folder)
+sys.path.insert(1, fitbenchmarking_folder)
 
 try:
     import mantid.simpleapi as msapi
@@ -42,22 +43,22 @@
 
 # User defined minimizers
 custom_minimizers = {"mantid": ["Simplex"],
-                     "scipy": ["lm", "trf", "dogbox"]}
+                     "scipy": ["lm", "trf", "dogbox"]}
 # custom_minimizers = None
 
 
 # SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
 if len(sys.argv) > 1:
-    # Read custom minimizer options from file
-    software_options['minimizer_options'] = None
-    software_options['options_file'] = current_path + sys.argv[1]
+    # Read custom minimizer options from file
+    software_options['minimizer_options'] = None
+    software_options['options_file'] = current_path + sys.argv[1]
 elif custom_minimizers:
-    # Custom minimizer options:
-    software_options['minimizer_options'] = custom_minimizers
+    # Custom minimizer options:
+    software_options['minimizer_options'] = custom_minimizers
 else:
-    # Using default minimizers from
-    # fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
-    software_options['minimizer_options'] = None
+    # Using default minimizers from
+    # fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
+    software_options['minimizer_options'] = None
 
 # Benchmark problem directories
 benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
@@ -89,28 +90,28 @@
                 "SAS_modelling/1D"]
 
 for sub_dir in problem_sets:
-    # generate group label/name used for problem set
-    label = sub_dir.replace('/', '_')
+    # generate group label/name used for problem set
+    label = sub_dir.replace('/', '_')
 
-    # Create full path for the directory that holds a group of problem definition files
-    data_dir = os.path.join(benchmark_probs_dir, sub_dir)
+    # Create full path for the directory that holds a group of problem definition files
+    data_dir = os.path.join(benchmark_probs_dir, sub_dir)
 
-    test_data = glob.glob(data_dir + '/*.*')
+    test_data = glob.glob(data_dir + '/*.*')
 
-    if test_data == []:
-        print('Problem set {} not found'.format(sub_dir))
-        continue
+    if test_data == []:
+        print('Problem set {} not found'.format(sub_dir))
+        continue
 
-    print('\nRunning the benchmarking on the {} problem set\n'.format(label))
-    results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
-                                                     data_dir=data_dir,
-                                                     use_errors=use_errors, results_dir=results_dir)
+    print('\nRunning the benchmarking on the {} problem set\n'.format(label))
+    results, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
+                                           data_dir=data_dir,
+                                           use_errors=use_errors, results_dir=results_dir)
 
-    print('\nProducing output for the {} problem set\n'.format(label))
-    for idx, group_results in enumerate(results_per_group):
-        # Display the runtime and accuracy results in a table
-        printTables(software_options, group_results,
+    print('\nProducing output for the {} problem set\n'.format(label))
+    # Display the runtime and accuracy results in a table
+    printTables(software_options, results,
                 group_name=label, use_errors=use_errors,
                 color_scale=color_scale, results_dir=results_dir)
 
-    print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
+    print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
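
Among the hunks shown above, the one change unique to the Mantid script is the second sys.path entry, which makes both the inner fitbenchmarking package folder and the repository root importable when the example is run from example_scripts. A standalone sketch of that bootstrap follows; the __file__-based definition of current_path is an assumption about how the script resolves its own location.

import os
import sys

# Folder containing this example script (assumed to match how current_path
# is defined earlier in the script).
current_path = os.path.dirname(os.path.realpath(__file__))

# Repository root is one level up; the importable package folder sits inside it.
fitbenchmarking_folder = os.path.abspath(os.path.join(current_path, os.pardir))
scripts_folder = os.path.join(fitbenchmarking_folder, 'fitbenchmarking')

# Search the package folder first and the repository root second, so modules
# resolve both when imported directly from the package folder and when
# imported as fitbenchmarking.<module> from the repository root.
sys.path.insert(0, scripts_folder)
sys.path.insert(1, fitbenchmarking_folder)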