205 improve documentation on entry points #225

Merged: 13 commits, Oct 10, 2019
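The diffs below switch the example scripts from `do_fitting_benchmark` (imported as `fitBenchmarking`) and the `printTables` alias to direct calls to `fitbenchmark_group` and `save_results_tables`. A minimal usage sketch of the renamed entry points follows; the keyword names match the calls in the diffs, while the group name, directories, and `software_options` structure are placeholders and assumptions rather than values taken from this PR.

```python
# Minimal sketch of the renamed entry points used in the updated example
# scripts. Keyword names match the diffs below; the group name, directories
# and software_options structure are placeholders/assumptions.
from fitbenchmarking.fitting_benchmarking import fitbenchmark_group
from fitbenchmarking.results_output import save_results_tables

software_options = {'software': ['scipy'],      # assumed layout, set up
                    'minimizer_options': None}  # earlier in each example script
color_scale = [(1.1, 'ranking-top-1'),
               (1.33, 'ranking-top-2'),
               (1.75, 'ranking-med-3'),
               (3, 'ranking-low-4'),
               (float('nan'), 'ranking-low-5')]

# Run the benchmark for one problem group ...
results, results_dir = fitbenchmark_group(group_name='NIST_low_difficulty',
                                          software_options=software_options,
                                          data_dir='benchmark_problems/NIST',
                                          use_errors=True,
                                          results_dir='fitbenchmarking_results')

# ... then write the accuracy and runtime tables for that group.
save_results_tables(software_options=software_options,
                    results_per_test=results,
                    group_name='NIST_low_difficulty',
                    use_errors=True,
                    color_scale=color_scale,
                    results_dir=results_dir)
```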
example_scripts/example_runScripts.py (33 changes: 19 additions & 14 deletions)
@@ -9,8 +9,8 @@
import sys
import glob

from fitbenchmarking.fitting_benchmarking import do_fitting_benchmark as fitBenchmarking
from fitbenchmarking.results_output import save_results_tables as printTables
from fitbenchmarking.fitting_benchmarking import fitbenchmark_group
from fitbenchmarking.results_output import save_results_tables


def main(args):
@@ -56,10 +56,10 @@ def main(args):
# e.g. lower that 1.1 -> light yellow, higher than 3 -> dark red
# Change these values to suit your needs
color_scale = [(1.1, 'ranking-top-1'),
(1.33, 'ranking-top-2'),
(1.75, 'ranking-med-3'),
(3, 'ranking-low-4'),
(float('nan'), 'ranking-low-5')]
(1.33, 'ranking-top-2'),
(1.75, 'ranking-med-3'),
(3, 'ranking-low-4'),
(float('nan'), 'ranking-low-5')]

# ADD WHICH PROBLEM SETS TO TEST AGAINST HERE
# Do this, in this example file, by selecting sub-folders in benchmark_probs_dir
@@ -82,16 +82,21 @@ def main(args):
continue

print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
data_dir=data_dir,
use_errors=use_errors, results_dir=results_dir)
results, results_dir = \
fitbenchmark_group(group_name=label,
software_options=software_options,
data_dir=data_dir,
use_errors=use_errors,
results_dir=results_dir)

print('\nProducing output for the {} problem set\n'.format(label))
for _, group_results in enumerate(results_per_group):
# Display the runtime and accuracy results in a table
printTables(software_options, group_results,
group_name=label, use_errors=use_errors,
color_scale=color_scale, results_dir=results_dir)
# Display the runtime and accuracy results in a table
save_results_tables(software_options=software_options,
results_per_test=results,
group_name=label,
use_errors=use_errors,
color_scale=color_scale,
results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))

example_scripts/example_runScripts_SasView.py (25 changes: 15 additions & 10 deletions)
@@ -34,8 +34,8 @@
'******************************************')
sys.exit()

from fitbenchmarking.fitting_benchmarking import do_fitting_benchmark as fitBenchmarking
from fitbenchmarking.results_output import save_results_tables as printTables
from fitbenchmarking.fitting_benchmarking import fitbenchmark_group
from fitbenchmarking.results_output import save_results_tables

# SPECIFY THE SOFTWARE/PACKAGE CONTAINING THE MINIMIZERS YOU WANT TO BENCHMARK
software = ['sasview']
@@ -100,15 +100,20 @@
data_dir = os.path.join(benchmark_probs_dir, sub_dir)

print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
data_dir=data_dir,
use_errors=use_errors, results_dir=results_dir)
results, results_dir = fitbenchmark_group(group_name=label,
software_options=software_options,
data_dir=data_dir,
use_errors=use_errors,
results_dir=results_dir)

print('\nProducing output for the {} problem set\n'.format(label))
for idx, group_results in enumerate(results_per_group):
# Display the runtime and accuracy results in a table
printTables(software_options, group_results,
group_name=label, use_errors=use_errors,
color_scale=color_scale, results_dir=results_dir)

# Display the runtime and accuracy results in a table
save_results_tables(software_options=software_options,
results_per_test=results,
group_name=label,
use_errors=use_errors,
color_scale=color_scale,
results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
example_scripts/example_runScripts_expert.py (47 changes: 24 additions & 23 deletions)
@@ -10,7 +10,7 @@
import sys
import glob

from fitbenchmarking.fitting_benchmarking import do_benchmarking
from fitbenchmarking.fitting_benchmarking import _benchmark
from fitbenchmarking.utils import misc
from fitbenchmarking.utils import create_dirs
from fitbenchmarking.results_output import save_tables, generate_tables, \
@@ -90,10 +90,11 @@ def main(argv):
# Processes software_options dictionary into Fitbenchmarking format
minimizers, software = misc.get_minimizers(software_options)

# Sets up the problem groups specified by the user by providing
# Sets up the problem group specified by the user by providing
# a respective data directory.
problem_groups = misc.setup_fitting_problems(data_dir, group_name)
problem_group = misc.setup_fitting_problems(data_dir)

# Create output dirs
results_dir = create_dirs.results(results_dir)
group_results_dir = create_dirs.group_results(results_dir, group_name)

@@ -102,35 +103,35 @@
group_results_dir, use_errors)

# Loops through group of problems and benchmark them
prob_results = do_benchmarking(user_input, problem_groups, group_name)
prob_results = _benchmark(user_input, problem_group)

print('\nProducing output for the {} problem set\n'.format(group_name))
for idx, group_results in enumerate(prob_results):
# Creates the results directory where the tables are located
tables_dir = create_dirs.restables_dir(results_dir, group_name)

if isinstance(software, list):
minimizers = sum(minimizers, [])
# Creates the results directory where the tables are located
tables_dir = create_dirs.restables_dir(results_dir, group_name)

# Creates the problem names with links to the visual display pages
# in rst
linked_problems = visual_pages.create_linked_probs(group_results,
group_name, results_dir)
if isinstance(software, list):
minimizers = sum(minimizers, [])

# Generates accuracy and runtime normalised tables and summary tables
norm_acc_rankings, norm_runtimes, sum_cells_acc, sum_cells_runtime = generate_tables(group_results, minimizers)
# Creates the problem names with links to the visual display pages
# in rst
linked_problems = visual_pages.create_linked_probs(prob_results,
group_name, results_dir)

# Creates an accuracy table
acc_tbl = create_acc_tbl(minimizers, linked_problems, norm_acc_rankings, use_errors, color_scale)
# Generates accuracy and runtime tables and summary tables
acc_rankings, runtimes, sum_cells_acc, sum_cells_runtime = generate_tables(prob_results, minimizers)

# Creates an runtime table
runtime_tbl = create_runtime_tbl(minimizers, linked_problems, norm_runtimes, use_errors, color_scale)
# Creates an accuracy table
acc_tbl = create_acc_tbl(minimizers, linked_problems, acc_rankings, use_errors, color_scale)

# Saves accuracy minimizer results
save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')
# Creates an runtime table
runtime_tbl = create_runtime_tbl(minimizers, linked_problems, runtimes, use_errors, color_scale)

# Saves runtime minimizer results
save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')
# Saves accuracy minimizer results
save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')

# Saves runtime minimizer results
save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))

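For orientation, the expert example above now drives the lower-level pipeline through `_benchmark`. Below is a condensed sketch of the updated flow, restricted to calls visible in the hunks above; the setup variables (`software_options`, `data_dir`, `group_name`, `results_dir`, `use_errors`, `color_scale`) and the construction of `user_input` happen earlier in the script and are not reproduced, and the `visual_pages` import plus the tail of the `results_output` import are not shown in the diff, so they are left implicit here.

```python
# Condensed sketch of the updated flow in example_runScripts_expert.py,
# limited to calls that appear in the hunks above. The setup variables
# (software_options, data_dir, group_name, results_dir, use_errors,
# color_scale) and user_input are defined earlier in the script; the
# visual_pages import and the continuation of the results_output import
# are not shown in the diff and are therefore left implicit here.
from fitbenchmarking.fitting_benchmarking import _benchmark
from fitbenchmarking.utils import misc
from fitbenchmarking.utils import create_dirs
from fitbenchmarking.results_output import save_tables, generate_tables

# Resolve minimizers/software and the problem group to benchmark.
minimizers, software = misc.get_minimizers(software_options)
problem_group = misc.setup_fitting_problems(data_dir)

# Create output directories.
results_dir = create_dirs.results(results_dir)
group_results_dir = create_dirs.group_results(results_dir, group_name)

# Benchmark the group (user_input is assembled earlier in the script).
prob_results = _benchmark(user_input, problem_group)

# Build and save the accuracy and runtime tables.
tables_dir = create_dirs.restables_dir(results_dir, group_name)
if isinstance(software, list):
    minimizers = sum(minimizers, [])

linked_problems = visual_pages.create_linked_probs(prob_results,
                                                   group_name, results_dir)
acc_rankings, runtimes, sum_cells_acc, sum_cells_runtime = \
    generate_tables(prob_results, minimizers)

acc_tbl = create_acc_tbl(minimizers, linked_problems, acc_rankings,
                         use_errors, color_scale)
runtime_tbl = create_runtime_tbl(minimizers, linked_problems, runtimes,
                                 use_errors, color_scale)

save_tables(tables_dir, acc_tbl, use_errors, group_name, 'acc')
save_tables(tables_dir, runtime_tbl, use_errors, group_name, 'runtime')
```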
example_scripts/example_runScripts_mantid.py (39 changes: 22 additions & 17 deletions)
@@ -14,16 +14,16 @@

try:
import mantid.simpleapi as msapi
except:
except ImportError:
print('******************************************\n'
'Mantid is not yet installed on your computer\n'
'To install, go to the directory where setup.py is located and simply type:\n'
'python setup.py install externals -s mantid\n'
'******************************************')
sys.exit()

from fitbenchmarking.fitting_benchmarking import do_fitting_benchmark as fitBenchmarking
from fitbenchmarking.results_output import save_results_tables as printTables
from fitbenchmarking.fitting_benchmarking import fitbenchmark_group
from fitbenchmarking.results_output import save_results_tables


def main(argv):
@@ -33,7 +33,7 @@ def main(argv):

# User defined minimizers
custom_minimizers = {"mantid": ["Levenberg-Marquardt"],
"scipy": ["lm", "trf", "dogbox"]}
"scipy": ["lm", "trf", "dogbox"]}
# custom_minimizers = None

# SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
@@ -53,7 +53,7 @@ def main(argv):
# Benchmark problem directories
fitbenchmarking_folder = os.path.abspath(os.path.join(current_path, os.pardir))
benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
'benchmark_problems')
'benchmark_problems')

"""
Modify results_dir to specify where the results of the fit should be saved
@@ -69,10 +69,10 @@
# e.g. lower that 1.1 -> light yellow, higher than 3 -> dark red
# Change these values to suit your needs
color_scale = [(1.1, 'ranking-top-1'),
(1.33, 'ranking-top-2'),
(1.75, 'ranking-med-3'),
(3, 'ranking-low-4'),
(float('nan'), 'ranking-low-5')]
(1.33, 'ranking-top-2'),
(1.75, 'ranking-med-3'),
(3, 'ranking-low-4'),
(float('nan'), 'ranking-low-5')]

# ADD WHICH PROBLEM SETS TO TEST AGAINST HERE
# Do this, in this example file, by selecting sub-folders in benchmark_probs_dir
@@ -93,16 +93,21 @@
continue

print('\nRunning the benchmarking on the {} problem set\n'.format(label))
results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
data_dir=data_dir,
use_errors=use_errors, results_dir=results_dir)
results, results_dir = fitbenchmark_group(group_name=label,
software_options=software_options,
data_dir=data_dir,
use_errors=use_errors,
results_dir=results_dir)

print('\nProducing output for the {} problem set\n'.format(label))
for idx, group_results in enumerate(results_per_group):
# Display the runtime and accuracy results in a table
printTables(software_options, group_results,
group_name=label, use_errors=use_errors,
color_scale=color_scale, results_dir=results_dir)

# Display the runtime and accuracy results in a table
save_results_tables(software_options=software_options,
results_per_test=results,
group_name=label,
use_errors=use_errors,
color_scale=color_scale,
results_dir=results_dir)

print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))

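One smaller change in the mantid example above is that the bare `except:` guarding the optional Mantid import is narrowed to `except ImportError:`. A minimal sketch of the resulting guard, with the banner lines around the message omitted:

```python
import sys

try:
    import mantid.simpleapi as msapi  # optional dependency
except ImportError:
    # A bare "except:" would also swallow unrelated exceptions (including
    # SystemExit and KeyboardInterrupt); catching ImportError means only a
    # missing Mantid installation triggers this message.
    print('Mantid is not yet installed on your computer\n'
          'To install, go to the directory where setup.py is located and simply type:\n'
          'python setup.py install externals -s mantid')
    sys.exit()
```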