Issue #134: Add option to options file for displaying absolute value,… #220

Merged Sep 11, 2019 (1 commit)

23 changes: 12 additions & 11 deletions example_scripts/example_runScripts.py
@@ -37,15 +37,16 @@

 # SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
 if len(sys.argv) > 1:
     # Read custom minimizer options from file
-    software_options['minimizer_options'] = current_path + sys.argv[1]
+    software_options['minimizer_options'] = None
+    software_options['options_file'] = current_path + sys.argv[1]
 elif custom_minimizers:
     # Custom minimizer options:
     software_options['minimizer_options'] = custom_minimizers
 else:
     # Using default minimizers from
     # fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
     software_options['minimizer_options'] = None

 # Benchmark problem directories
 benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
@@ -94,14 +95,14 @@

     print('\nRunning the benchmarking on the {} problem set\n'.format(label))
     results_per_group, results_dir = fitBenchmarking(group_name=label, software_options=software_options,
                                                      data_dir=data_dir,
                                                      use_errors=use_errors, results_dir=results_dir)
 
     print('\nProducing output for the {} problem set\n'.format(label))
     for idx, group_results in enumerate(results_per_group):
         # Display the runtime and accuracy results in a table
         printTables(software_options, group_results,
                     group_name=label, use_errors=use_errors,
                     color_scale=color_scale, results_dir=results_dir)

     print('\nCompleted benchmarking for {} problem set\n'.format(sub_dir))
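
Across all four example scripts the precedence is now: an options file given on the command line, then custom minimizers defined in the script, then the packaged defaults. A minimal sketch of a consumer of these keys — the helper name and its return convention are illustrative assumptions, not part of this PR:

    import json

    def resolve_options(software_options,
                        default_path='fitbenchmarking_default_options.json'):
        # Hypothetical helper: explicit minimizer options take precedence,
        # then a user-supplied options file, then the bundled defaults.
        explicit = software_options.get('minimizer_options')
        if explicit is not None:
            return explicit
        options_path = software_options.get('options_file') or default_path
        with open(options_path) as handle:
            options = json.load(handle)
        return options['minimizers']
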
4 changes: 2 additions & 2 deletions example_scripts/example_runScripts_SasView.py
@@ -64,7 +64,8 @@
 # SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
 if len(sys.argv) > 1:
     # Read custom minimizer options from file
-    software_options['minimizer_options'] = current_path + sys.argv[1]
+    software_options['minimizer_options'] = None
+    software_options['options_file'] = current_path + sys.argv[1]
 elif custom_minimizers:
     # Custom minimizer options:
     software_options['minimizer_options'] = custom_minimizers
@@ -73,7 +74,6 @@
     # fitbenchmarking/fitbenchmarking/fitbenchmarking_default_options.json
     software_options['minimizer_options'] = None
 
-
 # Benchmark problem directories
 benchmark_probs_dir = os.path.join(fitbenchmarking_folder,
                                    'benchmark_problems')
3 changes: 2 additions & 1 deletion example_scripts/example_runScripts_expert.py
@@ -43,7 +43,8 @@
 # SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
 if len(sys.argv) > 1:
     # Read custom minimizer options from file
-    software_options['minimizer_options'] = current_path + sys.argv[1]
+    software_options['minimizer_options'] = None
+    software_options['options_file'] = current_path + sys.argv[1]
 elif custom_minimizers:
     # Custom minimizer options:
     software_options['minimizer_options'] = custom_minimizers
3 changes: 2 additions & 1 deletion example_scripts/example_runScripts_mantid.py
@@ -49,7 +49,8 @@
 # SPECIFY THE MINIMIZERS YOU WANT TO BENCHMARK, AND AS A MINIMUM FOR THE SOFTWARE YOU SPECIFIED ABOVE
 if len(sys.argv) > 1:
     # Read custom minimizer options from file
-    software_options['minimizer_options'] = current_path + sys.argv[1]
+    software_options['minimizer_options'] = None
+    software_options['options_file'] = current_path + sys.argv[1]
 elif custom_minimizers:
     # Custom minimizer options:
     software_options['minimizer_options'] = custom_minimizers
6 changes: 4 additions & 2 deletions fitbenchmarking/fitbenchmarking_default_options.json
@@ -1,5 +1,5 @@
 {
-    "minimizers": {
+    "minimizers" : {
         "mantid" : ["BFGS", "Conjugate gradient (Fletcher-Reeves imp.)",
                     "Conjugate gradient (Polak-Ribiere imp.)",
                     "Damped GaussNewton",
@@ -8,5 +8,7 @@
                     "Trust Region"],
         "scipy" : ["lm", "trf", "dogbox"],
         "sasview" : ["amoeba", "lm", "newton", "de", "pt", "mp"]
-    }
+    },
+
+    "comparison_mode" : "both"
 }
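
With "comparison_mode" in the options file, a user-supplied file can now override the display mode as well as the minimizer lists. A hypothetical custom file (names and values invented), written from Python for concreteness:

    import json

    # Benchmark only two scipy minimizers and report relative values only.
    custom_options = {
        "minimizers": {"scipy": ["lm", "trf"]},
        "comparison_mode": "rel",
    }

    with open("my_options.json", "w") as handle:
        json.dump(custom_options, handle, indent=4)
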
55 changes: 44 additions & 11 deletions fitbenchmarking/resproc/numpy_restables.py
@@ -77,31 +77,64 @@ def create_norm_tbls(accuracy_tbl, time_tbl):
     return norm_acc_rankings, norm_runtimes,


-def create_summary_tbls(norm_acc_rankings, norm_runtimes):
+def create_summary_tbls(acc_rankings, runtimes):
     """
     Creates summary tables of the obtained results, i.e. the minimum,
     maximum, mean and median of each column in the normalised numpy
     arrays.
 
-    @param norm_acc_rankings :: the normalised accuracy results numpy array
-    @param norm_runtimes :: the normalised runtime results numpy array
+    @param acc_rankings :: the combined accuracy results numpy array
+    @param runtimes :: the combined runtime results numpy array
 
     @returns :: the summary tables for both runtime and accuracy
     """
+    acc_rankings = acc_rankings[:, :, 1]
+    runtimes = runtimes[:, :, 1]
 
-    summary_cells_acc = np.array([np.nanmin(norm_acc_rankings, 0),
-                                  np.nanmax(norm_acc_rankings, 0),
-                                  nanmean(norm_acc_rankings, 0),
-                                  nanmedian(norm_acc_rankings, 0)])
+    summary_cells_acc = np.array([np.nanmin(acc_rankings, 0),
+                                  np.nanmax(acc_rankings, 0),
+                                  nanmean(acc_rankings, 0),
+                                  nanmedian(acc_rankings, 0)])
 
-    summary_cells_runtime = np.array([np.nanmin(norm_runtimes, 0),
-                                      np.nanmax(norm_runtimes, 0),
-                                      nanmean(norm_runtimes, 0),
-                                      nanmedian(norm_runtimes, 0)])
+    summary_cells_runtime = np.array([np.nanmin(runtimes, 0),
+                                      np.nanmax(runtimes, 0),
+                                      nanmean(runtimes, 0),
+                                      nanmedian(runtimes, 0)])
 
     return summary_cells_acc, summary_cells_runtime
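
The tables passed in now carry (absolute, relative) pairs, so the [:, :, 1] slice keeps the summary statistics on the normalised values, as before. A toy check with invented numbers:

    import numpy as np

    combined = np.empty((2, 3, 2))
    combined[:, :, 0] = [[0.12, 0.50, 0.31],   # absolute accuracies (invented)
                         [8.00, 2.00, 4.00]]
    combined[:, :, 1] = combined[:, :, 0] / combined[:, :, 0].min(axis=1, keepdims=True)

    rel = combined[:, :, 1]
    print(np.nanmin(rel, 0))   # best relative ranking per minimizer: [1. 1. 2.]
    print(np.nanmax(rel, 0))   # worst relative ranking per minimizer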


+def create_combined_tbls(abs_accuracy, rel_accuracy, abs_runtime, rel_runtime):
+    """
+    Create a table that holds both absolute and relative information on
+    each result.
+
+    @param abs_accuracy :: The table with the absolute accuracy reported
+    @param rel_accuracy :: The table with the relative accuracy reported
+    @param abs_runtime :: The table with the absolute runtime reported
+    @param rel_runtime :: The table with the relative runtime reported
+
+    @returns :: combined_accuracy and combined_runtime tables with both
+                values present in each cell,
+                e.g. combined_accuracy[2, 3, 0] == abs_accuracy[2, 3]
+                     combined_accuracy[2, 3, 1] == rel_accuracy[2, 3]
+    """
+
+    accuracy_shape = (abs_accuracy.shape[0], abs_accuracy.shape[1], 2)
+    runtime_shape = (abs_runtime.shape[0], abs_runtime.shape[1], 2)
+
+    combined_accuracy = np.zeros(accuracy_shape)
+    combined_runtime = np.zeros(runtime_shape)
+
+    combined_accuracy[:, :, 0] = abs_accuracy
+    combined_accuracy[:, :, 1] = rel_accuracy
+
+    combined_runtime[:, :, 0] = abs_runtime
+    combined_runtime[:, :, 1] = rel_runtime
+
+    return combined_accuracy, combined_runtime
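
The stacking is equivalent to np.dstack; a quick sketch (invented values) confirming the cell layout, layer 0 absolute and layer 1 relative:

    import numpy as np

    abs_acc = np.array([[0.12, 0.50], [8.00, 2.00]])
    rel_acc = abs_acc / abs_acc.min(axis=1, keepdims=True)

    combined = np.dstack((abs_acc, rel_acc))    # shape (2, 2, 2)
    assert combined[1, 0, 0] == abs_acc[1, 0]   # layer 0: absolute value
    assert combined[1, 0, 1] == rel_acc[1, 0]   # layer 1: relative value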


 def init_numpy_tbls(results_per_test, minimizers):
     """
     Helper function that initialises the numpy tables.
86 changes: 65 additions & 21 deletions fitbenchmarking/resproc/rst_table.py
@@ -37,7 +37,7 @@


 def create(columns_txt, rows_txt, cells, comparison_type, comparison_dim,
-           using_errors, color_scale=None):
+           using_errors, color_scale=None, comparison_mode='abs'):
     """
     Creates a rst table of accuracy and runtime tables obtained
     through fitting a certain problem set by using various
@@ -50,14 +50,17 @@ def create(columns_txt, rows_txt, cells, comparison_type, comparison_dim,
     @param comparison_dim :: the comparison dimension, either acc or runtime
     @param using_errors :: boolean whether to use errors or not
     @param color_scale :: color scale for coloring the cells
+    @param comparison_mode :: str to select between 'abs', 'rel', 'both' for
+                              the style of comparison returned
 
     @returns :: rst table of the results
     """
 
     columns_txt = display_name_for_minimizers(columns_txt)
     items_link = \
         build_items_links(comparison_type, comparison_dim, using_errors)
-    cell_len = calc_cell_len(columns_txt, items_link, cells, color_scale)
+
+    cell_len = calc_cell_len(columns_txt, items_link, cells, color_scale, mode=comparison_mode)

     # The first column tends to be disproportionately long if it has a link
     first_col_len = calc_first_col_len(cell_len, rows_txt)
@@ -67,13 +70,13 @@ def create(columns_txt, rows_txt, cells, comparison_type, comparison_dim,
     tbl_header = tbl_htop + '\n' + tbl_htext + '\n' + tbl_hbottom + '\n'
     tbl_footer = tbl_htop + '\n'
     tbl_body = create_table_body(cells, items_link, rows_txt, first_col_len,
-                                 cell_len, color_scale, tbl_footer)
+                                 cell_len, color_scale, tbl_footer, mode=comparison_mode)
 
     return tbl_header + tbl_body


 def create_table_body(cells, items_link, rows_txt, first_col_len, cell_len,
-                      color_scale, tbl_footer):
+                      color_scale, tbl_footer, mode):
"""
Creates the body of the rst table that holds all the fitting results.

@@ -85,46 +88,56 @@ def create_table_body(cells, items_link, rows_txt, first_col_len, cell_len,
     @param cell_len :: the length of the cells in the table
     @param color_scale :: color scale for coloring the cells
     @param tbl_footer :: the rst footer of the table
+    @param mode :: str to select between 'abs', 'rel', 'both' for
+                   the style of comparison returned
 
     @returns :: the rst table body
     """
 
     tbl_body = ''
     for row in range(0, cells.shape[0]):
         link = items_link
 
         all_fit_failed_status = ''
-        if np.isnan(cells[row, :]).all():
+        if np.isnan(cells[row, :, 0]).all():
             all_fit_failed_status = '(all fit failed)'
 
         tbl_body += '|' + rows_txt[row].ljust(first_col_len-len(all_fit_failed_status), ' ')\
-                    + all_fit_failed_status +'|'
+                    + all_fit_failed_status + '|'
 
         for col in range(0, cells.shape[1]):
-            tbl_body += format_cell_value(cells[row, col], cell_len,
-                                          color_scale, link)
+            tbl_body += format_cell_value(value=cells[row, col],
+                                          width=cell_len,
+                                          color_scale=color_scale,
+                                          items_link=items_link,
+                                          mode=mode)
            tbl_body += '|'
 
    tbl_body += '\n' + tbl_footer
 
    return tbl_body


-def calc_cell_len(columns_txt, items_link, cells, color_scale=None):
+def calc_cell_len(columns_txt, items_link, cells, color_scale=None, mode='abs'):
"""
Calculates the cell length of the rst table.

@param columns_txt :: array of minimizers used in fitting
@param items_link :: link to the items
@param cells :: numpy array of the results (either runtime or accuracy)
@param color_scale :: color scale for coloring the cells
@param mode :: str to select between 'abs', 'rel', 'both' for
the style of comparison returned

@returns :: the cell length of the rest table
"""

     max_header = len(max((col for col in columns_txt), key=len))
-    max_value = max(("%.4g" % cell for cell in np.nditer(cells)), key=len)
+    max_value = max(cells.reshape((-1, 2)), key=lambda x: len(cell_to_string(x, mode)))
     max_item = determine_max_item(items_link)
-    cell_len = len(format_cell_value(value=float(max_value),
+    cell_len = len(format_cell_value(value=max_value,
                                      color_scale=color_scale,
-                                     items_link=max_item).strip()) + 2
+                                     items_link=max_item,
+                                     mode=mode).strip()) + 2
     if cell_len < max_header:
         cell_len = max_header
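
Each cell is now an (absolute, relative) pair, so the widest rendering depends on the display mode; reshape((-1, 2)) flattens the problems-by-minimizers grid into a list of pairs before taking the maximum. A toy illustration that inlines the 'both' format from cell_to_string:

    import numpy as np

    cells = np.array([[[0.1234, 1.0],
                       [123.4, 1000.0]]])      # 1 problem, 2 minimizers, (abs, rel)
    pairs = cells.reshape((-1, 2))
    widest = max(pairs, key=lambda x: len('{:.4g} ({:.4g})'.format(x[0], x[1])))
    print('{:.4g} ({:.4g})'.format(*widest))   # -> 123.4 (1000)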

@@ -199,52 +212,59 @@ def build_header_chunks(first_col_len, cell_len, columns_txt):
     return tbl_header_top, tbl_header_text, tbl_header_bottom


-def format_cell_value(value, width=None, color_scale=None, items_link=None):
+def format_cell_value(value, width=None, color_scale=None, items_link=None, mode='abs'):
"""
Formats the cell values and adds color if a color scale is provided.

@param value :: the values of the color if it is added
@param width :: the width of the cell if it is given
@param color_scale :: color scale for coloring the cells
@param items_links :: items_link string or array
@param mode :: str to select between 'abs', 'rel', 'both' for
the style of comparison returned

@returns :: the correct value text string
"""

+    value_text = cell_to_string(value, mode)
+
     if not color_scale:
-        value_text = no_color_scale_cv(items_link, value)
+        value_text = no_color_scale_cv(items_link, value_text)
     else:
-        value_text = color_scale_cv(color_scale, value)
+        value_text = color_scale_cv(color_scale, value[1], value_text)

     if width is not None:
         value_text = value_text.ljust(width, ' ')
 
     return value_text


-def no_color_scale_cv(items_link, value):
+def no_color_scale_cv(items_link, value_text):
"""
Creates the values text if no color scale is provided.

@param items_links :: items_link string or array
@param value_text :: text representing the value for the cell

@returns :: the no coloring value text string, containing
the items_links
"""

     if not items_link:
-        value_text = ' {0:.4g}'.format(value)
+        value_text = ' {}'.format(value_text)
     else:
-        value_text = ' :ref:`{0:.4g} <{1}>`'.format(value, items_link)
+        value_text = ' :ref:`{0} <{1}>`'.format(value_text, items_link)

     return value_text


-def color_scale_cv(color_scale, value):
+def color_scale_cv(color_scale, value, text):
"""
Creates the values text if a color scale is provided.

@param color_scale :: color scale for coloring the cells
@param value :: the values of the color if it is added
@param text :: the cell text

@returns :: the value text with added color values
"""
@@ -257,7 +277,7 @@ def color_scale_cv(color_scale, value):
     if not color:
         color = color_scale[-1][1]
 
-    value_text = " :{0}:`{1:.4g}`".format(color, value)
+    value_text = " :{0}:`{1}`".format(color, text)

     return value_text
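
color_scale appears to be consumed as an ordered list of (threshold, colour-role) pairs, with the last entry as the catch-all; the colour is now keyed on the relative value (value[1] at the call site) while the displayed text can carry both numbers. A sketch with hypothetical role names:

    # Hypothetical scale: relative values up to 1.1 use the first rst role,
    # up to 1.5 the second, anything else falls through to the last entry.
    color_scale = [(1.1, 'ranking-top-1'),
                   (1.5, 'ranking-med-3'),
                   (float('inf'), 'ranking-low-5')]

    # e.g. color_scale_cv(color_scale, 1.3, '0.62 (1.3)')
    #      -> ' :ranking-med-3:`0.62 (1.3)`'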

@@ -325,3 +345,27 @@ def convert_rst_to_html(table_data):
     table_data = publish_string(rst_content, writer_name='html')
 
     return table_data


+def cell_to_string(value, mode='abs'):
+    """
+    Utility function to choose display mode. Options for mode are:
+        'abs' - The value as it was returned
+        'rel' - The value relative to other values (smallest is 1)
+        'both' - The 'abs' result followed by the 'rel' result in brackets
+
+    @param value :: The value to convert
+    @param mode :: The display mode
+
+    @returns :: String with the correct formatting
+    """
+
+    if mode not in ['abs', 'rel', 'both']:
+        raise ValueError('Could not decipher mode "{}". Please select from "abs", "rel", or "both"'.format(mode))
+
+    if mode == 'both':
+        return '{:.4g} ({:.4g})'.format(value[0], value[1])  # NOQA
+    elif mode == 'rel':
+        return '{:.4g}'.format(value[1])  # NOQA
+    else:
+        return '{:.4g}'.format(value[0])  # NOQA
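
A quick usage sketch: for a cell holding (absolute, relative) = (0.0312, 1.7), the three modes render as follows.

    cell = (0.0312, 1.7)           # (absolute value, relative value)
    cell_to_string(cell, 'abs')    # -> '0.0312'
    cell_to_string(cell, 'rel')    # -> '1.7'
    cell_to_string(cell, 'both')   # -> '0.0312 (1.7)'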