stop pformat() of parameters, which wasn't necessary; fully test read_model()
artgoldberg committed Feb 8, 2020
1 parent 4c96c9f commit 1139b35
Showing 3 changed files with 37 additions and 43 deletions.
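In short: wc_sim/testing/verify.py stops pretty-printing a test run's keyword arguments with pprint.pformat() and recovering them with eval(); the kwargs dict is now stored in VerificationRunResult.params directly. A minimal before/after sketch of the two patterns, using a made-up params dict rather than the suite's actual values:

    from pprint import pformat

    kwargs = {'ode_time_step_factor': 1.0, 'tolerances': {'rtol': 1e-9, 'atol': 1e-11}}

    # before this commit: round-trip the dict through a formatted string,
    # then recover it with eval() wherever it is read back
    params_as_text = pformat(kwargs)
    restored = eval(params_as_text)
    assert restored['ode_time_step_factor'] == 1.0

    # after this commit: store the dict itself and index it directly
    params = kwargs
    assert params['tolerances']['rtol'] == 1e-9

Storing the dict avoids a needless format/parse cycle and the use of eval() on stored text.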
6 changes: 6 additions & 0 deletions tests/perf_results/wc_sim_performance_log.txt
@@ -94,3 +94,9 @@ Performance summary on 2020-02-07
8 2681 7.432 360.715
32 10852 42.173 257.320

Performance summary on 2020-02-08
#SSA submodels # events run time (s) reactions/s
2 721 1.229 586.532
8 2681 5.551 483.000
32 10852 41.156 263.682
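
The reactions/s column is the event count divided by the run time; a quick check of the new 2020-02-08 rows (numbers copied from the table above; small differences come from rounding of the displayed run times):

    # reactions/s ~= (# events) / (run time in s), for the 2020-02-08 rows
    rows = [(2, 721, 1.229), (8, 2681, 5.551), (32, 10852, 41.156)]
    for n_submodels, n_events, run_time in rows:
        print(n_submodels, round(n_events / run_time, 1))   # ~586.7, ~483.0, ~263.7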

22 changes: 11 additions & 11 deletions tests/testing/test_verify.py
@@ -173,6 +173,11 @@ def test_read_model(self):
self.assertTrue(isinstance(model, obj_tables.Model))
self.assertEqual(model.id, 'test_case_' + verification_test_reader.test_case_num)

# test exception
with self.assertRaisesRegexp(VerificationError, "SBML files not supported"):
model_file_suffix = f"-test_file{VerificationTestReader.SBML_FILE_SUFFIX}"
verification_test_reader.read_model(model_file_suffix=model_file_suffix)

def test_get_species_id(self):
verification_test_reader = make_verification_test_reader('00001', 'DISCRETE_STOCHASTIC')
verification_test_reader.run()
@@ -432,7 +437,6 @@ def test_quantify_stoch_diff(self):
mean_diffs = results_comparator.quantify_stoch_diff(evaluate=True)
for mean_diff in mean_diffs.values():
self.assertTrue(isinstance(mean_diff, float))
# todo: QUANT DIFF: checking correct values
# diff should be 0 because test RunResults is created from expected mean populations
self.assertTrue(math.isclose(mean_diff, 0.))

@@ -699,8 +703,7 @@ def test__run_tests(self):
ode_time_step_factors=ode_time_step_factors)
self.assertEqual(len(results), len(ode_time_step_factors))
last_result = results[-1]
params = eval(last_result.params)
self.assertEqual(params['ode_time_step_factor'], ode_time_step_factors[-1])
self.assertEqual(last_result.params['ode_time_step_factor'], ode_time_step_factors[-1])

max_rtol = 1E-9
max_atol = 1E-11
@@ -711,20 +714,18 @@ def test__run_tests(self):
empty_results=True)
self.assertEqual(len(results), 2 * 3)
last_result = results[-1]
params = eval(last_result.params)
self.assertEqual(params['tolerances']['rtol'], max_rtol)
self.assertEqual(params['tolerances']['atol'], max_atol)
self.assertEqual(last_result.params['tolerances']['rtol'], max_rtol)
self.assertEqual(last_result.params['tolerances']['atol'], max_atol)

results = self.verification_suite._run_tests('CONTINUOUS_DETERMINISTIC', '00001',
ode_time_step_factors=ode_time_step_factors,
tolerance_ranges=test_tolerance_ranges,
empty_results=True)
self.assertEqual(len(results), len(ode_time_step_factors) * 2 * 3)
last_result = results[-1]
params = eval(last_result.params)
self.assertEqual(params['ode_time_step_factor'], ode_time_step_factors[-1])
self.assertEqual(params['tolerances']['rtol'], max_rtol)
self.assertEqual(params['tolerances']['atol'], max_atol)
self.assertEqual(last_result.params['ode_time_step_factor'], ode_time_step_factors[-1])
self.assertEqual(last_result.params['tolerances']['rtol'], max_rtol)
self.assertEqual(last_result.params['tolerances']['atol'], max_atol)

def test_tolerance_ranges_for_sensitivity_analysis(self):
tolerance_ranges = VerificationSuite.tolerance_ranges_for_sensitivity_analysis()
@@ -770,7 +771,6 @@ def test_run_multialg(self):

results = self.verification_suite.run_multialg(['00007'], evaluate=True)
last_result = results.pop()
# todo: QUANT DIFF: check correct values
self.assertTrue(isinstance(last_result.quant_diff, dict))
for diff_mean in last_result.quant_diff.values():
self.assertTrue(isinstance(diff_mean, float))
52 changes: 20 additions & 32 deletions wc_sim/testing/verify.py
@@ -254,12 +254,21 @@ def slope_of_predictions(self):

return derivatives

def read_model(self):
    """ Read a model into a `wc_lang` representation. """
    self.model_filename = model_filename = os.path.join(
        self.test_case_dir, self.test_case_num+'-wc_lang.xlsx')
    if model_filename.endswith(self.SBML_FILE_SUFFIX): # pragma: no cover
        raise VerificationError("Reading SBML files not supported: model filename '{}'".format(model_filename))
def read_model(self, model_file_suffix='-wc_lang.xlsx'):
    """ Read a model into a `wc_lang` representation
    Args:
        model_file_suffix (:obj:`str`, optional): the name suffix for the model
    Returns:
        :obj:`wc_lang.Model`: the root of the test case's `wc_lang` model
    Raises:
        :obj:`VerificationError`: if an SBML model is read
    """
    self.model_filename = os.path.join(self.test_case_dir, self.test_case_num + model_file_suffix)
    if self.model_filename.endswith(self.SBML_FILE_SUFFIX):
        raise VerificationError(f"SBML files not supported: model filename: '{self.model_filename}'")
    return Reader().run(self.model_filename, validate=True)[Model][0]

def get_species_id(self, species_type):
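
A usage sketch of the reworked read_model(), mirroring the exception test added to test_read_model() above; it assumes the make_verification_test_reader() helper from tests/testing/test_verify.py and is illustrative, not part of the commit:

    reader = make_verification_test_reader('00001', 'DISCRETE_STOCHASTIC')

    # default suffix: read the test case's '<test_case_num>-wc_lang.xlsx' workbook
    model = reader.read_model()

    # a model filename that ends with SBML_FILE_SUFFIX raises VerificationError
    try:
        reader.read_model(model_file_suffix='-test_file' + VerificationTestReader.SBML_FILE_SUFFIX)
    except VerificationError as err:
        print(err)   # SBML files not supported: model filename: '...'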
@@ -358,7 +367,6 @@ def prepare_tolerances(self):
pass
return kwargs

# todo: QUANT DIFF: add 'evaluate' option
def quantify_stoch_diff(self, evaluate=False):
""" Quantify the difference between stochastic simulation population(s) and expected population(s)
@@ -408,7 +416,7 @@ def quantify_stoch_diff(self, evaluate=False):
differences[species_type] = Z

if evaluate:
# todo: QUANT DIFF: find mean diff for each species
# find mean diff for each species
for species_type, Z in differences.items():
differences[species_type] = np.mean(Z)
return differences
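
With evaluate=True the method collapses each species' Z-score trajectory to a single mean, the value that test_quantify_stoch_diff above checks is close to 0. A standalone sketch of that reduction with made-up Z arrays:

    import numpy as np

    # hypothetical per-species Z-scores, not real verification data
    differences = {'X': np.array([0.1, -0.1, 0.0]), 'Y': np.array([0.2, -0.2])}

    # evaluate branch: report the mean Z-score for each species
    for species_type, Z in differences.items():
        differences[species_type] = np.mean(Z)

    print(differences)   # both means are ~0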
@@ -551,7 +559,6 @@ def __init__(self, test_cases_root_dir, test_case_type, test_case_num,
if default_num_stochastic_runs is None:
self.default_num_stochastic_runs = config_multialgorithm['num_ssa_verification_sim_runs']

# todo: QUANT DIFF: add 'evaluate' option
def verify_model(self, num_discrete_stochastic_runs=None, discard_run_results=True, plot_file=None,
ode_time_step_factor=None, tolerances=None, evaluate=False):
""" Verify a model
@@ -564,8 +571,6 @@ def verify_model(self, num_discrete_stochastic_runs=None, discard_run_results=Tr
tolerances (:obj:`dict`, optional): if testing tolerances, values of ODE solver tolerances
evaluate (:obj:`bool`, optional): control the return value
# todo: QUANT DIFF: depends on 'evaluate' option
# todo: QUANT DIFF: must return
Returns:
:obj:`obj`: if `evaluate` is `False`, then return `False` if populations in the expected
result and simulation run are equal within tolerances, otherwise :obj:`list`: of species
@@ -593,7 +598,6 @@ def verify_model(self, num_discrete_stochastic_runs=None, discard_run_results=Tr
if 'start' in settings and settings['start'] != 0:
raise VerificationError("non-zero start setting ({}) not supported".format(settings['start']))

# todo: QUANT DIFF: depends on 'evaluate' option
if self.verification_test_reader.test_case_type == VerificationTestCaseType.CONTINUOUS_DETERMINISTIC \
and evaluate:
raise VerificationError("evaluate is True and test_case_type is CONTINUOUS_DETERMINISTIC")
@@ -645,7 +649,6 @@ def verify_model(self, num_discrete_stochastic_runs=None, discard_run_results=Tr
self.results_comparator = ResultsComparator(self.verification_test_reader,
self.simulation_run_results)
self.comparison_result = self.results_comparator.differs()
# todo: QUANT DIFF: depends on 'evaluate' option
if evaluate:
self.evaluation = self.results_comparator.quantify_stoch_diff(evaluate=evaluate)
# if model & simulation verify or evaluating, don't retry
@@ -663,7 +666,6 @@ def verify_model(self, num_discrete_stochastic_runs=None, discard_run_results=Tr
if discard_run_results:
shutil.rmtree(self.tmp_results_dir)

# todo: QUANT DIFF: depends on 'evaluate' option
if evaluate:
return self.evaluation
return self.comparison_result
@@ -867,7 +869,6 @@ class VerificationResultType(Enum):
VerificationRunResult.duration.__doc__ = 'time it took to run the test'
VerificationRunResult.quant_diff.__doc__ = ('mean Z-score difference between correct means and actual '
'simulation predictions')
# todo: QUANT DIFF: rename to params
VerificationRunResult.params.__doc__ = 'optional, parameters used by the test'
VerificationRunResult.error.__doc__ = 'optional, error message for the test'

@@ -941,10 +942,8 @@ def dump_results(self, errors=False):
for attr in self.RESULTS_ATTRIBUTES_TO_DUMP:
row[attr] = getattr(result, attr)
row['result_type'] = result.result_type.name
# todo: QUANT DIFF: rename to params
if result.params:
params = eval(result.params)
params = DictUtil.flatten_dict(params)
params = DictUtil.flatten_dict(result.params)
for k, v in params.items():
row[k] = v

@@ -960,7 +959,6 @@ def dump_results(self, errors=False):

return formatted_results

# todo: QUANT DIFF: add 'evaluate' option
def _run_test(self, case_type_name, case_num, num_stochastic_runs=None,
ode_time_step_factor=None, rtol=None, atol=None, verbose=False, evaluate=False):
""" Run one test case and record the result
@@ -1019,27 +1017,20 @@ def _run_test(self, case_type_name, case_num, num_stochastic_runs=None,
f"{case_type_name}_{case_num}_{plot_name_append}_verification_test.pdf")
kwargs['plot_file'] = plot_file

# save pretty printed kwargs in results; they can be restored with eval()
# todo: QUANT DIFF: FIX: no, just put the dict in results
pformat_kwargs = pformat(kwargs)

if verbose:
print("Verifying {} case {}".format(case_type_name, case_num))

try:
start_time = time.process_time()
# todo: QUANT DIFF: if evaluate, set it in kwargs
if evaluate:
kwargs['evaluate'] = True
verification_result = case_verifier.verify_model(**kwargs)

run_time = time.process_time() - start_time
# todo: QUANT DIFF: make results_kwargs
results_kwargs = {}
results_kwargs['params'] = pformat_kwargs
if evaluate:
results_kwargs['quant_diff'] = verification_result
# todo: QUANT DIFF: if evaluate, don't worry about setting the VerificationRunResult.result_type
# since evaluate, don't worry about setting the VerificationRunResult.result_type
result_type = VerificationResultType.VERIFICATION_UNKNOWN
else:
if verification_result:
@@ -1048,16 +1039,15 @@ def _run_test(self, case_type_name, case_num, num_stochastic_runs=None,
else:
result_type = VerificationResultType.CASE_VERIFIED

self._record_result(case_type_name, case_num, result_type, run_time,
self._record_result(case_type_name, case_num, result_type, run_time, params=kwargs,
**results_kwargs)

except Exception as e:
run_time = time.process_time() - start_time
tb = traceback.format_exc()
self._record_result(case_type_name, case_num, VerificationResultType.FAILED_VERIFICATION_RUN,
run_time, params=pformat_kwargs, error=tb)
run_time, params=kwargs, error=tb)

# todo: QUANT DIFF: add 'evaluate' option
def _run_tests(self, case_type_name, case_num, num_stochastic_runs=None,
ode_time_step_factors=None, tolerance_ranges=None, verbose=False,
empty_results=False, evaluate=False):
@@ -1107,7 +1097,6 @@ def tolerance_ranges_for_sensitivity_analysis():
max=VerificationSuite.DEFAULT_MAX_ATOL)}
return tolerance_ranges

# todo: QUANT DIFF: add 'evaluate' option
def run(self, test_case_type_name=None, cases=None, num_stochastic_runs=None,
ode_time_step_factors=None, tolerance_ranges=None, verbose=False, empty_results=True,
evaluate=False):
@@ -1161,7 +1150,6 @@ def run(self, test_case_type_name=None, cases=None, num_stochastic_runs=None,
tolerance_ranges=tolerance_ranges, verbose=verbose, evaluate=evaluate)
return self.results

# todo: QUANT DIFF: add 'evaluate' option
ODE_TIME_STEP_FACTORS = [0.05, 0.1, 1.0]
def run_multialg(self, cases, ode_time_step_factors=None, tolerances=False, verbose=None,
evaluate=True):
