Commit c96a805

simplify Simulation().run() by returning a namedtuple containing num_events, results_dir & profile_stats; enables verbose and profile to both be true
artgoldberg committed May 29, 2020
1 parent 6dcdb55 commit c96a805
Showing 15 changed files with 107 additions and 85 deletions.
3 changes: 2 additions & 1 deletion examples/simulation_run.py
@@ -22,7 +22,8 @@

# create and run simulation
simulation = Simulation(model_filename)
-num_events, results_dir = simulation.run(time_max=30, results_dir=results_dir, checkpoint_period=10)
+simulation_rv = simulation.run(time_max=30, results_dir=results_dir, checkpoint_period=10)
+results_dir = simulation_rv.results_dir
run_results = RunResults(results_dir)

# view results
@@ -25,9 +25,9 @@

# run simulation
sim = Simulation(model)
-_, results_dirname = sim.run(time_max=time_max,
-                             results_dir=results_parent_dirname,
-                             checkpoint_period=checkpoint_period)
+results_dirname = sim.run(time_max=time_max,
+                          results_dir=results_parent_dirname,
+                          checkpoint_period=checkpoint_period).results_dir
results = RunResults(results_dirname)

# plot results
11 changes: 5 additions & 6 deletions examples/translation_metabolism_hybrid_model/run.py
@@ -25,15 +25,14 @@
# run simulation
seed = 100
sim = Simulation(model)
-_, results_dirname = sim.run(time_max=time_max,
-                             seed=seed,
-                             results_dir=results_parent_dirname,
-                             checkpoint_period=checkpoint_period)
+results_dirname = sim.run(time_max=time_max,
+                          seed=seed,
+                          results_dir=results_parent_dirname,
+                          checkpoint_period=checkpoint_period).results_dir
results = RunResults(results_dirname)

-# plot results
-

+# plot results
def plot(model, results, filename):
# get expected results
mean_doubling_time = model.parameters.get_one(id='mean_doubling_time').value
24 changes: 24 additions & 0 deletions tests/perf_results/wc_sim_performance_log.txt
@@ -130,3 +130,27 @@ Performance summary for Next Reaction Method on 2020-05-24:
              8        2698           2.674       1009.138
             32       10465          13.938        750.826

+Performance summary for Stochastic Simulation Algorithm on 2020-05-28:
+# SSA submodels    # events    run time (s)    reactions/s
+              2         701           1.661        422.088
+              8        2781           5.806        479.017
+             32       10777          23.608        456.499
+
+Performance summary for Next Reaction Method on 2020-05-28:
+# NRM submodels    # events    run time (s)    reactions/s
+              2         619           1.328        466.247
+              8        2741           5.333        513.990
+             32       10714          16.101        665.445
+
+Performance summary for Stochastic Simulation Algorithm on 2020-05-29:
+# SSA submodels    # events    run time (s)    reactions/s
+              2         608           2.175        279.557
+              8        2590           5.289        489.684
+             32       10640          24.337        437.187
+
+Performance summary for Next Reaction Method on 2020-05-29:
+# NRM submodels    # events    run time (s)    reactions/s
+              2         729           0.711       1024.944
+              8        2686           3.585        749.137
+             32       10716          16.217        660.800
+
2 changes: 1 addition & 1 deletion tests/submodels/test_nrm.py
@@ -132,7 +132,7 @@ def test_simulate(self):
for _ in range(NUM_TRIALS):
simulation_engine, _ = self.make_sim_w_nrm_submodel(self.model, True)
simulation_engine.initialize()
-num_events.append(simulation_engine.simulate(RUN_TIME))
+num_events.append(simulation_engine.simulate(RUN_TIME).num_events)
num_reactions = len(self.model.reactions)
expected_mean_num_events = num_reactions * RUN_TIME
sd = math.sqrt(expected_mean_num_events)
4 changes: 2 additions & 2 deletions tests/submodels/test_submodels.py
@@ -215,7 +215,7 @@ def test_skeleton_submodel(self):

time_max = 100
skeleton_submodel = self.make_sim_w_skeleton_submodel(lang_submodel, behavior)
-self.assertEqual(self.simulator.simulate(time_max),
+self.assertEqual(self.simulator.simulate(time_max).num_events,
time_max / behavior[SkeletonSubmodel.INTER_REACTION_TIME])

behavior = {SkeletonSubmodel.INTER_REACTION_TIME: 2,
@@ -328,5 +328,5 @@ def test_simulate_deterministic_simulation_algorithm_submodel(self):
model = MakeModel.make_test_model('1 species, 1 reaction')
self.transform_model_for_dsa_simulation(model)
simulation = Simulation(model)
-num_events, _ = simulation.run(time_max=100)
+num_events = simulation.run(time_max=100).num_events
self.assertGreater(num_events, 0)
16 changes: 8 additions & 8 deletions tests/test_dynamic_mass.py
@@ -182,10 +182,10 @@ def simulate(self, model, time_max=100.):
env.set('CONFIG__DOT__wc_lang__DOT__validation__DOT__validate_element_charge_balance', '0')
with env:
simulation = Simulation(model)
-_, results_dirname = simulation.run(time_max=time_max,
-                                    ode_time_step=1.,
-                                    checkpoint_period=1.,
-                                    results_dir=self.tempdir)
+results_dirname = simulation.run(time_max=time_max,
+                                 ode_time_step=1.,
+                                 checkpoint_period=1.,
+                                 results_dir=self.tempdir).results_dir

# get results
results = RunResults(results_dirname)
@@ -465,10 +465,10 @@ def test(self):
model = wc_lang.io.Reader().run(model_filename)[wc_lang.Model][0]

simulation = Simulation(model)
-_, results_dirname = simulation.run(time_max=8 * 3600,
-                                    ode_time_step=1.,
-                                    checkpoint_period=100.,
-                                    results_dir=self.tempdir)
+results_dirname = simulation.run(time_max=8 * 3600,
+                                 ode_time_step=1.,
+                                 checkpoint_period=100.,
+                                 results_dir=self.tempdir).results_dir

# get results
results = RunResults(results_dirname)
5 changes: 2 additions & 3 deletions tests/test_multialgorithm_simulation.py
@@ -554,7 +554,7 @@ def test_performance(self):
simulation_engine = self.prep_simulation(num_ssa_submodels,
submodel_framework=submodel_framework)
start_time = time.process_time()
-num_events = simulation_engine.simulate(end_sim_time)
+num_events = simulation_engine.simulate(end_sim_time).num_events
run_time = time.process_time() - start_time
unprofiled_perf.append("{}\t{}\t{:8.3f}\t{:8.3f}".format(num_ssa_submodels, num_events,
run_time, num_events/run_time))
@@ -565,8 +565,7 @@
out_file = os.path.join(self.out_dir, "profile_out_{}.out".format(num_ssa_submodels))
locals = {'simulation_engine': simulation_engine,
'end_sim_time': end_sim_time}
-cProfile.runctx('num_events = simulation_engine.simulate(end_sim_time)',
-                {}, locals, filename=out_file)
+cProfile.runctx('simulation_engine.simulate(end_sim_time)', {}, locals, filename=out_file)
profile = pstats.Stats(out_file)
print(f"Profile for {num_ssa_submodels} instances of {framework_name}:")
profile.strip_dirs().sort_stats('cumulative').print_stats(15)
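
The runctx() call above no longer assigns num_events because the profiled return value is unused. If the event count of a profiled run were needed again, one option is to bind the result through the locals dict that runctx() executes against; a sketch, assuming simulate() returns the namedtuple described at the top of this commit:

# Sketch: recover the profiled run's event count from the exec'd locals;
# 'locals' is the dict defined in the test above.
cProfile.runctx('simulation_rv = simulation_engine.simulate(end_sim_time)',
                {}, locals, filename=out_file)
num_events = locals['simulation_rv'].num_events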
16 changes: 8 additions & 8 deletions tests/test_run_results.py
@@ -45,10 +45,10 @@ def setUpClass(cls):
cls.max_time = 30

with CaptureOutput(relay=True):
-_, cls.results_dir_1_cmpt = simulation.run(time_max=cls.max_time,
-                                           results_dir=tempfile.mkdtemp(dir=cls.temp_dir),
-                                           checkpoint_period=cls.checkpoint_period,
-                                           verbose=True)
+cls.results_dir_1_cmpt = simulation.run(time_max=cls.max_time,
+                                        results_dir=tempfile.mkdtemp(dir=cls.temp_dir),
+                                        checkpoint_period=cls.checkpoint_period,
+                                        verbose=True).results_dir

# run a simulation whose aggregate states vary over time
exchange_rxn_model = os.path.join(os.path.dirname(__file__), 'fixtures', 'dynamic_tests',
@@ -60,9 +60,9 @@
comp_e.biological_type = comp_c.biological_type
simulation = Simulation(model)
with CaptureOutput(relay=False):
-_, cls.results_dir_dyn_aggr = simulation.run(time_max=cls.max_time,
-                                             results_dir=tempfile.mkdtemp(dir=cls.temp_dir),
-                                             checkpoint_period=cls.checkpoint_period)
+cls.results_dir_dyn_aggr = simulation.run(time_max=cls.max_time,
+                                          results_dir=tempfile.mkdtemp(dir=cls.temp_dir),
+                                          checkpoint_period=cls.checkpoint_period).results_dir

@classmethod
def tearDownClass(cls):
@@ -303,7 +303,7 @@ def run_performance_profile(self, num_species, species_pop, species_mw, num_chec
species_mw)
wc_simulation_metadata = WCSimulationMetadata(wc_sim_config)
simulation_engine.initialize()
-num_events = simulation_engine.simulate(sim_config=de_simulation_config)
+num_events = simulation_engine.simulate(sim_config=de_simulation_config).num_events
print(simulation_engine.provide_event_counts())
WCSimulationMetadata.write_dataclass(wc_simulation_metadata, run_results_dir)

66 changes: 37 additions & 29 deletions tests/test_simulation.py
@@ -39,11 +39,11 @@ def tearDown(self):
def run_simulation(self, simulation, time_max=100):
checkpoint_period = min(10, time_max)
with CaptureOutput(relay=False):
-num_events, results_dir = simulation.run(time_max=time_max, results_dir=self.results_dir,
-                                         checkpoint_period=checkpoint_period)
-self.assertTrue(0 < num_events)
-self.assertTrue(os.path.isdir(results_dir))
-run_results = RunResults(results_dir)
+simulation_rv = simulation.run(time_max=time_max, results_dir=self.results_dir,
+                               checkpoint_period=checkpoint_period)
+self.assertTrue(0 < simulation_rv.num_events)
+self.assertTrue(os.path.isdir(simulation_rv.results_dir))
+run_results = RunResults(simulation_rv.results_dir)

for component in RunResults.COMPONENTS:
self.assertTrue(isinstance(run_results.get(component), (pandas.DataFrame, pandas.Series)))
@@ -70,18 +70,18 @@ def test_simulation_errors(self):

def test_simulate_wo_output_files(self):
with CaptureOutput(relay=False):
-num_events, results_dir = Simulation(TOY_MODEL_FILENAME).run(time_max=5)
-self.assertTrue(0 < num_events)
-self.assertEqual(results_dir, None)
+simulation_rv = Simulation(TOY_MODEL_FILENAME).run(time_max=5)
+self.assertTrue(0 < simulation_rv.num_events)
+self.assertEqual(simulation_rv.results_dir, None)

def test_run(self):
with CaptureOutput(relay=False):
-num_events, results_dir = Simulation(TOY_MODEL_FILENAME).run(time_max=2,
-                                                             results_dir=self.results_dir,
-                                                             checkpoint_period=1)
+simulation_rv = Simulation(TOY_MODEL_FILENAME).run(time_max=2,
+                                                   results_dir=self.results_dir,
+                                                   checkpoint_period=1)

# check time, and simulation config in checkpoints
-access_checkpoints = AccessCheckpoints(results_dir)
+access_checkpoints = AccessCheckpoints(simulation_rv.results_dir)
for time in access_checkpoints.list_checkpoints():
ckpt = access_checkpoints.get_checkpoint(time=time)
self.assertEqual(time, ckpt.time)
@@ -90,23 +90,31 @@ def test_run(self):
# test performance profiling
results_dir = tempfile.mkdtemp(dir=self.test_dir)
with CaptureOutput(relay=False) as capturer:
-stats, _ = Simulation(TOY_MODEL_FILENAME).run(time_max=20,
-                                              results_dir=results_dir,
-                                              checkpoint_period=1,
-                                              profile=True,
-                                              verbose=False)
+simulation_rv = Simulation(TOY_MODEL_FILENAME).run(time_max=20,
+                                                   results_dir=results_dir,
+                                                   checkpoint_period=1,
+                                                   profile=True,
+                                                   verbose=False)
expected_profile_text = ['function calls', 'filename:lineno(function)']
for text in expected_profile_text:
self.assertIn(text, capturer.get_text())
-self.assertTrue(isinstance(stats, pstats.Stats))
+self.assertTrue(isinstance(simulation_rv.profile_stats, pstats.Stats))
self.assertTrue(isinstance(RunResults(results_dir), RunResults))

-with self.assertRaises(MultialgorithmError):
# test profile and verbose both True
+with CaptureOutput(relay=False) as capturer:
Simulation(TOY_MODEL_FILENAME).run(time_max=2,
results_dir=tempfile.mkdtemp(dir=self.test_dir),
checkpoint_period=1,
profile=True,
verbose=True)
+expected_patterns = ['function calls',
+                     'filename:lineno\(function\)',
+                     'Simulated \d+ events',
+                     'Caching statistics',
+                     'Saved checkpoints and run results']
+for pattern in expected_patterns:
+    self.assertRegex(capturer.get_text(), pattern)

+with self.assertRaisesRegex(MultialgorithmError, 'cannot be simulated .* it contains no submodels'):
Simulation(TOY_MODEL_FILENAME).run(time_max=5,
@@ -122,12 +130,12 @@ def test_reseed(self):
for seed in seeds:
tmp_results_dir = tempfile.mkdtemp()
with CaptureOutput(relay=False):
-num_events, results_dir = Simulation(TOY_MODEL_FILENAME).run(time_max=5,
-                                                             results_dir=tmp_results_dir,
-                                                             checkpoint_period=5, seed=seed)
+simulation_rv = Simulation(TOY_MODEL_FILENAME).run(time_max=5,
+                                                   results_dir=tmp_results_dir,
+                                                   checkpoint_period=5, seed=seed)
results[seed] = {}
-results[seed]['num_events'] = num_events
-run_results[seed] = RunResults(results_dir)
+results[seed]['num_events'] = simulation_rv.num_events
+run_results[seed] = RunResults(simulation_rv.results_dir)
shutil.rmtree(tmp_results_dir)
self.assertNotEqual(results[seeds[0]]['num_events'], results[seeds[1]]['num_events'])
self.assertFalse(run_results[seeds[0]].get('populations').equals(run_results[seeds[1]].get('populations')))
@@ -139,12 +147,12 @@
for rep in range(2):
tmp_results_dir = tempfile.mkdtemp()
with CaptureOutput(relay=False):
-num_events, results_dir = Simulation(TOY_MODEL_FILENAME).run(time_max=5,
-                                                             results_dir=tmp_results_dir,
-                                                             checkpoint_period=5, seed=seed)
+simulation_rv = Simulation(TOY_MODEL_FILENAME).run(time_max=5,
+                                                   results_dir=tmp_results_dir,
+                                                   checkpoint_period=5, seed=seed)
results[rep] = {}
-results[rep]['num_events'] = num_events
-run_results[rep] = RunResults(results_dir)
+results[rep]['num_events'] = simulation_rv.num_events
+run_results[rep] = RunResults(simulation_rv.results_dir)
shutil.rmtree(tmp_results_dir)
self.assertEqual(results[0]['num_events'], results[1]['num_events'])
self.assertTrue(run_results[0].get('populations').equals(run_results[1].get('populations')))
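
Because profile_stats carries the pstats.Stats object itself, a caller can summarize a profiled run without reloading the dump file. A minimal sketch, assuming simulation_rv came from a run(..., profile=True) call like the one in test_run() above:

# Sketch: print the ten most expensive calls of a profiled run; the
# pstats chain mirrors the one in tests/test_multialgorithm_simulation.py.
stats = simulation_rv.profile_stats  # a pstats.Stats instance
if stats is not None:
    stats.strip_dirs().sort_stats('cumulative').print_stats(10)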
2 changes: 1 addition & 1 deletion tests/testing/test_utils.py
@@ -63,7 +63,7 @@ def test_check_simul_results(self):

# test dynamics
simulation = Simulation(model)
-_, results_dir = simulation.run(time_max=2, **self.args)
+results_dir = simulation.run(time_max=2, **self.args).results_dir
nan = float('NaN')
check_simul_results(self, dynamic_model, results_dir,
expected_initial_values=expected_initial_values,
4 changes: 0 additions & 4 deletions wc_sim/sim_config.py
@@ -132,10 +132,6 @@ def validate(self):
self.check_periodic_timestep('dfba_time_step')
self.check_periodic_timestep('checkpoint_period')

-# verbose and profile should not both be set
-if self.verbose and de_sim_config.profile:
-    raise MultialgorithmError(f"verbose and profile cannot both be true")
-
def check_periodic_timestep(self, periodic_attr):
""" Check that simulation duration is an integral multiple of a periodic activity's timestep
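
With this check removed from validate(), a single run can both profile and report verbosely. A sketch of the now-legal combination, mirroring the updated test in tests/test_simulation.py above; TOY_MODEL_FILENAME and results_dir are assumed to come from that test's setup:

# Sketch: profile=True and verbose=True may now be combined.
simulation_rv = Simulation(TOY_MODEL_FILENAME).run(time_max=2,
                                                   results_dir=results_dir,
                                                   checkpoint_period=1,
                                                   profile=True,
                                                   verbose=True)
assert isinstance(simulation_rv.profile_stats, pstats.Stats)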
