black formatting with line break
awaelchli committed Aug 13, 2021
1 parent 49f376b commit 77e7027
Showing 2 changed files with 14 additions and 6 deletions.
tests/checkpointing/test_model_checkpoint.py (12 additions, 5 deletions)
@@ -47,7 +47,8 @@ def test_model_checkpoint_state_id():
     early_stopping = ModelCheckpoint(monitor="val_loss")
     assert (
         early_stopping.state_id
-        == "ModelCheckpoint{'monitor': 'val_loss', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': None}"
+        == "ModelCheckpoint{'monitor': 'val_loss', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
+        " 'train_time_interval': None, 'save_on_train_epoch_end': None}"
     )


@@ -157,7 +158,8 @@ def on_validation_epoch_end(self):
         assert chk["global_step"] == limit_train_batches * (epoch + 1)

         mc_specific_data = chk["callbacks"][
-            f"ModelCheckpoint{{'monitor': '{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': True}}"
+            f"ModelCheckpoint{{'monitor': '{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
+            " 'train_time_interval': None, 'save_on_train_epoch_end': True}"
         ]
         assert mc_specific_data["dirpath"] == checkpoint.dirpath
         assert mc_specific_data["monitor"] == monitor
@@ -270,7 +272,8 @@ def _make_assertions(epoch, ix, version=""):
         assert chk["global_step"] == expected_global_step

         mc_specific_data = chk["callbacks"][
-            f"ModelCheckpoint{{'monitor': '{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': False}}"
+            f"ModelCheckpoint{{'monitor': '{monitor}', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
+            " 'train_time_interval': None, 'save_on_train_epoch_end': False}"
         ]
         assert mc_specific_data["dirpath"] == checkpoint.dirpath
         assert mc_specific_data["monitor"] == monitor
@@ -870,7 +873,10 @@ def test_model_checkpoint_save_last_checkpoint_contents(tmpdir):
     assert ckpt_last_epoch["epoch"] == ckpt_last["epoch"]
     assert ckpt_last_epoch["global_step"] == ckpt_last["global_step"]

-    ckpt_id = "ModelCheckpoint{'monitor': 'early_stop_on', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': True}"
+    ckpt_id = (
+        "ModelCheckpoint{'monitor': 'early_stop_on', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
+        " 'train_time_interval': None, 'save_on_train_epoch_end': True}"
+    )
     assert ckpt_last["callbacks"][ckpt_id] == ckpt_last_epoch["callbacks"][ckpt_id]

     # it is easier to load the model objects than to iterate over the raw dict of tensors
@@ -1111,7 +1117,8 @@ def training_step(self, *args):
     ckpts = [torch.load(str(ckpt)) for ckpt in tmpdir.listdir()]
     ckpts = [
         ckpt["callbacks"][
-            "ModelCheckpoint{'monitor': 'foo', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': True}"
+            "ModelCheckpoint{'monitor': 'foo', 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
+            " 'train_time_interval': None, 'save_on_train_epoch_end': True}"
         ]
         for ckpt in ckpts
     ]
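Why the split is safe (a minimal illustrative sketch, not part of this commit): Python concatenates adjacent string literals at parse time, and an f-string literal followed by a plain literal concatenates the same way, so the two-line form yields exactly the same dictionary key as the original one-liner. The key below is abbreviated for readability.

split_key = (
    "ModelCheckpoint{'monitor': 'val_loss', 'mode': 'min',"
    " 'every_n_train_steps': 0}"
)
single_key = "ModelCheckpoint{'monitor': 'val_loss', 'mode': 'min', 'every_n_train_steps': 0}"
assert split_key == single_key  # same key, so ckpt["callbacks"][...] lookups are unaffected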
tests/trainer/connectors/test_callback_connector.py (2 additions, 1 deletion)
@@ -107,7 +107,8 @@ def test_all_callback_states_saved_before_checkpoint_callback(tmpdir):
     assert "content1" in state1 and state1["content1"] == "one"
     assert "content1" in state2 and state2["content1"] == "two"
     assert (
-        "ModelCheckpoint{'monitor': None, 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1, 'train_time_interval': None, 'save_on_train_epoch_end': True}"
+        "ModelCheckpoint{'monitor': None, 'mode': 'min', 'every_n_train_steps': 0, 'every_n_epochs': 1,"
+        " 'train_time_interval': None, 'save_on_train_epoch_end': True}"
         in ckpt["callbacks"]
     )
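For context, the hard-coded keys asserted in these tests correspond to ModelCheckpoint.state_id, which test_model_checkpoint_state_id checks directly. A hedged sketch of that relationship (not part of this commit; the exact key depends on the constructor arguments):

from pytorch_lightning.callbacks import ModelCheckpoint

mc = ModelCheckpoint(monitor="val_loss")
key = mc.state_id  # e.g. "ModelCheckpoint{'monitor': 'val_loss', 'mode': 'min', ...}"
# A saved checkpoint keeps per-callback state under this key,
# which is what the tests read back via ckpt["callbacks"][key].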
