Commit: missed
Borda committed Jun 5, 2020
1 parent 15a71ab commit 2eb9c00
Showing 7 changed files with 17 additions and 22 deletions.
2 changes: 1 addition & 1 deletion docs/source/weights_loading.rst
@@ -31,7 +31,7 @@ To change the checkpoint path pass in:

.. testcode::

- trainer = Trainer(default_save_path='/your/path/to/save/checkpoints')
+ trainer = Trainer(default_root_dir='/your/path/to/save/checkpoints')

To modify the behavior of checkpointing pass in your own callback.

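The docs line above suggests passing your own callback to customize checkpointing. A minimal sketch of that approach, assuming the 0.7-era ModelCheckpoint API (the filepath, monitor, and save_top_k arguments shown here come from that era and are not part of this commit):

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import ModelCheckpoint

    # Keep only the single best checkpoint, ranked by validation loss.
    checkpoint_callback = ModelCheckpoint(
        filepath='/your/path/to/save/checkpoints',
        monitor='val_loss',
        save_top_k=1,
    )
    trainer = Trainer(checkpoint_callback=checkpoint_callback)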
5 changes: 0 additions & 5 deletions pytorch_lightning/trainer/trainer.py
@@ -147,11 +147,6 @@ def __init__(
default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed
- default_save_path:
-     .. warning:: .. deprecated:: 0.7.3
-         Use `default_root_dir` instead. Will remove 0.9.0.
gradient_clip_val: 0 means don't clip.
gradient_clip:
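For reference, the rename this commit finishes propagating, as a minimal before/after sketch (the path is a placeholder):

    from pytorch_lightning import Trainer

    # Deprecated since 0.7.3, slated for removal in 0.9.0:
    # trainer = Trainer(default_save_path='/some/path')

    # Replacement:
    trainer = Trainer(default_root_dir='/some/path')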
2 changes: 1 addition & 1 deletion tests/loggers/test_all.py
@@ -109,7 +109,7 @@ def test_logger_reset_correctly(tmpdir, extra_params):
model = EvalModelTemplate()

trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
**extra_params
)
logger1 = trainer.logger
logger1 = trainer.logger
18 changes: 9 additions & 9 deletions tests/trainer/test_lr_finder.py
@@ -14,7 +14,7 @@ def test_error_on_more_than_1_optimizer(tmpdir):

# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=1
)

@@ -29,7 +29,7 @@ def test_model_reset_correctly(tmpdir):

# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=1
)

@@ -51,7 +51,7 @@ def test_trainer_reset_correctly(tmpdir):

# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=1
)

@@ -81,7 +81,7 @@ def test_trainer_arg_bool(tmpdir):

# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=2,
auto_lr_find=True
)
@@ -100,7 +100,7 @@ def test_trainer_arg_str(tmpdir):
before_lr = model.my_fancy_lr
# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=2,
auto_lr_find='my_fancy_lr'
)
@@ -120,7 +120,7 @@ def test_call_to_trainer_method(tmpdir):
before_lr = hparams.get('learning_rate')
# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=2,
)

@@ -144,7 +144,7 @@ def test_accumulation_and_early_stopping(tmpdir):
before_lr = hparams.get('learning_rate')
# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
accumulate_grad_batches=2,
)

@@ -167,7 +167,7 @@ def test_suggestion_parameters_work(tmpdir):

# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=3,
)

@@ -187,7 +187,7 @@ def test_suggestion_with_non_finite_values(tmpdir):

# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=3
)

2 changes: 1 addition & 1 deletion tests/trainer/test_optimizers.py
@@ -234,6 +234,6 @@ def configure_optimizers(self):
model = CurrentModel(hparams)

# fit model
- trainer = Trainer(default_save_path=tmpdir, max_epochs=1)
+ trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
result = trainer.fit(model)
assert result == 1
2 changes: 1 addition & 1 deletion tests/trainer/test_trainer.py
@@ -686,7 +686,7 @@ def _optimizer_step(*args, **kwargs):

def test_gpu_choice(tmpdir):
trainer_options = dict(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
)
# Only run if CUDA is available
if not torch.cuda.is_available():
8 changes: 4 additions & 4 deletions tests/trainer/test_trainer_tricks.py
@@ -15,7 +15,7 @@ def test_model_reset_correctly(tmpdir):

# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=1
)

@@ -38,7 +38,7 @@ def test_trainer_reset_correctly(tmpdir):

# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=1
)

@@ -77,7 +77,7 @@ def test_trainer_arg(tmpdir, scale_arg):
before_batch_size = hparams.get('batch_size')
# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=1,
auto_scale_batch_size=scale_arg,
)
@@ -99,7 +99,7 @@ def test_call_to_trainer_method(tmpdir, scale_method):
before_batch_size = hparams.get('batch_size')
# logger file to get meta
trainer = Trainer(
- default_save_path=tmpdir,
+ default_root_dir=tmpdir,
max_epochs=1,
)

