[TPU] Fix test assertion error from artifacts (#19825)
awaelchli committed May 23, 2024
1 parent e0d7ede · commit 7874cd0
Showing 2 changed files with 2 additions and 2 deletions.
tests/tests_pytorch/accelerators/test_xla.py (2 changes: 1 addition & 1 deletion)
@@ -56,7 +56,7 @@ def test_resume_training_on_cpu(tmp_path):
"""Checks if training can be resumed from a saved checkpoint on CPU."""
# Train a model on TPU
model = BoringModel()
trainer = Trainer(max_epochs=1, accelerator="tpu", devices="auto")
trainer = Trainer(max_epochs=1, accelerator="tpu", devices="auto", default_root_dir=tmp_path)
trainer.fit(model)

if trainer.world_size != trainer.num_devices:
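As context for the change above (not part of the commit): pointing default_root_dir at pytest's tmp_path keeps checkpoints and logs inside the per-test temporary directory instead of the working directory, so leftover files cannot trip assertions in later tests. A minimal sketch of that behavior, with CPU standing in for the TPU accelerator so it runs anywhere; the test name and the limit_* settings are illustrative only:

from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel


def test_artifacts_stay_in_tmp_path(tmp_path):  # hypothetical test, not in the repo
    trainer = Trainer(
        max_epochs=1,
        accelerator="cpu",           # assumption: CPU stands in for the TPU accelerator
        default_root_dir=tmp_path,   # the argument this commit adds
        logger=False,
        limit_train_batches=2,
        limit_val_batches=0,
    )
    trainer.fit(BoringModel())
    # With checkpointing left enabled, the checkpoint lands under tmp_path,
    # not in the repository checkout.
    assert list(tmp_path.rglob("*.ckpt"))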
@@ -152,6 +152,6 @@ def on_train_start(self):
 @mock.patch.dict(os.environ, os.environ.copy(), clear=True)
 def test_num_stepping_batches_with_tpu_multi():
     """Test stepping batches with the TPU strategy across multiple devices."""
-    trainer = Trainer(accelerator="tpu", devices="auto", max_epochs=1)
+    trainer = Trainer(accelerator="tpu", devices="auto", max_epochs=1, logger=False, enable_checkpointing=False)
     model = MultiprocessModel()
     trainer.fit(model)
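
Similarly for the second test, a sketch (again not part of the commit, with CPU standing in for TPU) of what disabling the logger and checkpointing buys: the run writes nothing under default_root_dir, so the multi-device test leaves no artifacts behind:

from lightning.pytorch import Trainer
from lightning.pytorch.demos.boring_classes import BoringModel


def test_no_artifacts_written(tmp_path):  # hypothetical test, not in the repo
    trainer = Trainer(
        accelerator="cpu",            # assumption: CPU stands in for the TPU accelerator
        max_epochs=1,
        logger=False,                 # no lightning_logs/ directory is created
        enable_checkpointing=False,   # no *.ckpt files are written
        default_root_dir=tmp_path,
        limit_train_batches=2,
        limit_val_batches=0,
    )
    trainer.fit(BoringModel())
    assert not list(tmp_path.rglob("*.ckpt"))
    assert not (tmp_path / "lightning_logs").exists()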
