test: replacing tmpdir with tmp_path in tests_pytorch/profiler (#…
fnhirwa committed Mar 17, 2024
1 parent 82f8ab9 commit 7cc71dc
Showing 2 changed files with 75 additions and 71 deletions.
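The change itself is mechanical: pytest's legacy `tmpdir` fixture yields a `py.path.local`, while `tmp_path` yields a standard `pathlib.Path`, so call sites that used `.join()` or `.listdir()`, or that compared the directory against string attributes such as `trainer.log_dir`, need small adjustments. A minimal sketch of the pattern (illustrative only, not part of the commit; the helper name is made up):

from pathlib import Path


def check_profiler_output(tmp_path: Path, log_dir: str) -> None:
    # Hypothetical helper showing the tmpdir -> tmp_path idioms applied in this diff:
    #   tmpdir.join("fit-profiler.txt")  ->  tmp_path / "fit-profiler.txt"
    #   tmpdir.listdir()                 ->  tmp_path.iterdir()
    #   log_dir == tmpdir                ->  log_dir == str(tmp_path)
    profile_file = tmp_path / "fit-profiler.txt"  # Path objects compose with the / operator
    assert profile_file.exists()
    assert any(p.suffix == ".txt" for p in tmp_path.iterdir())  # Path iteration
    assert log_dir == str(tmp_path)  # a Path never compares equal to a plain str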
136 changes: 70 additions & 66 deletions tests/tests_pytorch/profilers/test_profiler.py
@@ -88,36 +88,36 @@ def test_simple_profiler_value_errors(simple_profiler):
simple_profiler.stop(action)


-def test_simple_profiler_deepcopy(tmpdir):
-simple_profiler = SimpleProfiler(dirpath=tmpdir, filename="test")
+def test_simple_profiler_deepcopy(tmp_path):
+simple_profiler = SimpleProfiler(dirpath=tmp_path, filename="test")
simple_profiler.describe()
assert deepcopy(simple_profiler)


-def test_simple_profiler_dirpath(tmpdir):
+def test_simple_profiler_dirpath(tmp_path):
"""Ensure the profiler dirpath defaults to `trainer.log_dir` when not present."""
profiler = SimpleProfiler(filename="profiler")
assert profiler.dirpath is None

model = BoringModel()
-trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, profiler=profiler, logger=False)
+trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, profiler=profiler, logger=False)
trainer.fit(model)

-assert trainer.log_dir == tmpdir
-assert profiler.dirpath == trainer.log_dir
-assert tmpdir.join("fit-profiler.txt").exists()
+assert trainer.log_dir == str(tmp_path)
+assert profiler.dirpath == str(trainer.log_dir)
+assert (tmp_path / "fit-profiler.txt").exists()


-def test_simple_profiler_with_nonexisting_log_dir(tmpdir):
+def test_simple_profiler_with_nonexisting_log_dir(tmp_path):
"""Ensure the profiler dirpath defaults to `trainer.log_dir` and creates it when not present."""
-nonexisting_tmpdir = tmpdir / "nonexisting"
+nonexisting_tmp_path = tmp_path / "nonexisting"

profiler = SimpleProfiler(filename="profiler")
assert profiler.dirpath is None

model = BoringModel()
trainer = Trainer(
-default_root_dir=nonexisting_tmpdir,
+default_root_dir=nonexisting_tmp_path,
max_epochs=1,
limit_train_batches=1,
limit_val_batches=1,
@@ -126,35 +126,35 @@ def test_simple_profiler_with_nonexisting_log_dir(tmpdir):
)
trainer.fit(model)

-assert nonexisting_tmpdir.exists()
-assert trainer.log_dir == nonexisting_tmpdir
+assert nonexisting_tmp_path.exists()
+assert trainer.log_dir == str(nonexisting_tmp_path)
assert profiler.dirpath == trainer.log_dir
-assert nonexisting_tmpdir.join("fit-profiler.txt").exists()
+assert (nonexisting_tmp_path / "fit-profiler.txt").exists()


-def test_simple_profiler_with_nonexisting_dirpath(tmpdir):
+def test_simple_profiler_with_nonexisting_dirpath(tmp_path):
"""Ensure the profiler creates non-existing dirpath."""
-nonexisting_tmpdir = tmpdir / "nonexisting"
+nonexisting_tmp_path = tmp_path / "nonexisting"

-profiler = SimpleProfiler(dirpath=nonexisting_tmpdir, filename="profiler")
+profiler = SimpleProfiler(dirpath=nonexisting_tmp_path, filename="profiler")

model = BoringModel()
trainer = Trainer(
-default_root_dir=tmpdir, max_epochs=1, limit_train_batches=1, limit_val_batches=1, profiler=profiler
+default_root_dir=tmp_path, max_epochs=1, limit_train_batches=1, limit_val_batches=1, profiler=profiler
)
trainer.fit(model)

-assert nonexisting_tmpdir.exists()
-assert nonexisting_tmpdir.join("fit-profiler.txt").exists()
+assert nonexisting_tmp_path.exists()
+assert (nonexisting_tmp_path / "fit-profiler.txt").exists()


@RunIf(skip_windows=True)
-def test_simple_profiler_distributed_files(tmpdir):
+def test_simple_profiler_distributed_files(tmp_path):
"""Ensure the proper files are saved in distributed mode."""
-profiler = SimpleProfiler(dirpath=tmpdir, filename="profiler")
+profiler = SimpleProfiler(dirpath=tmp_path, filename="profiler")
model = BoringModel()
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
fast_dev_run=2,
strategy="ddp_spawn",
accelerator="cpu",
@@ -170,14 +170,14 @@ def test_simple_profiler_distributed_files(tmpdir):
expected = {f"{stage}-profiler-{rank}.txt" for stage in ("fit", "validate", "test") for rank in (0, 1)}
assert actual == expected

-for f in profiler.dirpath.listdir():
+for f in profiler.dirpath.iterdir():
assert f.read_text("utf-8")


-def test_simple_profiler_logs(tmpdir, caplog, simple_profiler):
+def test_simple_profiler_logs(tmp_path, caplog, simple_profiler):
"""Ensure that the number of printed logs is correct."""
model = BoringModel()
-trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=2, profiler=simple_profiler, logger=False)
+trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=2, profiler=simple_profiler, logger=False)
with caplog.at_level(logging.INFO, logger="lightning.pytorch.profiler"):
trainer.fit(model)
trainer.test(model)
@@ -187,7 +187,7 @@ def test_simple_profiler_logs(tmpdir, caplog, simple_profiler):

@pytest.mark.parametrize("extended", [True, False])
@patch("time.monotonic", return_value=70)
-def test_simple_profiler_summary(tmpdir, extended):
+def test_simple_profiler_summary(tmp_path, extended):
"""Test the summary of `SimpleProfiler`."""
profiler = SimpleProfiler(extended=extended)
profiler.start_time = 63.0
@@ -256,8 +256,8 @@ def test_simple_profiler_summary(tmpdir, extended):


@pytest.fixture()
-def advanced_profiler(tmpdir):
-return AdvancedProfiler(dirpath=tmpdir, filename="profiler")
+def advanced_profiler(tmp_path):
+return AdvancedProfiler(dirpath=tmp_path, filename="profiler")


@pytest.mark.flaky(reruns=3)
@@ -287,7 +287,7 @@ def test_advanced_profiler_overhead(advanced_profiler, n_iter=5):
assert average_duration < PROFILER_OVERHEAD_MAX_TOLERANCE


-def test_advanced_profiler_describe(tmpdir, advanced_profiler):
+def test_advanced_profiler_describe(tmp_path, advanced_profiler):
"""Ensure the profiler won't fail when reporting the summary."""
# record at least one event
with advanced_profiler.profile("test"):
@@ -315,8 +315,8 @@ def test_advanced_profiler_deepcopy(advanced_profiler):


@pytest.fixture()
-def pytorch_profiler(tmpdir):
-return PyTorchProfiler(dirpath=tmpdir, filename="profiler")
+def pytorch_profiler(tmp_path):
+return PyTorchProfiler(dirpath=tmp_path, filename="profiler")


@pytest.mark.xfail(raises=AssertionError, reason="TODO: Support after 1.11 profiler added")
@@ -332,11 +332,11 @@ def test_pytorch_profiler_describe(pytorch_profiler):
assert len(data) > 0


-def test_advanced_profiler_cprofile_deepcopy(tmpdir):
+def test_advanced_profiler_cprofile_deepcopy(tmp_path):
"""Checks for pickle issue reported in #6522."""
model = BoringModel()
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
fast_dev_run=True,
profiler="advanced",
callbacks=StochasticWeightAveraging(swa_lrs=1e-2),
@@ -345,11 +345,11 @@ def test_advanced_profiler_cprofile_deepcopy(tmpdir):


@RunIf(min_cuda_gpus=2, standalone=True)
-def test_pytorch_profiler_trainer_ddp(tmpdir, pytorch_profiler):
+def test_pytorch_profiler_trainer_ddp(tmp_path, pytorch_profiler):
"""Ensure that the profiler can be given to the trainer and that the default steps are properly recorded."""
model = BoringModel()
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
max_epochs=1,
limit_train_batches=5,
limit_val_batches=5,
@@ -388,11 +388,11 @@ def test_pytorch_profiler_trainer_ddp(tmpdir, pytorch_profiler):

@pytest.mark.parametrize("fast_dev_run", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("boring_model_cls", [ManualOptimBoringModel, BoringModel])
-def test_pytorch_profiler_trainer_fit(fast_dev_run, boring_model_cls, tmpdir):
+def test_pytorch_profiler_trainer_fit(fast_dev_run, boring_model_cls, tmp_path):
"""Ensure that the profiler can be given to the trainer and that the steps are properly recorded."""
-pytorch_profiler = PyTorchProfiler(dirpath=tmpdir, filename="profile")
+pytorch_profiler = PyTorchProfiler(dirpath=tmp_path, filename="profile")
model = boring_model_cls()
-trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, fast_dev_run=fast_dev_run, profiler=pytorch_profiler)
+trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, fast_dev_run=fast_dev_run, profiler=pytorch_profiler)
trainer.fit(model)

assert sum(
@@ -404,18 +404,18 @@ def test_pytorch_profiler_trainer_fit(fast_dev_run, boring_model_cls, tmpdir):
assert path.read_text("utf-8")

if _KINETO_AVAILABLE:
-files = sorted(file for file in os.listdir(tmpdir) if file.endswith(".json"))
+files = sorted(file for file in os.listdir(tmp_path) if file.endswith(".json"))
assert any(f"fit-{pytorch_profiler.filename}" in f for f in files)


@pytest.mark.parametrize(("fn", "step_name"), [("test", "test"), ("validate", "validation"), ("predict", "predict")])
@pytest.mark.parametrize("boring_model_cls", [BoringModel, ManualOptimBoringModel])
-def test_pytorch_profiler_trainer(fn, step_name, boring_model_cls, tmpdir):
+def test_pytorch_profiler_trainer(fn, step_name, boring_model_cls, tmp_path):
"""Ensure that the profiler can be given to the trainer and that the steps are properly recorded."""
-pytorch_profiler = PyTorchProfiler(dirpath=tmpdir, filename="profile", schedule=None)
+pytorch_profiler = PyTorchProfiler(dirpath=tmp_path, filename="profile", schedule=None)
model = boring_model_cls()
model.predict_dataloader = model.train_dataloader
-trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_test_batches=2, profiler=pytorch_profiler)
+trainer = Trainer(default_root_dir=tmp_path, max_epochs=1, limit_test_batches=2, profiler=pytorch_profiler)
getattr(trainer, fn)(model)

assert sum(e.name.endswith(f"{step_name}_step") for e in pytorch_profiler.function_events)
@@ -424,13 +424,13 @@ def test_pytorch_profiler_trainer(fn, step_name, boring_model_cls, tmpdir):
assert path.read_text("utf-8")

if _KINETO_AVAILABLE:
-files = sorted(file for file in os.listdir(tmpdir) if file.endswith(".json"))
+files = sorted(file for file in os.listdir(tmp_path) if file.endswith(".json"))
assert any(f"{fn}-{pytorch_profiler.filename}" in f for f in files)


-def test_pytorch_profiler_nested(tmpdir):
+def test_pytorch_profiler_nested(tmp_path):
"""Ensure that the profiler handles nested context."""
-pytorch_profiler = PyTorchProfiler(use_cuda=False, dirpath=tmpdir, filename="profiler", schedule=None)
+pytorch_profiler = PyTorchProfiler(use_cuda=False, dirpath=tmp_path, filename="profiler", schedule=None)

with pytorch_profiler.profile("a"):
a = torch.ones(42)
@@ -451,7 +451,7 @@ def test_pytorch_profiler_nested(tmpdir):
assert events_name == expected, (events_name, torch.__version__, platform.system())


-def test_pytorch_profiler_multiple_loggers(tmpdir):
+def test_pytorch_profiler_multiple_loggers(tmp_path):
"""Tests whether the PyTorch profiler is able to write its trace locally when the Trainer is configured with
multiple loggers.
@@ -464,19 +464,21 @@ def look_for_trace(trace_dir):
return any("trace.json" in filename for filename in os.listdir(trace_dir))

model = BoringModel()
-loggers = [TensorBoardLogger(save_dir=tmpdir), CSVLogger(tmpdir)]
-trainer = Trainer(default_root_dir=tmpdir, profiler="pytorch", logger=loggers, limit_train_batches=5, max_epochs=1)
+loggers = [TensorBoardLogger(save_dir=tmp_path), CSVLogger(tmp_path)]
+trainer = Trainer(
+default_root_dir=tmp_path, profiler="pytorch", logger=loggers, limit_train_batches=5, max_epochs=1
+)
assert len(trainer.loggers) == 2
trainer.fit(model)
-assert look_for_trace(tmpdir / "lightning_logs" / "version_0")
+assert look_for_trace(tmp_path / "lightning_logs" / "version_0")


-def test_register_record_function(tmpdir):
+def test_register_record_function(tmp_path):
use_cuda = torch.cuda.is_available()
pytorch_profiler = PyTorchProfiler(
export_to_chrome=False,
use_cuda=use_cuda,
-dirpath=tmpdir,
+dirpath=tmp_path,
filename="profiler",
schedule=None,
on_trace_ready=None,
@@ -506,24 +508,24 @@ def __init__(self):


@pytest.mark.parametrize("cls", [SimpleProfiler, AdvancedProfiler, PyTorchProfiler])
-def test_profiler_teardown(tmpdir, cls):
+def test_profiler_teardown(tmp_path, cls):
"""This test checks that the profiler teardown method is called when the trainer is exiting."""

class TestCallback(Callback):
def on_fit_end(self, trainer, *args, **kwargs) -> None:
# describe sets it to None
assert trainer.profiler._output_file is None

-profiler = cls(dirpath=tmpdir, filename="profiler")
+profiler = cls(dirpath=tmp_path, filename="profiler")
model = BoringModel()
-trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=1, profiler=profiler, callbacks=[TestCallback()])
+trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=1, profiler=profiler, callbacks=[TestCallback()])
trainer.fit(model)

assert profiler._output_file is None


-def test_pytorch_profiler_deepcopy(tmpdir):
-pytorch_profiler = PyTorchProfiler(dirpath=tmpdir, filename="profiler", schedule=None)
+def test_pytorch_profiler_deepcopy(tmp_path):
+pytorch_profiler = PyTorchProfiler(dirpath=tmp_path, filename="profiler", schedule=None)
pytorch_profiler.start("on_train_start")
torch.tensor(1)
pytorch_profiler.describe()
@@ -574,22 +576,22 @@ def test_trainer_profiler_incorrect_str_arg():
({"limit_predict_batches": 4}, "predict"),
],
)
-def test_pytorch_profiler_raises_warning_for_limited_steps(tmpdir, trainer_config, trainer_fn):
+def test_pytorch_profiler_raises_warning_for_limited_steps(tmp_path, trainer_config, trainer_fn):
model = BoringModel()
-trainer = Trainer(default_root_dir=tmpdir, profiler="pytorch", max_epochs=1, **trainer_config)
+trainer = Trainer(default_root_dir=tmp_path, profiler="pytorch", max_epochs=1, **trainer_config)
warning_cache.clear()
with pytest.warns(UserWarning, match="not enough steps to properly record traces"):
getattr(trainer, trainer_fn)(model)
assert trainer.profiler._schedule is None
warning_cache.clear()


-def test_profile_callbacks(tmpdir):
+def test_profile_callbacks(tmp_path):
"""Checks if profiling callbacks work correctly, specifically when there are two of the same callback type."""
-pytorch_profiler = PyTorchProfiler(dirpath=tmpdir, filename="profiler")
+pytorch_profiler = PyTorchProfiler(dirpath=tmp_path, filename="profiler")
model = BoringModel()
trainer = Trainer(
-default_root_dir=tmpdir,
+default_root_dir=tmp_path,
fast_dev_run=1,
profiler=pytorch_profiler,
callbacks=[EarlyStopping("val_loss"), EarlyStopping("train_loss")],
@@ -606,13 +608,15 @@ def test_profile_callbacks(tmpdir):


@RunIf(min_python="3.10")
-def test_profiler_table_kwargs_summary_length(tmpdir):
+def test_profiler_table_kwargs_summary_length(tmp_path):
"""Test if setting max_name_column_width in table_kwargs changes table width."""

summaries = []
# Default table_kwargs (None) sets max_name_column_width to 55
for table_kwargs in [{"max_name_column_width": 1}, {"max_name_column_width": 5}, None]:
-pytorch_profiler = PyTorchProfiler(dirpath=tmpdir, filename="profile", schedule=None, table_kwargs=table_kwargs)
+pytorch_profiler = PyTorchProfiler(
+dirpath=tmp_path, filename="profile", schedule=None, table_kwargs=table_kwargs
+)

with pytorch_profiler.profile("a"):
torch.ones(1)
@@ -624,17 +628,17 @@ def test_profiler_invalid_table_kwargs(tmpdir):
assert summaries[1].count("-") < summaries[2].count("-")


-def test_profiler_invalid_table_kwargs(tmpdir):
+def test_profiler_invalid_table_kwargs(tmp_path):
"""Test if passing invalid keyword arguments raises the expected error."""

for key in {"row_limit", "sort_by"}:
with pytest.raises(
KeyError,
match=f"Found invalid table_kwargs key: {key}. This is already a positional argument of the Profiler.",
):
-PyTorchProfiler(table_kwargs={key: None}, dirpath=tmpdir, filename="profile")
+PyTorchProfiler(table_kwargs={key: None}, dirpath=tmp_path, filename="profile")

for key in {"self", "non_existent_keyword_arg"}:
with pytest.raises(KeyError) as exc_info:
-PyTorchProfiler(table_kwargs={key: None}, dirpath=tmpdir, filename="profile")
+PyTorchProfiler(table_kwargs={key: None}, dirpath=tmp_path, filename="profile")
assert exc_info.value.args[0].startswith(f"Found invalid table_kwargs key: {key}.")
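For reference, a standalone sketch (not from the repository; the test name and file contents are illustrative) of a test written directly against `tmp_path`, using the same idioms the migrated tests now rely on:

from pathlib import Path


def test_writes_report_into_tmp_path(tmp_path: Path) -> None:
    # pytest injects `tmp_path` as a unique per-test pathlib.Path directory.
    report = tmp_path / "fit-profiler.txt"
    report.write_text("profiler summary", encoding="utf-8")

    assert report.exists()
    assert report.read_text(encoding="utf-8") == "profiler summary"
    # iterdir() replaces py.path.local.listdir(); compare names as strings when needed.
    assert [p.name for p in tmp_path.iterdir()] == ["fit-profiler.txt"]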
