Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix/ptl fixes #806

Merged
merged 8 commits into from
Feb 17, 2022
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
9 changes: 8 additions & 1 deletion darts/models/forecasting/block_rnn_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,8 +213,15 @@ def __init__(
Number of epochs to wait before evaluating the validation loss (if a validation
``TimeSeries`` is passed to the :func:`fit()` method).
torch_device_str
Optionally, a string indicating the torch device to use. (default: "cuda:0" if a GPU
Optionally, a string indicating the torch device to use. (default: ``None``. Set "cuda:0" if a GPU
is available, otherwise "cpu")

.. deprecated:: v0.17.0
``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
``pl_trainer_kwargs`` dict. For more info, see here:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you maybe write down which device will be used by default?
Also, you could maybe add a small example here, e.g. "use gpus=-1 to use all available GPUs", or something along those lines. Or maybe use "auto"?

https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
force_reset
If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
be discarded).
Expand Down
9 changes: 8 additions & 1 deletion darts/models/forecasting/nbeats.py
Original file line number Diff line number Diff line change
Expand Up @@ -546,8 +546,15 @@ def __init__(
Number of epochs to wait before evaluating the validation loss (if a validation
``TimeSeries`` is passed to the :func:`fit()` method).
torch_device_str
Optionally, a string indicating the torch device to use. (default: "cuda:0" if a GPU
Optionally, a string indicating the torch device to use. (default: ``None``. Set "cuda:0" if a GPU
is available, otherwise "cpu")

.. deprecated:: v0.17.0
``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
``pl_trainer_kwargs`` dict. For more info, see here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
force_reset
If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
be discarded).
Expand Down
9 changes: 8 additions & 1 deletion darts/models/forecasting/pl_forecasting_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -252,10 +252,17 @@ def _create_from_cls_and_kwargs(cls, kws):
if self.lr_scheduler_cls is not None:
lr_sched_kws = {k: v for k, v in self.lr_scheduler_kwargs.items()}
lr_sched_kws["optimizer"] = optimizer

# ReduceLROnPlateau requires a metric to "monitor", which must be set separately; most other schedulers do not
lr_monitor = lr_sched_kws.pop("monitor", None)

lr_scheduler = _create_from_cls_and_kwargs(
self.lr_scheduler_cls, lr_sched_kws
)
return [optimizer], [lr_scheduler]
return [optimizer], {
"scheduler": lr_scheduler,
"monitor": lr_monitor if lr_monitor is not None else "val_loss",
}
else:
return optimizer

Expand Down
9 changes: 8 additions & 1 deletion darts/models/forecasting/rnn_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -279,8 +279,15 @@ def __init__(
Number of epochs to wait before evaluating the validation loss (if a validation
``TimeSeries`` is passed to the :func:`fit()` method).
torch_device_str
Optionally, a string indicating the torch device to use. (default: "cuda:0" if a GPU
Optionally, a string indicating the torch device to use. (default: ``None``. Set "cuda:0" if a GPU
is available, otherwise "cpu")

.. deprecated:: v0.17.0
``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
``pl_trainer_kwargs`` dict. For more info, see here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
force_reset
If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
be discarded).
Expand Down
9 changes: 8 additions & 1 deletion darts/models/forecasting/tcn_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -332,8 +332,15 @@ def __init__(
Number of epochs to wait before evaluating the validation loss (if a validation
``TimeSeries`` is passed to the :func:`fit()` method).
torch_device_str
Optionally, a string indicating the torch device to use. (default: "cuda:0" if a GPU
Optionally, a string indicating the torch device to use. (default: ``None``. Set "cuda:0" if a GPU
is available, otherwise "cpu")

.. deprecated:: v0.17.0
``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
``pl_trainer_kwargs`` dict. For more info, see here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
force_reset
If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
be discarded).
Expand Down
9 changes: 8 additions & 1 deletion darts/models/forecasting/tft_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -639,8 +639,15 @@ def __init__(
Number of epochs to wait before evaluating the validation loss (if a validation
``TimeSeries`` is passed to the :func:`fit()` method).
torch_device_str
Optionally, a string indicating the torch device to use. (default: "cuda:0" if a GPU
Optionally, a string indicating the torch device to use. (default: ``None``. Set "cuda:0" if a GPU
is available, otherwise "cpu")

.. deprecated:: v0.17.0
``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
``pl_trainer_kwargs`` dict. For more info, see here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
force_reset
If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
be discarded).
Expand Down
75 changes: 66 additions & 9 deletions darts/models/forecasting/torch_forecasting_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,8 +161,15 @@ def __init__(
Number of epochs to wait before evaluating the validation loss (if a validation
``TimeSeries`` is passed to the :func:`fit()` method).
torch_device_str
Optionally, a string indicating the torch device to use. (default: "cuda:0" if a GPU
Optionally, a string indicating the torch device to use. (default: ``None``. Set "cuda:0" if a GPU
is available, otherwise "cpu")

.. deprecated:: v0.17.0
``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
``pl_trainer_kwargs`` dict. For more info, see here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
force_reset
If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
be discarded).
Expand Down Expand Up @@ -336,7 +343,9 @@ def __init__(
self.pl_module_params: Optional[Dict] = None

@staticmethod
def _extract_torch_devices(torch_device_str) -> Tuple[str, Optional[list], bool]:
def _extract_torch_devices(
torch_device_str,
) -> Tuple[str, Optional[Union[list, int]], bool]:
"""This method handles the deprecated `torch_device_str` and should be removed in a future Darts version.

Returns
Expand Down Expand Up @@ -372,13 +381,13 @@ def _extract_torch_devices(torch_device_str) -> Tuple[str, Optional[list], bool]

gpus = None
auto_select_gpus = False
accelerator = device_split[0]
if len(device_split) == 2 and accelerator == "cuda":
accelerator = "gpu" if device_split[0] == "cuda" else device_split[0]

if len(device_split) == 2 and accelerator == "gpu":
gpus = device_split[1]
gpus = [int(gpus)]
elif len(device_split) == 1:
if accelerator == "cuda":
accelerator = "gpu"
if accelerator == "gpu":
gpus = -1
auto_select_gpus = True
else:
Expand All @@ -389,9 +398,29 @@ def _extract_torch_devices(torch_device_str) -> Tuple[str, Optional[list], bool]
)
return accelerator, gpus, auto_select_gpus

@staticmethod
def _extract_torch_model_params(**kwargs):
@classmethod
def _validate_model_kwargs(cls, **kwargs):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👍

"""validate that kwargs used at model creation are part of :class:`TorchForecastingModel`,
:class:`PLForecastingModule` or cls __init__ methods.
"""
valid_kwargs = (
set(inspect.signature(TorchForecastingModel.__init__).parameters.keys())
| set(inspect.signature(PLForecastingModule.__init__).parameters.keys())
| set(inspect.signature(cls.__init__).parameters.keys())
)

invalid_kwargs = [kwarg for kwarg in kwargs if kwarg not in valid_kwargs]

raise_if(
len(invalid_kwargs) > 0,
f"Invalid model creation parameters. Model `{cls.__name__}` has no args/kwargs `{invalid_kwargs}`",
logger=logger,
)

@classmethod
def _extract_torch_model_params(cls, **kwargs):
"""extract params from model creation to set up TorchForecastingModels"""
cls._validate_model_kwargs(**kwargs)
get_params = list(
inspect.signature(TorchForecastingModel.__init__).parameters.keys()
)
Expand Down Expand Up @@ -619,6 +648,13 @@ def fit(
override Darts' default trainer.
verbose
Optionally, whether to print progress.

.. deprecated:: v0.17.0
``verbose`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, control verbosity with PyTorch Lightning Trainer parameters ``enable_progress_bar``,
``progress_bar_refresh_rate`` and ``enable_model_summary`` in the ``pl_trainer_kwargs`` dict
at model creation. See for example here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#enable-progress-bar
epochs
If specified, will train the model for ``epochs`` (additional) epochs, irrespective of what ``n_epochs``
was provided to the model constructor.
Expand Down Expand Up @@ -764,6 +800,13 @@ def fit_from_dataset(
override Darts' default trainer.
verbose
Optionally, whether to print progress.

.. deprecated:: v0.17.0
``verbose`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, control verbosity with PyTorch Lightning Trainer parameters ``enable_progress_bar``,
``progress_bar_refresh_rate`` and ``enable_model_summary`` in the ``pl_trainer_kwargs`` dict
at model creation. See for example here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#enable-progress-bar
epochs
If specified, will train the model for ``epochs`` (additional) epochs, irrespective of what ``n_epochs``
was provided to the model constructor.
Expand Down Expand Up @@ -965,6 +1008,13 @@ def predict(
Size of batches during prediction. Defaults to the models' training ``batch_size`` value.
verbose
Optionally, whether to print progress.

.. deprecated:: v0.17.0
``verbose`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, control verbosity with PyTorch Lightning Trainer parameters ``enable_progress_bar``,
``progress_bar_refresh_rate`` and ``enable_model_summary`` in the ``pl_trainer_kwargs`` dict
at model creation. See for example here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#enable-progress-bar
n_jobs
The number of jobs to run in parallel. ``-1`` means using all processors. Defaults to ``1``.
roll_size
Expand Down Expand Up @@ -1084,7 +1134,14 @@ def predict_from_dataset(
batch_size
Size of batches during prediction. Defaults to the models ``batch_size`` value.
verbose
Shows the progress bar for batch prediction. Off by default.
Optionally, whether to print progress.

.. deprecated:: v0.17.0
``verbose`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, control verbosity with PyTorch Lightning Trainer parameters ``enable_progress_bar``,
``progress_bar_refresh_rate`` and ``enable_model_summary`` in the ``pl_trainer_kwargs`` dict
at model creation. See for example here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#enable-progress-bar
n_jobs
The number of jobs to run in parallel. ``-1`` means using all processors. Defaults to ``1``.
roll_size
Expand Down
9 changes: 8 additions & 1 deletion darts/models/forecasting/transformer_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,8 +289,15 @@ def __init__(
Number of epochs to wait before evaluating the validation loss (if a validation
``TimeSeries`` is passed to the :func:`fit()` method).
torch_device_str
Optionally, a string indicating the torch device to use. (default: "cuda:0" if a GPU
Optionally, a string indicating the torch device to use. (default: ``None``. Set "cuda:0" if a GPU
is available, otherwise "cpu")

.. deprecated:: v0.17.0
``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version.
Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your
``pl_trainer_kwargs`` dict. For more info, see here:
https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and
https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices
force_reset
If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will
be discarded).
Expand Down
79 changes: 79 additions & 0 deletions darts/tests/models/forecasting/test_torch_forecasting_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@
logger = get_logger(__name__)

try:
import torch

from darts.models.forecasting.rnn_model import RNNModel

TORCH_AVAILABLE = True
Expand Down Expand Up @@ -285,3 +287,80 @@ def test_train_from_10_n_epochs_20_fit_15_epochs(self):

model1.fit(series, epochs=15)
self.assertEqual(15, model1.epochs_trained)

def test_optimizers(self):
times = pd.date_range("20130101", "20130410")
pd_series = pd.Series(range(100), index=times)
series = TimeSeries.from_series(pd_series)

optimizers = [
(torch.optim.Adam, {"lr": 0.001}),
(torch.optim.SGD, {"lr": 0.001}),
]

for optim_cls, optim_kwargs in optimizers:
model = RNNModel(
12,
"RNN",
10,
10,
optimizer_cls=optim_cls,
optimizer_kwargs=optim_kwargs,
)
# should not raise an error
model.fit(series, epochs=1)

def test_lr_schedulers(self):
times = pd.date_range("20130101", "20130410")
pd_series = pd.Series(range(100), index=times)
series = TimeSeries.from_series(pd_series)

lr_schedulers = [
(torch.optim.lr_scheduler.LinearLR, {}),
(
torch.optim.lr_scheduler.ReduceLROnPlateau,
{"threshold": 0.001, "monitor": "train_loss"},
),
(torch.optim.lr_scheduler.ExponentialLR, {"gamma": 0.09}),
]

for lr_scheduler_cls, lr_scheduler_kwargs in lr_schedulers:
model = RNNModel(
12,
"RNN",
10,
10,
lr_scheduler_cls=lr_scheduler_cls,
lr_scheduler_kwargs=lr_scheduler_kwargs,
)
# should not raise an error
model.fit(series, epochs=1)

def test_devices(self):
torch_devices = [
("cpu", ("cpu", None, False)),
("cuda:0", ("gpu", [0], False)),
("cuda", ("gpu", -1, True)),
("auto", ("auto", None, False)),
]

for torch_device, settings in torch_devices:
accelerator, gpus, auto_select_gpus = settings
model = RNNModel(12, "RNN", 10, 10, torch_device_str=torch_device)

self.assertEqual(model.trainer_params["accelerator"], accelerator)
self.assertEqual(model.trainer_params["gpus"], gpus)
self.assertEqual(
model.trainer_params["auto_select_gpus"], auto_select_gpus
)

def test_wrong_model_creation_params(self):
valid_kwarg = {"pl_trainer_kwargs": {}}
invalid_kwarg = {"some_invalid_kwarg": None}

# valid params should not raise an error
_ = RNNModel(12, "RNN", 10, 10, **valid_kwarg)

# invalid params should raise an error
with self.assertRaises(ValueError):
_ = RNNModel(12, "RNN", 10, 10, **invalid_kwarg)