Remove deprecations for 0.6.0 release (#476)
* Remove deprecated f argument from save_params

Use f_params instead.
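
A minimal migration sketch; net stands for any initialized skorch net and the
file name is arbitrary:

    # old, removed: net.save_params('my-params.pt')
    # new: pass the target file explicitly via f_params
    net.save_params(f_params='my-params.pt')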

* Remove deprecated save_history and load_history

Now use save_params with f_history keyword.
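
For example (net again stands for an initialized skorch net):

    # old, removed:
    # net.save_history('history.json')
    # net.load_history('history.json')
    # new:
    net.save_params(f_history='history.json')
    net.load_params(f_history='history.json')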

* Remove deprecated filter_requires_grad and filtered_optimizer

Now use skorch.callbacks.Freezer if you want to freeze parameters.
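
A rough sketch of the replacement; MyModule and the parameter-name pattern are
placeholders for your own module and layer names:

    from skorch import NeuralNetClassifier
    from skorch.callbacks import Freezer

    net = NeuralNetClassifier(
        MyModule,                            # placeholder module
        callbacks=[Freezer('embedding.*')],  # freeze parameters whose names match the pattern
    )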

* Remove deprecated "wrong" slice of history

It used to be possible to write history[j, 'batches', somekey], but that was
inconsistent and should really be history[j, 'batches', :, somekey], because
the second-to-last index refers to the batch indices, not the batch keys. This
inconsistent behavior is no longer supported.
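
In other words, taking train_loss as the example key:

    # old, removed:
    # net.history[:, 'batches', 'train_loss']
    # new, with an explicit batch-index slice:
    net.history[:, 'batches', :, 'train_loss']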

* Remove support for unpickling old skorch nets

With PyTorch 1.1.0, we can no longer unpickle old models that have an
optimizer (i.e. basically all models). Therefore, we may as well drop
the support for unpickling old files now, when moving to PyTorch
1.1.0.
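
If you still depend on such a file, one option (a sketch, assuming you still
have an environment with a skorch/PyTorch combination that can read it) is to
load it once and write it back so it uses the new format:

    import pickle

    with open('old_net.pkl', 'rb') as f:  # placeholder file names
        net = pickle.load(f)
    with open('net.pkl', 'wb') as f:
        pickle.dump(net, f)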

* Remove target argument from Checkpoint

Use f_params instead.
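
For example (the file name is arbitrary):

    from skorch.callbacks import Checkpoint

    # old, removed: Checkpoint(target='params.pt')
    cp = Checkpoint(f_params='params.pt', monitor='valid_loss_best')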

* Remove deprecated device argument from Dataset

This argument didn't do anything. Now passing it raises an error.
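
The device is controlled by the net instead; in this sketch, MyModule, X, and y
are placeholders:

    from skorch import NeuralNetClassifier
    from skorch.dataset import Dataset

    ds = Dataset(X, y)  # no device argument anymore
    net = NeuralNetClassifier(MyModule, device='cuda')  # the net moves data to the device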

* Change TrainEndCheckpoint fn_prefix default to 'train_end_'

It used to be 'final_'.
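
To keep the old file names, pass the prefix explicitly:

    from skorch.callbacks import TrainEndCheckpoint

    cp = TrainEndCheckpoint()                        # now writes e.g. train_end_params.pt
    cp_old = TrainEndCheckpoint(fn_prefix='final_')  # restores the previous file names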

* Bump up PyTorch version to 1.1.0 in travis.yml

We need it for the CyclicLR implementation in PyTorch.
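
For reference, a sketch of using CyclicLR through skorch's LRScheduler callback,
assuming extra keyword arguments are forwarded to the policy class; MyModule is
a placeholder:

    from torch.optim.lr_scheduler import CyclicLR
    from skorch import NeuralNetClassifier
    from skorch.callbacks import LRScheduler

    scheduler = LRScheduler(policy=CyclicLR, base_lr=1e-3, max_lr=1e-2)
    net = NeuralNetClassifier(MyModule, callbacks=[scheduler])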
BenjaminBossan committed May 21, 2019
1 parent 8f7fa15 commit 4d68086
Showing 12 changed files with 11 additions and 446 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -5,7 +5,7 @@ python:
- "3.6"
- "3.5"
env:
- PYTORCH_VERSION="1.0.0"
- PYTORCH_VERSION="1.1.0"

cache:
apt: true
1 change: 1 addition & 0 deletions CHANGES.md
@@ -22,6 +22,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
`net.set_params(criterion__weight=w)`
- skorch pickle format changed in order to improve CUDA compatibility, if you have pickled models, please re-pickle them to be able to load them in the future
- `net.criterion_` and its parameters are now moved to target device when using criteria that inherit from `torch.nn.Module`. Previously the user had to make sure that parameters such as class weight are on the compute device
- skorch now assumes PyTorch >= 1.1.0. This mainly affects learning rate schedulers, whose inner workings have been changed with version 1.1.0. This update will also invalidate pickled skorch models after a change introduced in PyTorch optimizers.

### Fixed

8 changes: 4 additions & 4 deletions README.rst
@@ -213,10 +213,10 @@ If you want to help developing, run:
PyTorch
=======

PyTorch is not covered by the dependencies, since the PyTorch
version you need is dependent on your system. For installation
instructions for PyTorch, visit the `PyTorch website
<http://pytorch.org/>`__.
PyTorch is not covered by the dependencies, since the PyTorch version
you need is dependent on your system. For installation instructions
for PyTorch, visit the `PyTorch website <http://pytorch.org/>`__. The
current version of skorch assumes PyTorch >= 1.1.0.

In general, this should work (assuming CUDA 9):

26 changes: 3 additions & 23 deletions skorch/callbacks/training.py
@@ -61,8 +61,6 @@ class Checkpoint(Callback):
Parameters
----------
target : deprecated
monitor : str, function, None
Value of the history to monitor or callback that determines
whether this epoch should lead to a checkpoint. The callback
@@ -121,10 +119,10 @@ class Checkpoint(Callback):
The target that the information about created checkpoints is
sent to. This can be a logger or ``print`` function (to send to
stdout). By default the output is discarded.
"""
def __init__(
self,
target=None,
monitor='valid_loss_best',
f_params='params.pt',
f_optimizer='optimizer.pt',
@@ -135,14 +133,6 @@ def __init__(
event_name='event_cp',
sink=noop,
):
if target is not None:
warnings.warn(
"target argument was renamed to f_params and will be removed "
"in the next release. To make your code future-proof it is "
"recommended to explicitly specify keyword arguments' names "
"instead of relying on positional order.",
DeprecationWarning)
f_params = target
self.monitor = monitor
self.f_params = f_params
self.f_optimizer = f_optimizer
@@ -656,13 +646,10 @@ class TrainEndCheckpoint(Callback):
Supports the same format specifiers as ``f_params``.
fn_prefix: str (default='final_')
fn_prefix: str (default='train_end_')
Prefix for filenames. If ``f_params``, ``f_optimizer``, ``f_history``,
or ``f_pickle`` are strings, they will be prefixed by ``fn_prefix``.
``fn_prefix`` default value will change from 'final_'
to 'train_end_' in 0.5.0.
dirname: str (default='')
Directory where files are stored.
@@ -677,17 +664,10 @@ def __init__(
f_optimizer='optimizer.pt',
f_history='history.json',
f_pickle=None,
fn_prefix=None,
fn_prefix='train_end_',
dirname='',
sink=noop,
):
# TODO: Remove warning in release 0.5.0
if fn_prefix is None:
warnings.warn(
"'fn_prefix' default value will change from 'final_' "
"to 'train_end_' in 0.5.0", FutureWarning)
fn_prefix = 'final_'

self.f_params = f_params
self.f_optimizer = f_optimizer
self.f_history = f_history
7 changes: 0 additions & 7 deletions skorch/dataset.py
@@ -149,15 +149,8 @@ def __init__(
self,
X,
y=None,
device=None,
length=None,
):
# TODO: Remove warning in release 0.4
if device is not None:
warnings.warn(
"device is no longer needed by Dataset and will be ignored.",
DeprecationWarning)

self.X = X
self.y = y

49 changes: 0 additions & 49 deletions skorch/helper.py
@@ -131,55 +131,6 @@ def __ne__(self, other):
return not self.__eq__(other)


# TODO: remove in 0.5.0
def filter_requires_grad(pgroups):
"""Returns parameter groups where parameters
that don't require a gradient are filtered out.
Parameters
----------
pgroups : dict
Parameter groups to be filtered
"""
warnings.warn(
"For filtering gradients, please use skorch.callbacks.Freezer.",
DeprecationWarning)

for pgroup in pgroups:
output = {k: v for k, v in pgroup.items() if k != 'params'}
output['params'] = (p for p in pgroup['params'] if p.requires_grad)
yield output


# TODO: remove in 0.5.0
def filtered_optimizer(optimizer, filter_fn):
"""Wraps an optimizer that filters out parameters where
``filter_fn`` over ``pgroups`` returns ``False``.
This function can be used, for example, to filter parameters
that do not require a gradient:
>>> from skorch.helper import filtered_optimizer, filter_requires_grad
>>> optimizer = filtered_optimizer(torch.optim.SGD, filter_requires_grad)
>>> net = NeuralNetClassifier(module, optimizer=optimizer)
Parameters
----------
optimizer : torch optim (class)
The uninitialized optimizer that is wrapped
filter_fn : function
Use this function to filter parameter groups before passing
it to ``optimizer``.
"""
warnings.warn(
"For filtering gradients, please use skorch.callbacks.Freezer.",
DeprecationWarning)

return partial(_make_optimizer, optimizer=optimizer, filter_fn=filter_fn)


def predefined_split(dataset):
"""Uses ``dataset`` for validiation in ``NeutralNet``.
16 changes: 0 additions & 16 deletions skorch/history.py
@@ -52,22 +52,6 @@ def _unpack_index(i):
# fill trailing indices with None
i_e, k_e, i_b, k_b = i + tuple([None] * (4 - len(i)))

# handle special case of
# history[j, 'batches', somekey]
# which should really be
# history[j, 'batches', :, somekey]
if i_b is not None and not isinstance(i_b, (int, slice)):
if k_b is not None:
raise KeyError("The last argument '{}' is invalid; it must be a "
"string or tuple of strings.".format(k_b))
warnings.warn(
"Argument 3 to history slicing must be of type int or slice, e.g. "
"history[:, 'batches', 'train_loss'] should be "
"history[:, 'batches', :, 'train_loss'].",
DeprecationWarning,
)
i_b, k_b = slice(None), i_b

return i_e, k_e, i_b, k_b


95 changes: 2 additions & 93 deletions skorch/net.py
@@ -1416,33 +1416,13 @@ def __getstate__(self):

return state

# TODO: remove this with the next release
def __setstate_050__(self, load_kwargs, state):
warnings.warn(
"This pickle file will stop working in the next release since "
"the data format changed. Please re-pickle the model to avoid "
"any issues in the future.", DeprecationWarning)
# workaround for cuda_dependent_attributes_ being misused as storage
# during __getstate__ in skorch <= 0.5.0.
original_cuda_dependent_attributes = self.cuda_dependent_attributes_
with tempfile.SpooledTemporaryFile() as f:
f.write(state['cuda_dependent_attributes_'])
f.seek(0)
cuda_attrs = torch.load(f, **load_kwargs)
state.update(cuda_attrs)
state['cuda_dependent_attributes_'] = original_cuda_dependent_attributes
self.__dict__.update(state)

def __setstate__(self, state):
# get_map_location will automatically choose the
# right device in cases where CUDA is not available.
map_location = get_map_location(state['device'])
load_kwargs = {'map_location': map_location}
state['device'] = self._check_device(state['device'], map_location)

if '__cuda_dependent_attributes__' not in state:
return self.__setstate_050__(load_kwargs, state)

with tempfile.SpooledTemporaryFile() as f:
f.write(state['__cuda_dependent_attributes__'])
f.seek(0)
@@ -1454,7 +1434,7 @@ def __setstate__(self, state):
self.__dict__.update(state)

def save_params(
self, f=None, f_params=None, f_optimizer=None, f_history=None):
self, f_params=None, f_optimizer=None, f_history=None):
"""Saves the module's parameters, history, and optimizer,
not the whole object.
@@ -1474,8 +1454,6 @@ def save_params(
f_history : file-like object, str, None (default=None)
Path to history. Pass ``None`` to not save
f : deprecated
Examples
--------
>>> before = NeuralNetClassifier(mymodule)
@@ -1488,17 +1466,6 @@ def save_params(
>>> f_history='history.json')
"""

# TODO: Remove warning in a future release
if f is not None:
warnings.warn(
"f argument was renamed to f_params and will be removed "
"in the next release. To make your code future-proof it is "
"recommended to explicitly specify keyword arguments' names "
"instead of relying on positional order.",
DeprecationWarning)
f_params = f

if f_params is not None:
if not hasattr(self, 'module_'):
raise NotInitializedError(
@@ -1536,7 +1503,7 @@ def _check_device(self, requested_device, map_device):
return requested_device

def load_params(
self, f=None, f_params=None, f_optimizer=None, f_history=None,
self, f_params=None, f_optimizer=None, f_history=None,
checkpoint=None):
"""Loads the the module's parameters, history, and optimizer,
not the whole object.
@@ -1562,8 +1529,6 @@ def load_params(
path is passed in, the ``f_*`` will be loaded. Pass
``None`` to not load.
f : deprecated
Examples
--------
>>> before = NeuralNetClassifier(mymodule)
@@ -1581,14 +1546,6 @@ def _get_state_dict(f):
self.device = self._check_device(self.device, map_location)
return torch.load(f, map_location=map_location)

# TODO: Remove warning in a future release
if f is not None:
warnings.warn(
"f is deprecated in save_params and will be removed in the "
"next release, please use f_params instead",
DeprecationWarning)
f_params = f

if f_history is not None:
self.history = History.from_file(f_history)

@@ -1617,54 +1574,6 @@ def _get_state_dict(f):
state_dict = _get_state_dict(f_optimizer)
self.optimizer_.load_state_dict(state_dict)

def save_history(self, f):
"""Saves the history of ``NeuralNet`` as a json file. In order
to use this feature, the history must only contain JSON encodable
Python data structures. Numpy and PyTorch types should not
be in the history.
Parameters
----------
f : file-like object or str
Examples
--------
>>> before = NeuralNetClassifier(mymodule)
>>> before.fit(X, y, epoch=2) # Train for 2 epochs
>>> before.save_params('path/to/params')
>>> before.save_history('path/to/history.json')
>>> after = NeuralNetClassifier(mymodule).initialize()
>>> after.load_params('path/to/params')
>>> after.load_history('path/to/history.json')
>>> after.fit(X, y, epoch=2) # Train for another 2 epochs
"""
# TODO: Remove warning in a future release
warnings.warn(
"save_history is deprecated and will be removed in the next "
"release, please use save_params with the f_history keyword",
DeprecationWarning)

self.history.to_file(f)

def load_history(self, f):
"""Load the history of a ``NeuralNet`` from a json file. See
``save_history`` for examples.
Parameters
----------
f : file-like object or str
"""
# TODO: Remove warning in a future release
warnings.warn(
"load_history is deprecated and will be removed in the next "
"release, please use load_params with the f_history keyword",
DeprecationWarning)

self.history = History.from_file(f)

def __repr__(self):
params = self.get_params(deep=False)

7 changes: 0 additions & 7 deletions skorch/tests/callbacks/test_training.py
@@ -280,13 +280,6 @@ def test_save_no_targets(
assert save_params_mock.call_count == 0
assert pickle_dump_mock.call_count == 0

def test_target_argument(self, net_cls, checkpoint_cls):
# TODO: remove this test when the target argument is removed
# after its deprecation grace period is over.
with pytest.warns(DeprecationWarning):
checkpoint = checkpoint_cls(target='foobar.pt')
assert checkpoint.f_params == 'foobar.pt'

def test_warnings_when_monitor_appears_in_history(
self, net_cls, checkpoint_cls, save_params_mock, data):
net = net_cls(callbacks=[
