Skip to content

Commit

Permalink
WIP: Update to 0.4.1 (#298)
Browse files Browse the repository at this point in the history
* ENH: Update to 0.4.1

* RFC: Updates to torch.utils.data.Subset

* ENH: Update references to 0.4.1

* DOC: Updates notebook
  • Loading branch information
thomasjpfan authored and ottonemo committed Aug 1, 2018
1 parent 2ac11de commit 40f23d2
Show file tree
Hide file tree
Showing 13 changed files with 264 additions and 275 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
Expand Up @@ -24,6 +24,6 @@ install:
- source activate skorch-env
- conda install --file=requirements-dev.txt
- python setup.py install
- conda install -c pytorch 'pytorch-cpu>=0.4.0'
- conda install -c pytorch 'pytorch-cpu>=0.4.1'
script:
- pytest
4 changes: 2 additions & 2 deletions Dockerfile
@@ -1,12 +1,12 @@
FROM nvidia/cuda:8.0-cudnn7-devel
FROM nvidia/cuda:9.0-cudnn7-runtime

RUN apt-get update && \
apt-get install -y python3.5-dev vim git g++ sudo zip python3-setuptools
RUN easy_install3 --upgrade pip setuptools

ENV PIP_CACHE_DIR=/cache PYTHONDONTWRITEBYTECODE=1

RUN pip3 install http://download.pytorch.org/whl/cu90/torch-0.4.0-cp35-cp35m-linux_x86_64.whl
RUN pip3 install torch
RUN pip3 install torchvision

WORKDIR /app
Expand Down
2 changes: 1 addition & 1 deletion README.rst
Expand Up @@ -212,7 +212,7 @@ In general, this should work (assuming CUDA 9):
# using conda:
conda install pytorch cuda90 -c pytorch
# using pip
pip install http://download.pytorch.org/whl/cu90/torch-0.4.0-cp36-cp36m-linux_x86_64.whl
pip install torch
=============
Communication
Expand Down
2 changes: 1 addition & 1 deletion docs/requirements.txt
@@ -1,4 +1,4 @@
numpy>=1.13.3
http://download.pytorch.org/whl/cpu/torch-0.4.0-cp35-cp35m-linux_x86_64.whl
torch==0.4.1
sphinx-rtd-theme==0.4.0
numpydoc==0.8.0
154 changes: 78 additions & 76 deletions notebooks/Advanced_Usage.ipynb

Large diffs are not rendered by default.

333 changes: 160 additions & 173 deletions notebooks/Basic_Usage.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion skorch/__init__.py
Expand Up @@ -10,7 +10,7 @@
from . import callbacks


MIN_TORCH_VERSION = '0.4.0'
MIN_TORCH_VERSION = '0.4.1'

try:
# pylint: disable=wrong-import-position
Expand Down
2 changes: 1 addition & 1 deletion skorch/classifier.py
Expand Up @@ -352,7 +352,7 @@ def predict_proba(self, X):
for yp in self.forward_iter(X, training=False):
yp = yp[0] if isinstance(yp, tuple) else yp
if bce_logits_loss:
yp = torch.nn.functional.sigmoid(yp)
yp = torch.sigmoid(yp)
y_probas.append(to_numpy(yp))
y_proba = np.concatenate(y_probas, 0)
return y_proba
8 changes: 4 additions & 4 deletions skorch/dataset.py
Expand Up @@ -50,10 +50,10 @@ def get_len(data):
def uses_placeholder_y(ds):
"""If ``ds`` is a ``skorch.dataset.Dataset`` or a
``skorch.dataset.Dataset`` nested inside a
``torch.utils.data.dataset.Subset`` and uses
``torch.utils.data.Subset`` and uses
y as a placeholder, return ``True``."""

if isinstance(ds, torch.utils.data.dataset.Subset):
if isinstance(ds, torch.utils.data.Subset):
return uses_placeholder_y(ds.dataset)
return isinstance(ds, Dataset) and ds.y is None

Expand Down Expand Up @@ -274,8 +274,8 @@ def __call__(self, dataset, y=None, groups=None):
args = args + (to_numpy(y),)

idx_train, idx_valid = next(iter(cv.split(*args, groups=groups)))
dataset_train = torch.utils.data.dataset.Subset(dataset, idx_train)
dataset_valid = torch.utils.data.dataset.Subset(dataset, idx_valid)
dataset_train = torch.utils.data.Subset(dataset, idx_train)
dataset_valid = torch.utils.data.Subset(dataset, idx_valid)
return dataset_train, dataset_valid

def __repr__(self):
Expand Down
8 changes: 4 additions & 4 deletions skorch/tests/test_classifier.py
Expand Up @@ -84,10 +84,10 @@ def test_predict_and_predict_proba(self, net_fit, data):
X = data[0]

y_proba = net_fit.predict_proba(X)
assert np.allclose(y_proba.sum(1), 1, rtol=1e-7)
assert np.allclose(y_proba.sum(1), 1, rtol=1e-5)

y_pred = net_fit.predict(X)
assert np.allclose(np.argmax(y_proba, 1), y_pred, rtol=1e-7)
assert np.allclose(np.argmax(y_proba, 1), y_pred, rtol=1e-5)

# classifier-specific test
def test_takes_log_with_nllloss(self, net_cls, module_cls, data):
Expand Down Expand Up @@ -212,7 +212,7 @@ def test_target_2d_raises(self, net, data):
def test_custom_loss_does_not_call_sigmoid(
self, net_cls, data, module_cls, monkeypatch):
mock = Mock(side_effect=lambda x: x)
monkeypatch.setattr(torch.nn.functional, "sigmoid", mock)
monkeypatch.setattr(torch, "sigmoid", mock)

net = net_cls(module_cls, max_epochs=1, lr=0.1, criterion=nn.MSELoss)
X, y = data
Expand All @@ -224,7 +224,7 @@ def test_custom_loss_does_not_call_sigmoid(
def test_default_loss_does_call_sigmoid(
self, net_cls, data, module_cls, monkeypatch):
mock = Mock(side_effect=lambda x: x)
monkeypatch.setattr(torch.nn.functional, "sigmoid", mock)
monkeypatch.setattr(torch, "sigmoid", mock)

net = net_cls(module_cls, max_epochs=1, lr=0.1)
X, y = data
Expand Down
2 changes: 1 addition & 1 deletion skorch/tests/test_dataset.py
Expand Up @@ -503,7 +503,7 @@ def __iter__(self):

@pytest.fixture
def data(self):
X = torch.arange(0, 12).view(4, 3)
X = torch.arange(0, 12, dtype=torch.float32).view(4, 3)
y = torch.LongTensor([0, 1, 1, 0])
return X, y

Expand Down
8 changes: 4 additions & 4 deletions skorch/tests/test_net.py
Expand Up @@ -409,14 +409,14 @@ def test_set_params_works(self, net, data):

net.set_params(
module__num_units=20,
module__nonlin=F.tanh,
module__nonlin=torch.tanh,
lr=0.2,
)
net.fit(X, y)

assert net.module_.dense0.out_features == 20
assert net.module_.dense1.in_features == 20
assert net.module_.nonlin is F.tanh
assert net.module_.nonlin is torch.tanh
assert np.isclose(net.lr, 0.2)

def test_set_params_then_initialize_remembers_param(
Expand Down Expand Up @@ -493,13 +493,13 @@ def test_module_params_in_init(self, net_cls, module_cls, data):
net = net_cls(
module=module_cls,
module__num_units=20,
module__nonlin=F.tanh,
module__nonlin=torch.tanh,
)
net.fit(X, y)

assert net.module_.dense0.out_features == 20
assert net.module_.dense1.in_features == 20
assert net.module_.nonlin is F.tanh
assert net.module_.nonlin is torch.tanh

def test_module_initialized_with_partial_module(self, net_cls, module_cls):
net = net_cls(partial(module_cls, num_units=123))
Expand Down
12 changes: 6 additions & 6 deletions skorch/utils.py
Expand Up @@ -68,11 +68,11 @@ def to_tensor(X, device):
elif isinstance(X, (list, tuple)):
return [to_tensor_(x) for x in X]
elif np.isscalar(X):
return torch.tensor(X).to(device)
return torch.as_tensor(X, device=device)
elif isinstance(X, Sequence):
return torch.tensor(np.array(X)).to(device)
return torch.as_tensor(np.array(X), device=device)
elif isinstance(X, np.ndarray):
return torch.tensor(X).to(device)
return torch.as_tensor(X, device=device)
elif isinstance(X, nn.utils.rnn.PackedSequence):
return X
else:
Expand Down Expand Up @@ -327,9 +327,9 @@ def data_from_dataset(dataset, X_indexing=None, y_indexing=None):
Parameters
----------
dataset : skorch.dataset.Dataset or torch.utils.data.dataset.Subset
dataset : skorch.dataset.Dataset or torch.utils.data.Subset
The incoming dataset should be a ``skorch.dataset.Dataset`` or a
``torch.utils.data.dataset.Subset`` of a
``torch.utils.data.Subset`` of a
``skorch.dataset.Dataset``.
X_indexing : function/callable or None (default=None)
Expand Down Expand Up @@ -359,7 +359,7 @@ def data_from_dataset(dataset, X_indexing=None, y_indexing=None):
def is_skorch_dataset(ds):
"""Checks if the supplied dataset is an instance of
``skorch.dataset.Dataset`` even when it is nested inside
``torch.util.data.dataset.Subset``."""
``torch.utils.data.Subset``."""
from skorch.dataset import Dataset
if isinstance(ds, Subset):
return is_skorch_dataset(ds.dataset)
Expand Down

0 comments on commit 40f23d2

Please sign in to comment.