This repository has been archived by the owner on Apr 19, 2023. It is now read-only.

Merge branch 'master' of github.com:inferno-pytorch/inferno
Steffen-Wolf committed Jul 31, 2019
2 parents 9af9e63 + 34f3ae7 commit 7be8093
Showing 28 changed files with 269 additions and 208 deletions.
9 changes: 7 additions & 2 deletions .travis.yml
@@ -7,10 +7,15 @@ python:
- 3.7

env:
  # FIXME multi-processing hangs with pytorch 1.0 and unittest test discovery
# - PYTORCH_CONDA="pytorch" TORCHVISION_CONDA="torchvision" TORCHVISION_CHANNEL=pytorch
- PYTORCH_CONDA="pytorch" TORCHVISION_CONDA="torchvision" TORCHVISION_CHANNEL=pytorch
- PYTORCH_CONDA="pytorch=0.4.1" TORCHVISION_CONDA="torchvision" TORCHVISION_CHANNEL=pytorch

# exclude hanging build
matrix:
exclude:
- python: 3.6
env: PYTORCH_CONDA="pytorch" TORCHVISION_CONDA="torchvision" TORCHVISION_CHANNEL=pytorch

install:
- wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
- bash miniconda.sh -b -p $HOME/miniconda
10 changes: 5 additions & 5 deletions docs/usage.rst
@@ -43,9 +43,9 @@ With our model built, it's time to worry about the data generators. Or is it?
.. code:: python
from inferno.io.box.cifar import get_cifar10_loaders
train_loader, validate_loader = get_cifar10_loaders('path/to/cifar10',
download=True,
train_batch_size=128,
test_batch_size=100)
CIFAR-10 works out-of-the-`box` (pun very much intended) with all the fancy data-augmentation and normalization. Of course, it's perfectly fine if you have your own [`DataLoader`](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader).
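
If you do build your own loaders, here is a minimal sketch with toy stand-in datasets; the last line assumes the trainer exposes a ``bind_loader(name, loader)`` method and is therefore left commented out:

.. code:: python

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # Toy stand-in data; replace with your own Dataset implementations.
    train_data = TensorDataset(torch.rand(512, 3, 32, 32), torch.randint(0, 10, (512,)))
    validate_data = TensorDataset(torch.rand(100, 3, 32, 32), torch.randint(0, 10, (100,)))

    train_loader = DataLoader(train_data, batch_size=128, shuffle=True)
    validate_loader = DataLoader(validate_data, batch_size=100)

    # With a trainer built as in the rest of this guide (assumed API):
    # trainer.bind_loader('train', train_loader).bind_loader('validate', validate_loader)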
@@ -117,7 +117,7 @@ or
trainer.build_metric(MyMetric, **my_metric_kwargs)
Note that the metric applies to `torch.Tensor`s, and not on `torch.autograd.Variable`s. Also, a metric might be way too expensive to evaluate every training iteration without slowing down the training. If this is the case and you'd like to evaluate the metric every (say) 10 *training* iterations:
A metric might be way too expensive to evaluate every training iteration without slowing down the training. If this is the case and you'd like to evaluate the metric every (say) 10 *training* iterations:

.. code:: python
@@ -254,7 +254,7 @@ Inferno supports logging scalars and images to Tensorboard out-of-the-box, though
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
trainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
log_images_every=(20, 'iterations')),
log_directory='/path/to/log/directory')
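The logged scalars and images can then be inspected by pointing TensorBoard at the same directory, e.g. ``tensorboard --logdir /path/to/log/directory``.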
4 changes: 1 addition & 3 deletions inferno/extensions/containers/graph.py
@@ -48,7 +48,6 @@ def copy(self, **init_kwargs):
return new



class Graph(nn.Module):
"""
A graph structure to build networks with complex architectures. The resulting graph model
@@ -64,7 +63,6 @@ class Graph(nn.Module):
>>> from inferno.extensions.layers.reshape import Concatenate
>>> from inferno.extensions.layers.convolutional import ConvELU2D
>>> import torch
>>> from torch.autograd import Variable
>>> # Build the model
>>> inception_module = Graph()
>>> inception_module.add_input_node('input')
@@ -75,7 +73,7 @@
>>> previous=['conv1x1', 'conv3x3', 'conv5x5'])
>>> inception_module.add_output_node('output', 'cat')
>>> # Build dummy variable
>>> input = Variable(torch.rand(1, 64, 100, 100))
>>> input = torch.rand(1, 64, 100, 100)
>>> # Get output
>>> output = inception_module(input)
3 changes: 1 addition & 2 deletions inferno/extensions/criteria/elementwise_measures.py
@@ -1,5 +1,4 @@
import torch.nn as nn
from torch.autograd import Variable
from ...utils.exceptions import assert_


@@ -26,5 +25,5 @@ def forward(self, input, target):
# Get final weight by adding weight differential to a tensor with negative weights
weights = weight_differential.add_(self.NEGATIVE_CLASS_WEIGHT)
# `weights` should be positive if NEGATIVE_CLASS_WEIGHT is not messed with.
sqrt_weights = Variable(weights.sqrt_(), requires_grad=False)
sqrt_weights = weights.sqrt_()
return self.mse(input * sqrt_weights, target * sqrt_weights)
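
For context on the snippet above: multiplying both input and target by the square root of the weights before an MSE is the same as an elementwise weighted MSE. A standalone sketch of that identity (plain PyTorch, not this module's API):

.. code:: python

    import torch
    import torch.nn.functional as F

    x, y = torch.rand(8), torch.rand(8)
    w = torch.rand(8) + 0.5                        # positive elementwise weights

    lhs = F.mse_loss(x * w.sqrt(), y * w.sqrt())   # what forward() computes
    rhs = (w * (x - y) ** 2).mean()                # explicit weighted MSE
    assert torch.allclose(lhs, rhs)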
16 changes: 7 additions & 9 deletions inferno/extensions/criteria/set_similarity_measures.py
@@ -1,6 +1,5 @@
import torch.nn as nn
from ...utils.torch_utils import flatten_samples
from torch.autograd import Variable

__all__ = ['SorensenDiceLoss', 'GeneralizedDiceLoss']

@@ -53,11 +52,9 @@ def forward(self, input, target):
# With pytorch < 0.2, channelwise_loss.size = (C, 1).
if channelwise_loss.dim() == 2:
channelwise_loss = channelwise_loss.squeeze(1)
# Wrap weights in a variable
weight = Variable(self.weight, requires_grad=False)
assert weight.size() == channelwise_loss.size()
assert self.weight.size() == channelwise_loss.size()
# Apply weight
channelwise_loss = weight * channelwise_loss
channelwise_loss = self.weight * channelwise_loss
# Sum over the channels to compute the total loss
loss = channelwise_loss.sum()
return loss
@@ -104,7 +101,7 @@ def forward(self, input, target):
else:
def flatten_and_preserve_channels(tensor):
tensor_dim = tensor.dim()
assert tensor_dim >= 3
num_channels = tensor.size(1)
num_classes = tensor.size(2)
# Permute the channel axis to first
@@ -131,10 +128,11 @@ def flatten_and_preserve_channels(tensor):
if self.weight is not None:
if channelwise_loss.dim() == 2:
channelwise_loss = channelwise_loss.squeeze(1)
channel_weights = Variable(self.weight, requires_grad=False)
assert channel_weights.size() == channelwise_loss.size(), "`weight` should have shape (nb_channels, ), `target` should have shape (batch_size, nb_channels, nb_classes, ...)"
assert self.weight.size() == channelwise_loss.size(),\
"""`weight` should have shape (nb_channels, ),
`target` should have shape (batch_size, nb_channels, nb_classes, ...)"""
# Apply channel weights:
channelwise_loss = channel_weights * channelwise_loss
channelwise_loss = self.weight * channelwise_loss

loss = channelwise_loss.sum()

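
For reference, a minimal usage sketch of the channelwise weighting exercised above; it assumes ``SorensenDiceLoss`` accepts an optional per-channel ``weight`` tensor of shape ``(C,)`` in its constructor, as the ``self.weight`` handling suggests:

.. code:: python

    import torch
    from inferno.extensions.criteria.set_similarity_measures import SorensenDiceLoss

    criterion = SorensenDiceLoss(weight=torch.tensor([1.0, 2.0, 0.5]))  # assumed kwarg, shape (C,)
    prediction = torch.rand(4, 3, 32, 32)                # (batch, channels, H, W)
    target = (torch.rand(4, 3, 32, 32) > 0.5).float()
    loss = criterion(prediction, target)                 # weighted sum of channelwise Dice losses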
7 changes: 0 additions & 7 deletions inferno/extensions/initializers/presets.py
@@ -1,6 +1,5 @@
import numpy as np
import torch.nn.init as init
from torch.autograd import Variable
from functools import partial

from .base import Initialization, Initializer
@@ -19,8 +18,6 @@ def __init__(self, constant):
self.constant = constant

def call_on_tensor(self, tensor):
if isinstance(tensor, Variable):
tensor = tensor.data
tensor.fill_(self.constant)
return tensor

@@ -42,9 +39,6 @@ def compute_fan_in(self, tensor):
return np.prod(list(tensor.size())[1:])

def call_on_weight(self, tensor):
if isinstance(tensor, Variable):
self.call_on_weight(tensor.data)
return tensor
# Compute stddev if required
if self.sqrt_gain_over_fan_in is not None:
stddev = self.stddev * \
@@ -85,4 +79,3 @@ def __init__(self):
super(ELUWeightsZeroBias, self)\
.__init__(weight_initializer=NormalWeights(sqrt_gain_over_fan_in=1.5505188080679277),
bias_initializer=Constant(0.))

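For context, a tiny sketch exercising the ``Constant`` preset touched above; it relies only on the ``call_on_tensor`` behaviour visible in this diff (filling the tensor in place), so treat it as illustrative rather than the documented entry point:

.. code:: python

    import torch
    from inferno.extensions.initializers.presets import Constant

    bias = torch.empty(16)
    Constant(0.).call_on_tensor(bias)   # fills in place and returns the tensor
    assert torch.all(bias == 0.)
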
29 changes: 11 additions & 18 deletions inferno/extensions/models/unet.py
@@ -119,14 +119,18 @@ def __init__(self, in_channels, dim, out_channels=None, depth=3,
assert len(self.n_channels_per_output) == self._store_conv_down.count(True) + \
self._store_conv_up.count(True) + int(self._store_conv_bottom)

def _get_num_channels(self, depth):
assert depth > 0
return self.in_channels * self.gain**depth

def _init__downstream(self):
conv_down_ops = []
self._store_conv_down = []

current_in_channels = self.in_channels

for i in range(self.depth):
out_channels = current_in_channels * self.gain
out_channels = self._get_num_channels(i + 1)
op, return_op_res = self.conv_op_factory(in_channels=current_in_channels,
out_channels=out_channels,
part='down', index=i)
@@ -138,7 +142,7 @@ def _init__downstream(self):
self._store_conv_down.append(False)

# increase the number of channels
current_in_channels *= self.gain
current_in_channels = out_channels

# store as proper torch ModuleList
self._conv_down_ops = nn.ModuleList(conv_down_ops)
@@ -147,9 +151,7 @@ def _init__downstream(self):

def _init__bottom(self):

conv_up_ops = []

current_in_channels = self.in_channels* self.gain**self.depth
current_in_channels = self._get_num_channels(self.depth)

factory_res = self.conv_op_factory(in_channels=current_in_channels,
out_channels=current_in_channels, part='bottom', index=0)
@@ -163,12 +165,12 @@ def flatten_and_preserve_channels(tensor):

def _init__upstream(self):
conv_up_ops = []
current_in_channels = self.in_channels * self.gain**self.depth
current_in_channels = self._get_num_channels(self.depth)

for i in range(self.depth):
# the number of out channels (set to self.out_channels for last decoder)
out_channels = self.out_channels if i +1 == self.depth else\
current_in_channels // self.gain
out_channels = self.out_channels if i + 1 == self.depth else \
self._get_num_channels(self.depth - i - 1)

# if not residual we concat which needs twice as many channels
fac = 1 if self.residual else 2
@@ -186,7 +188,7 @@
self._store_conv_up.append(False)

# decrease the number of input_channels
current_in_channels //= self.gain
current_in_channels = out_channels

        # store as proper torch ModuleList
self._conv_up_ops = nn.ModuleList(conv_up_ops)
@@ -311,15 +313,6 @@ def upsample_op_factory(self, index):\
return InfernoUpsample(**self._upsample_kwargs)
#return nn.Upsample(**self._upsample_kwargs)

def pre_conv_op_regularizer_factory(self, in_channels, out_channels, part, index):
if self.use_dropout and in_channels > 2:
return self._channel_dropout_op(x)
else:
return Identity()

def post_conv_op_regularizer_factory(self, in_channels, out_channels, part, index):
return Identity()

def conv_op_factory(self, in_channels, out_channels, part, index):
raise NotImplementedError("conv_op_factory need to be implemented by deriving class")

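
For intuition on the refactor above: all channel bookkeeping now goes through ``_get_num_channels(depth) = in_channels * gain**depth``. A standalone sketch of the resulting channel progression (illustrative numbers only, not the class's API):

.. code:: python

    in_channels, gain, depth, out_channels = 16, 2, 3, 4   # illustrative values

    def get_num_channels(d):
        assert d > 0
        return in_channels * gain ** d

    encoder = [get_num_channels(i + 1) for i in range(depth)]      # [32, 64, 128]
    bottom = get_num_channels(depth)                               # 128
    decoder = [out_channels if i + 1 == depth else get_num_channels(depth - i - 1)
               for i in range(depth)]                              # [64, 32, 4]
    print(encoder, bottom, decoder)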
