This repository has been archived by the owner on Apr 19, 2023. It is now read-only.

Commit f18a526
Merge pull request #156 from inferno-pytorch/super-dev
Super dev
constantinpape committed Dec 19, 2018
2 parents 84e22bc + 477a4b8
Showing 62 changed files with 533 additions and 272 deletions.
47 changes: 8 additions & 39 deletions .travis.yml
@@ -1,12 +1,15 @@
language: python

dist: xenial

python:
- 3.6
- 3.7

env:
- PYTORCH_CONDA="pytorch" TORCHVISION_CONDA="torchvision" TORCHVISION_CHANNEL=pytorch
- PYTORCH_CONDA="pytorch" TORCHVISION_CONDA="torchvision=0.1.9" TORCHVISION_CHANNEL=soumith
- PYTORCH_CONDA="pytorch=0.3.1" TORCHVISION_CONDA="torchvision=0.1.9" TORCHVISION_CHANNEL=soumith
# FIXME pytorch 1.0 multi-processing is broken ...
# - PYTORCH_CONDA="pytorch" TORCHVISION_CONDA="torchvision" TORCHVISION_CHANNEL=pytorch
- PYTORCH_CONDA="pytorch=0.4.1" TORCHVISION_CONDA="torchvision" TORCHVISION_CHANNEL=pytorch

install:
- wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
@@ -16,10 +19,9 @@ install:
- conda update -q conda
- conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION
- source activate test-environment
- conda install -c conda-forge networkx=1.11 h5py numpy scikit-image scipy pyyaml dill cython
- conda install -c conda-forge networkx=1.11 h5py scikit-image pyyaml dill tensorboardx
- conda install -c pytorch $PYTORCH_CONDA
- conda install -c $TORCHVISION_CHANNEL $TORCHVISION_CONDA
- pip install tensorboardX

deploy:
provider: pypi
@@ -44,38 +46,5 @@ deploy:

script:
- source activate test-environment
- pip install pytest | cat
- pytest tests/test_inferno.py
- python setup.py install

# # This file was autogenerated and will overwrite each time you run travis_pypi_setup.py
# deploy:
# true:
# python: 3.5
# repo: DerThorsten/inferno-1
# tags: true
# distributions: sdist bdist_wheel
# password:
# secure: !!binary |
# cFJOVG9kNW0xNjExaWN2UVZWVGl5RVVjUlBBTnAzV2hoNDdPQUh4TUw3QUFVbDNQVWlCV09wZ3Iv
# anRCcjNrVnVUY0RDTng5dEorNGpNUWhxakN4ZStadEo5Z3N2LytTeEdjUzk1WVFET2k5bHhxcTRX
# UnlrbHd4V2t1UVkzV3NuRGh5d1ZOTUFrcTZWY0VaMEQzVmVVWkFSRDZ4ejRJYlZURlVqb01OaVNI
# LzRKNFVZdmxlOUR0emFiTHBRUjBhUDhQTElIZGRHSVE0ditLZDk2Z3FSYnh0U0Npa01zOVZqWXE0
# c0JiWmUzSy8yZDJXekV2U1JoN0NrWUthblZqclp6SzQ4bmYvcHVib2paS2wwNDYyU2JQM1RuTXh6
# WTh6dHFWb3pZdjViM3FXOEVHYUd0czQzd3hNUkNTNmVOT3QwVk0ycERDdEhWaS9VRTlWTUdlRVln
# ZytVYmJLTWhRellmZlRXNFdBbVVnbFlvSGwxb3poQ2JrZ3AzZEJWZkdTbFdBRmY2OFpGVFUxZTBN
# eXJZQ0UrQkJLd29YNHA5MzNiTzc2NnhRN0F1TnlHaXRDU0VnRmw2Rmp4ME9LY3RBeHczSGE2dFVB
# YVdUWVphd2tQaW9NajdTcVpxYXJQQzFIWmFod2FBbDBRVXpoU3dicW9QYm5zVGlaV2cvRW1HOXlJ
# aXJxa2pSbjdOME9HTmg3N1k4N3ZlWjJQVVVLYTRxR2k4YXlNZW1WeEFCRmFXQklLU1Z2cGFnQ2ZI
# TXRlcGxadXFGejNNeWdpQnV1S1lDNTJXK0d0Y0tITTV1YmNVTzN3aVFRRjZQdmFWaFVQNmVFN2pV
# MXJyRE9Ja01oMXBpODhJSmdWL2NjU2RnOWZjdDdJcEpkZ0NQYzVxMGM2MGpYaklnWUczdkN6Sm89
# user: DerThorsten
# provider: pypi
# #install:
# # #- pip install http://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp35-cp35m-manylinux1_x86_64.whl
# # #- pip install torchvision
# # - pip install --process-dependency-links -U tox-travis
# language: python
# python:
# - 3.5
# #script: tox
- python -m unittest discover -s tests -v
6 changes: 3 additions & 3 deletions README.rst
@@ -78,7 +78,7 @@ Current features include:
nn.MaxPool2d(kernel_size=2, stride=2),
Flatten(),
nn.Linear(in_features=(256 * 4 * 4), out_features=10),
nn.Softmax()
nn.LogSoftmax(dim=1)
)
# Load loaders
@@ -87,15 +87,15 @@ Current features include:
# Build trainer
trainer = Trainer(model) \
.build_criterion('CrossEntropyLoss') \
.build_criterion('NLLLoss') \
.build_metric('CategoricalError') \
.build_optimizer('Adam') \
.validate_every((2, 'epochs')) \
.save_every((5, 'epochs')) \
.save_to_directory(SAVE_DIRECTORY) \
.set_max_num_epochs(10) \
.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),
log_images_every='never'),
log_images_every='never'),
log_directory=LOG_DIRECTORY)
# Bind loaders
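The LogSoftmax / NLLLoss pairing that the README example now uses is numerically equivalent to feeding raw logits to CrossEntropyLoss; a minimal standalone sketch (not part of this diff) that checks the equivalence:

import torch
import torch.nn as nn

logits = torch.randn(4, 10)            # raw network outputs
targets = torch.randint(0, 10, (4,))

# CrossEntropyLoss on raw logits ...
ce = nn.CrossEntropyLoss()(logits, targets)
# ... equals NLLLoss applied after LogSoftmax, the combination used above.
nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
assert torch.allclose(ce, nll)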
4 changes: 2 additions & 2 deletions examples/plot_train_side_loss_unet.py
@@ -15,8 +15,8 @@
from inferno.trainers.basic import Trainer

from inferno.extensions.layers.convolutional import Conv2D
from inferno.extensions.model.res_unet import _ResBlock as ResBlock
from inferno.extensions.model import ResBlockUNet
from inferno.extensions.models.res_unet import _ResBlock as ResBlock
from inferno.extensions.models import ResBlockUNet
from inferno.utils.torch_utils import unwrap
from inferno.utils.python_utils import ensure_dir
import pylab
4 changes: 2 additions & 2 deletions examples/plot_unet_tutorial.py
@@ -70,7 +70,7 @@ def label_transform(x):
# With :code:`activated=False` we make sure that the last layer
# is not activated since we chain the UNet with a sigmoid
# activation function.
from inferno.extensions.model import ResBlockUNet
from inferno.extensions.models import ResBlockUNet
from inferno.extensions.layers import RemoveSingletonDimension

model = torch.nn.Sequential(
@@ -199,7 +199,7 @@ def predict(trainer, test_loader, save_dir=None):
# a rather exotic UNet which uses different types
# of convolutions/non-linearities in the different branches
# of the unet
from inferno.extensions.model import UNetBase
from inferno.extensions.models import UNetBase
from inferno.extensions.layers import ConvSELU2D, ConvReLU2D, ConvELU2D, ConvSigmoid2D,Conv2D

class MySimple2DUnet(UNetBase):
6 changes: 5 additions & 1 deletion inferno/extensions/__init__.py
@@ -4,5 +4,9 @@
from . import layers
from . import metrics
from . import optimizers
from . import models
# Backward support
from . import models as model

__all__ = ['containers', 'criteria', 'initializers', 'layers', 'metrics', 'optimizers']
__all__ = ['containers', 'criteria', 'initializers', 'layers', 'metrics', 'optimizers',
'models', 'model']
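With the alias added above, the old and the new import paths resolve to the same module object; a quick sketch, assuming inferno is installed from this commit:

from inferno.extensions import model, models

# 'model' is kept only for backward compatibility; both names point to the same module.
assert model is models
from inferno.extensions.models import ResBlockUNet   # the preferred spelling going forward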
6 changes: 3 additions & 3 deletions inferno/extensions/containers/graph.py
@@ -357,7 +357,7 @@ def get_module_for_nodes(self, names):
modules.append(module)
return pyu.from_iterable(modules)

def to_device(self, names, target_device, device_ordinal=None, asynchron=False):
def to_device(self, names, target_device, device_ordinal=None, asynchronous=False):
"""Transfer nodes in the network to a specified device."""
names = pyu.to_iterable(names)
for name in names:
@@ -368,7 +368,7 @@ def to_device(self, names, target_device, device_ordinal=None, asynchron=False):
# Transfer
module_on_device = OnDevice(module, target_device,
device_ordinal=device_ordinal,
asynchron=asynchron)
asynchronous=asynchronous)
setattr(self, name, module_on_device)
return self

@@ -474,4 +474,4 @@ def forward(self, *inputs):
# Clear payloads for next pass
self.clear_payloads()
# Done.
return pyu.from_iterable(outputs)
return pyu.from_iterable(outputs)
68 changes: 67 additions & 1 deletion inferno/extensions/layers/convolutional.py
@@ -16,7 +16,9 @@
'BNReLUConv2D', 'BNReLUConv3D',
'BNReLUDepthwiseConv2D',
'ConvSELU2D', 'ConvSELU3D',
'ConvReLU2D', 'ConvReLU3D']
'ConvReLU2D', 'ConvReLU3D',
'BNReLUDilatedConv2D', 'DilatedConv2D',
'GlobalConv2D']
_all = __all__


@@ -236,6 +238,17 @@ def __init__(self, in_channels, out_channels, kernel_size, dilation=2):
activation='ELU',
initialization=OrthogonalWeightsZeroBias())

class DilatedConv2D(ConvActivation):
"""2D dilated convolutional layer with 'SAME' padding, no activation and orthogonal weight initialization."""
def __init__(self, in_channels, out_channels, kernel_size, dilation=2):
super(DilatedConv2D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
dilation=dilation,
dim=2,
activation=None,
initialization=OrthogonalWeightsZeroBias())


class ConvReLU2D(ConvActivation):
"""2D Convolutional layer with 'SAME' padding, ReLU and Kaiming normal weight initialization."""
@@ -342,6 +355,21 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1):
initialization=KaimingNormalWeightsZeroBias(0))
self.batchnorm = nn.BatchNorm2d(in_channels)

class BNReLUDilatedConv2D(_BNReLUSomeConv,ConvActivation):
"""
2D dilated convolutional layer with 'SAME' padding, Batch norm, Relu and He
weight initialization.
"""
def __init__(self, in_channels, out_channels, kernel_size, dilation=2):
super(BNReLUDilatedConv2D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
dilation=dilation,
dim=2,
activation=nn.ReLU(inplace=True),
initialization=KaimingNormalWeightsZeroBias(0))
self.batchnorm = nn.BatchNorm2d(in_channels)


class BNReLUConv3D(_BNReLUSomeConv, ConvActivation):
"""
@@ -441,3 +469,41 @@ def __init__(self, in_channels, out_channels, kernel_size):
dim=3,
activation=activation,
initialization=SELUWeightsZeroBias())

class GlobalConv2D(nn.Module):
"""From https://arxiv.org/pdf/1703.02719.pdf
Main idea: we can have a bigger kernel size computationally acceptable
if we separate 2D-conv in 2 1D-convs """
def __init__(self, in_channels, out_channels, kernel_size, local_conv_type, activation=None, use_BN=False, **kwargs):
super(GlobalConv2D, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
assert isinstance(kernel_size, (int, list, tuple))
if isinstance(kernel_size, int):
kernel_size = (kernel_size,)*2
self.kwargs=kwargs
self.conv1a = local_conv_type(in_channels=self.in_channels, out_channels=self.out_channels,
kernel_size=(kernel_size[0], 1), **kwargs)
self.conv1b = local_conv_type(in_channels=self.out_channels, out_channels=self.out_channels,
kernel_size=(1, kernel_size[1]), **kwargs)
self.conv2a = local_conv_type(in_channels=self.in_channels, out_channels=self.out_channels,
kernel_size=(1, kernel_size[1]), **kwargs)
self.conv2b = local_conv_type(in_channels=self.out_channels, out_channels=self.out_channels,
kernel_size=(kernel_size[0], 1), **kwargs)
if use_BN:
self.batchnorm = nn.BatchNorm2d(self.out_channels)
else:
self.batchnorm = None
self.activation = activation
def forward(self, input_):
out1 = self.conv1a(input_)
out1 = self.conv1b(out1)
out2 = self.conv2a(input_)
out2 = self.conv2b(out2)
out = out1.add(1,out2)
if self.activation is not None:
out = self.activation(out)
if self.batchnorm is not None:
out = self.batchnorm(out)
return out
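The factorization described in the GlobalConv2D docstring (covering a large k x k receptive field with a (k x 1) convolution followed by a (1 x k) one, plus the mirrored branch) can be sketched with plain torch.nn.Conv2d layers; the channel counts and sizes below are hypothetical, and the plain layers stand in for the inferno wrappers:

import torch
import torch.nn as nn

k = 7                                               # large effective kernel
branch1 = nn.Sequential(                            # (k x 1) followed by (1 x k)
    nn.Conv2d(3, 8, kernel_size=(k, 1), padding=(k // 2, 0)),
    nn.Conv2d(8, 8, kernel_size=(1, k), padding=(0, k // 2)),
)
branch2 = nn.Sequential(                            # mirrored: (1 x k) followed by (k x 1)
    nn.Conv2d(3, 8, kernel_size=(1, k), padding=(0, k // 2)),
    nn.Conv2d(8, 8, kernel_size=(k, 1), padding=(k // 2, 0)),
)

x = torch.randn(1, 3, 32, 32)
out = branch1(x) + branch2(x)                       # GlobalConv2D sums the two branches
print(out.shape)                                    # torch.Size([1, 8, 32, 32])

Each branch's cost grows linearly in k rather than quadratically, which is what makes the large receptive field affordable.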
17 changes: 9 additions & 8 deletions inferno/extensions/layers/device.py
@@ -5,17 +5,18 @@
__all__ = ['DeviceTransfer', 'OnDevice']
_all = __all__


class DeviceTransfer(nn.Module):
"""Layer to transfer variables to a specified device."""
def __init__(self, target_device, device_ordinal=None, asynchron=False):
def __init__(self, target_device, device_ordinal=None, asynchronous=False):
"""
Parameters
----------
target_device : {'cpu', 'cuda'}
Device to transfer to.
device_ordinal : int
Device ordinal if target_device == 'cuda'.
asynchron : bool
asynchronous : bool
Whether to use asynchronous transfers.
"""
super(DeviceTransfer, self).__init__()
@@ -29,11 +30,11 @@ def __init__(self, target_device, device_ordinal=None, asynchron=False):
DeviceError)
self.target_device = target_device
self.device_ordinal = device_ordinal
self.asynchron = asynchron

def forward(self, *inputs):
if self.target_device == 'cuda':
transferred = tuple(input_.cuda(device_id=self.device_ordinal, asynchron=self.asynchron)
transferred = tuple(input_.cuda(device=self.device_ordinal,
non_blocking=self.asynchronous)
for input_ in inputs)
elif self.target_device == 'cpu':
transferred = tuple(input_.cpu() for input_ in inputs)
@@ -48,7 +49,7 @@ class OnDevice(nn.Module):
that the inputs are transferred to the same device as the module, enabling easy model
parallelism.
"""
def __init__(self, module, target_device, device_ordinal=None, asynchron=False):
def __init__(self, module, target_device, device_ordinal=None, asynchronous=False):
"""
Parameters
----------
@@ -58,7 +59,7 @@ def __init__(self, module, target_device, device_ordinal=None, asynchron=False):
The device to move `module` to. Must be either 'cuda' or 'cpu'.
device_ordinal : int
Ordinal of the GPU device if `target_device = 'cuda'`.
asynchron : bool
asynchronous : bool
Whether to use asynchronous transfers.
"""
super(OnDevice, self).__init__()
@@ -72,11 +73,11 @@ def __init__(self, module, target_device, device_ordinal=None, asynchron=False):
DeviceError)
self.target_device = target_device
self.device_ordinal = device_ordinal
self.asynchron = asynchron
self.asynchronous = asynchronous
# This is a no-op if module is already in the right device
self.device_transfer = DeviceTransfer(self.target_device,
device_ordinal=self.device_ordinal,
asynchron=self.asynchron)
asynchronous=self.asynchronous)

self.module = self.transfer_module(module)

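The renamed asynchronous flag is forwarded to PyTorch's non_blocking transfer argument, as the updated forward above shows. A minimal sketch of that call in isolation, assuming a CUDA device is present (pinned host memory is what lets the copy actually overlap with computation):

import torch

if torch.cuda.is_available():
    x = torch.randn(1024, 1024).pin_memory()       # page-locked host memory
    y = x.cuda(device=0, non_blocking=True)        # returns immediately, copy runs in the background
    torch.cuda.synchronize()                       # wait for the transfer before relying on y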
27 changes: 26 additions & 1 deletion inferno/extensions/layers/sampling.py
100644 → 100755
@@ -1,6 +1,6 @@
import torch.nn as nn

__all__ = ['AnisotropicUpsample', 'AnisotropicPool', 'Upsample']
__all__ = ['AnisotropicUpsample', 'AnisotropicPool', 'Upsample', 'AnisotropicUpsample2D', 'AnisotropicPool2D']


# torch is deprecating nn.Upsample in favor of nn.functional.interpolate
@@ -56,4 +56,29 @@ def __init__(self, downscale_factor):
stride=(1, ds, ds),
padding=(0, 1, 1))

class AnisotropicUpsample2D(nn.Module):
def __init__(self, scale_factor):
super(AnisotropicUpsample2D, self).__init__()
self.upsampler = nn.Upsample(scale_factor=scale_factor)

def forward(self, input):
# input is 2D of shape NCDW (or NCDH, it doesn't matter)
N, C, D, W = input.size()
# Fold C and D axes in one
folded = input.view(N, C * D, W)
# Upsample
upsampled = self.upsampler(folded)
# Unfold out the C and D axes
unfolded = upsampled.view(N, C, D,
self.upsampler.scale_factor * W)
# Done
return unfolded


class AnisotropicPool2D(nn.MaxPool2d):
def __init__(self, downscale_factor):
ds = downscale_factor
super(AnisotropicPool2D, self).__init__(kernel_size=(1, ds + 1),
stride=(1, ds),
padding=(0, 1))
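The fold / upsample / unfold trick used by AnisotropicUpsample2D above, merging the channel and depth axes so that the upsampler only stretches the last axis, written out with plain tensor ops and hypothetical shapes:

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 8, 16)                        # N, C, D, W
N, C, D, W = x.shape
folded = x.view(N, C * D, W)                        # merge C and D into a single axis
upsampled = F.interpolate(folded, scale_factor=2)   # 3D input: only W is resized
out = upsampled.view(N, C, D, 2 * W)                # back to N, C, D, 2*W; D is untouched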

2 changes: 2 additions & 0 deletions inferno/extensions/metrics/categorical.py
@@ -89,6 +89,8 @@ def forward(self, prediction, target):
raise ShapeError("Target must have the same number of dimensions as the "
"prediction, or one less. Got target.dim() = {} but "
"prediction.dim() = {}.".format(target.dim(), prediction.dim()))
# Cast onehot_targets to float if required (this is a no-op if it's already float)
onehot_targets = onehot_targets.float()
# Sharpen prediction if required to. Sharpening in this sense means to replace
# the max predicted probability with 1.
if self.sharpen_prediction:
