Merge branch 'master' into train_epoch_merge_datasets_batches
# Conflicts (solved):
#	src/trw/train/compatibility.py
#	src/trw/transforms/affine.py
ncapobianco committed Nov 12, 2020
2 parents 8a4fe91 + 3ea2346 commit 784ae13
Showing 81 changed files with 2,776 additions and 1,039 deletions.
18 changes: 18 additions & 0 deletions mypy.ini
@@ -0,0 +1,18 @@
[mypy]
mypy_path = ./src/trw
cache_dir = ../.mypy_cache

ignore_missing_imports = True
ignore_errors = True

#[mypy-src.trw.datasets.*]
#ignore_errors = False

[mypy-src.trw.utils.*]
ignore_errors = False

[mypy-src.trw.layers.*]
ignore_errors = False

[mypy-src.trw.transforms.*]
ignore_errors = False
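
With ignore_errors = True set globally and selectively switched back off, mypy still parses the whole tree but only reports errors inside trw.utils, trw.layers, and trw.transforms. As a minimal sketch (the function below is hypothetical, not from the repository), an error like this would now surface in an opted-in package and stay silent elsewhere:

from typing import Sequence

def first_dim(shape: Sequence[int]) -> int:
    return shape[0]

first_dim(3)  # mypy: Argument 1 to "first_dim" has incompatible type "int"; expected "Sequence[int]"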
3 changes: 3 additions & 0 deletions requirements.txt
@@ -9,3 +9,6 @@ tensorboard
onnx
bokeh >= 2.1
scipy
typing-extensions
threadpoolctl
psutil
1 change: 1 addition & 0 deletions src/trw/__init__.py
@@ -17,3 +17,4 @@
from . import hparams
from . import simple_layers
from . import arch
from . import basic_typing
82 changes: 82 additions & 0 deletions src/trw/basic_typing.py
@@ -0,0 +1,82 @@
from typing import Sequence, Union, Dict, Any, List
from typing_extensions import Protocol # backward compatibility for python 3.6-3.7
import numpy as np
import torch

"""Generic numeric type"""
Numeric = Union[int, float]

"""Generic Shape"""
Shape = Sequence[int]

"""Shape expressed as [N, C, D, H, W, ...] components"""
ShapeNCX = Sequence[int]

"""Shape expressed as [C, D, H, W, ...] components"""
ShapeCX = Sequence[int]

"""Shape expressed as [D, H, W, ...] components"""
ShapeX = Sequence[int]

"""Generic Tensor as numpy or torch"""
Tensor = Union[np.ndarray, torch.Tensor]

"""Generic Tensor as numpy or torch. Must be shaped as [N, C, D, H, W, ...]"""
TensorNCX = Union[np.ndarray, torch.Tensor]

"""Generic Tensor as numpy or torch. Must be shaped as 2D array [N, X]"""
TensorNX = Union[np.ndarray, torch.Tensor]

"""Generic Tensor with th `N` and `C` components removed"""
TensorX = Union[np.ndarray, torch.Tensor]


"""Torch Tensor. Must be shaped as [N, C, D, H, W, ...]"""
TorchTensorNCX = torch.Tensor

"""Torch Tensor. Must be shaped as 2D array [N, X]"""
TorchTensorNX = torch.Tensor

"""Torch Tensor with th `N` and `C` components removed"""
TorchTensorX = torch.Tensor


"""Numpy Tensor. Must be shaped as [N, C, D, H, W, ...]"""
NumpyTensorNCX = np.ndarray

"""Numpy Tensor. Must be shaped as 2D array [N, X]"""
NumpyTensorNX = np.ndarray

"""Numpy Tensor with th `N` and `C` components removed"""
NumpyTensorX = np.ndarray

"""Represent a dictionary of (key, value)"""
Batch = Dict[str, Any]

"""Length shaped as D, H, W, ..."""
Length = Sequence[float]

"""Represent a data split, a dictionary of any value"""
Split = Dict[str, Any]

"""Represent a dataset which is composed of named data splits"""
Dataset = Dict[str, Split]

"""Represent a collection of datasets"""
Datasets = Dict[str, Dataset]
DatasetsInfo = Datasets


Activation = Any


NestedIntSequence = List[Sequence[int]]

ConvKernels = Union[int, List[int], NestedIntSequence]
ConvStrides = ConvKernels
PoolingSizes = ConvKernels

Stride = Union[int, Sequence[int]]
KernelSize = Union[int, Sequence[int]]
Padding = Union[int, str, Sequence[int]]
Paddings = Union[str, int, List[int], List[str], NestedIntSequence]
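
These aliases document intent rather than enforce it: TensorNCX, TensorNX, and TensorX all reduce to the same Union[np.ndarray, torch.Tensor], so the N/C suffixes record the expected layout without constraining it at runtime or under mypy. A minimal sketch of the intended usage (the function below is hypothetical):

from trw.basic_typing import Batch, TensorNCX

def scale_images(batch: Batch, feature_name: str) -> TensorNCX:
    # works for numpy arrays and torch tensors alike; the [N, C, ...] layout is a convention
    images: TensorNCX = batch[feature_name]
    return images / 255.0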
20 changes: 13 additions & 7 deletions src/trw/datasets/medical_decathlon.py
@@ -110,11 +110,10 @@ def create_decathlon_dataset(
root: str = None,
transform_train: Transform = None,
transform_valid: Transform = None,
nb_workers: int = 2,
nb_workers: int = 4,
valid_ratio: float = 0.2,
batch_size: int = 1,
remove_patient_transform=False,
reservoir_size: int = 20):
remove_patient_transform=False):
"""
Create a task of the medical decathlon dataset.
@@ -126,10 +125,9 @@
root: the root folder where the data will be created and possibly downloaded
transform_train: a function that takes a batch of training data and returns a transformed batch
transform_valid: a function that takes a batch of validation data and returns a transformed batch
nb_workers: the number of worker in each reservoir
nb_workers: the number of workers used for the preprocessing
valid_ratio: the ratio of validation data
batch_size: the batch size
reservoir_size: the maximum number of samples stored in the reservoir
remove_patient_transform: if ``True``, remove the affine transformation attached to the voxels
Returns:
@@ -158,7 +156,11 @@
])
load_data_train = functools.partial(_load_case_adaptor, dataset=dataset, transform_fn=transform_train)
sequence_train = SequenceArray(data_train, sampler=sampler_train, sample_uid_name='sample_uid')
sequence_train = sequence_train.async_reservoir(function_to_run=load_data_train, min_reservoir_samples=reservoir_size, max_reservoir_samples=reservoir_size, nb_workers=nb_workers, max_reservoir_replacement_size=10, max_jobs_at_once=20)
sequence_train = sequence_train.map(
function_to_run=load_data_train,
nb_workers=nb_workers,
max_jobs_at_once=4
)
sequence_train = sequence_train.collate()

sampler_valid = SamplerSequential(batch_size=batch_size)
@@ -168,7 +170,11 @@

load_data_valid = functools.partial(_load_case_adaptor, dataset=dataset, transform_fn=transform_valid)
sequence_valid = SequenceArray(data_valid, sampler=sampler_valid, sample_uid_name='sample_uid')
sequence_valid = sequence_valid.map(functools.partial(load_data_valid, dataset=dataset))
sequence_valid = sequence_valid.map(
functools.partial(load_data_valid, dataset=dataset),
nb_workers=nb_workers,
max_jobs_at_once=4
)
sequence_valid = sequence_valid.collate()

dataset = collections.OrderedDict([
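The training split thus moves from async_reservoir, which kept a bounded cache of preprocessed samples and replaced them a few at a time (the min/max_reservoir_samples and max_reservoir_replacement_size arguments in the removed call), to a plain worker-backed map; the validation split gains workers too. The resulting pipeline shape, sketched with placeholder names (data, sampler, load_fn are stand-ins):

sequence = SequenceArray(data, sampler=sampler, sample_uid_name='sample_uid')
sequence = sequence.map(
    function_to_run=load_fn,  # e.g. functools.partial(_load_case_adaptor, dataset=dataset, transform_fn=transform)
    nb_workers=nb_workers,    # preprocessing worker processes
    max_jobs_at_once=4        # presumably bounds how many jobs are queued ahead
)
sequence = sequence.collate()  # assemble the loaded samples into batches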
2 changes: 1 addition & 1 deletion src/trw/datasets/voc2012.py
@@ -37,7 +37,7 @@ def default_voc_transforms():
return trw.transforms.TransformCompose([
trw.transforms.TransformResize(size=[250, 250]),
#trw.transforms.TransformRandomCropPad(feature_names=['images', 'masks'], padding=None, shape=[3, 224, 224]),
trw.transforms.TransformNormalize(criteria_fn=criteria_images, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
trw.transforms.TransformNormalizeIntensity(criteria_fn=criteria_images, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])


4 changes: 2 additions & 2 deletions src/trw/layers/__init__.py
@@ -1,6 +1,6 @@
from .ops_conversion import OpsConversion
from .layer_config import LayerConfig, default_layer_config, NormType
from .blocks import BlockConvNormActivation, BlockDeconvNormActivation, BlockUpDeconvSkipConv, BlockPool
from .blocks import BlockConvNormActivation, BlockDeconvNormActivation, BlockUpDeconvSkipConv, BlockPool, BlockRes

from .utils import div_shape
from .flatten import Flatten
@@ -20,4 +20,4 @@
from .autoencoder_convolutional_variational import AutoencoderConvolutionalVariational
from .autoencoder_convolutional_variational_conditional import AutoencoderConvolutionalVariationalConditional
from .gan import Gan, GanDataPool

from .encoder_decoder_resnet import EncoderDecoderResnet
11 changes: 6 additions & 5 deletions src/trw/layers/autoencoder_convolutional.py
@@ -3,11 +3,12 @@

import torch
import torch.nn as nn
from trw.basic_typing import NestedIntSequence, Activation
from trw.layers import ModuleWithIntermediate, ConvsBase, ConvsTransposeBase, crop_or_pad_fun, NormType, LayerConfig, \
default_layer_config


class AutoencoderConvolutional(nn.Module, ModuleWithIntermediate, ABC):
class AutoencoderConvolutional(nn.Module, ModuleWithIntermediate):
"""
Convolutional autoencoder
@@ -23,11 +24,11 @@ def __init__(
encoder_channels: Sequence[int],
decoder_channels: Sequence[int],
convolution_kernels: Optional[Union[int, Sequence[int]]] = 5,
encoder_strides: Union[int, Sequence[int]] = 1,
decoder_strides: Union[int, Sequence[int]] = 2,
pooling_size: Optional[Union[int, Sequence[int]]] = 2,
encoder_strides: Union[int, List[int], NestedIntSequence] = 1,
decoder_strides: Union[int, List[int], NestedIntSequence] = 2,
pooling_size: Optional[Union[int, List[int], NestedIntSequence]] = 2,
convolution_repeats: Union[int, Sequence[int]] = 1,
activation: Any = nn.ReLU,
activation: Optional[Activation] = nn.ReLU,
dropout_probability: Optional[float] = None,
norm_type: NormType = NormType.BatchNorm,
norm_kwargs: Dict = {},
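The widened annotations spell out the accepted forms via the new aliases; given NestedIntSequence = List[Sequence[int]], a stride or pooling argument can plausibly be passed three ways (interpretation assumed from the alias, not confirmed by this diff):

encoder_strides = 2                          # int: one stride shared by every layer
encoder_strides = [2, 2, 1]                  # List[int]: one stride per layer
encoder_strides = [(2, 2), (2, 2), (1, 1)]   # NestedIntSequence: per layer, per spatial axis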
2 changes: 1 addition & 1 deletion src/trw/layers/autoencoder_convolutional_variational.py
@@ -19,7 +19,7 @@ class AutoencoderConvolutionalVariational(nn.Module):
"""
def __init__(
self,
input_shape: Union[List[int], Tuple[int, ...], Sequence[int]],
input_shape: Union[torch.Size, List[int], Tuple[int, ...]],
encoder: nn.Module,
decoder: nn.Module,
z_size: int,
src/trw/layers/autoencoder_convolutional_variational_conditional.py
@@ -1,4 +1,4 @@
from typing import Union, Sequence, List
from typing import Union, Sequence, List, Tuple

import torch.nn as nn
import torch
@@ -19,7 +19,7 @@ class AutoencoderConvolutionalVariationalConditional(nn.Module):
"""
def __init__(
self,
input_shape: Union[Sequence[int], List[int]],
input_shape: Union[torch.Size, List[int], Tuple[int, ...]],
encoder: nn.Module,
decoder: nn.Module,
z_size: int,