Commit 34f344e: Aim PyTorch 1.4.0

sublee committed Jan 18, 2020
2 parents b3ae91c + 2dbce8d

Showing 20 changed files with 520 additions and 166 deletions.
3 changes: 2 additions & 1 deletion benchmarks/unet-timeline/gpu_utils.py
@@ -2,7 +2,7 @@
from contextlib import contextmanager
import multiprocessing as mp
import subprocess
from typing import Dict, Generator, List
from typing import Dict, Generator, List, cast


def collect_gpu_utils(device_ids: List[int]) -> List[int]:
@@ -48,6 +48,7 @@ def track_gpu_utils(device_ids: List[int],
) -> Generator[List[float], None, None]:
# Spawn a worker.
ctx = mp.get_context('spawn')
ctx = cast(mp.context.DefaultContext, ctx)
conn, conn_worker = ctx.Pipe(duplex=True)
p = ctx.Process(target=_worker, args=(device_ids, interval, conn_worker))
p.start()
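
A minimal sketch of the pattern above, assuming the cast exists only to satisfy mypy: the stubs give get_context() a broader context type, so it is narrowed before Pipe()/Process() are used. The worker and function names here are illustrative, not part of the benchmark code.

import multiprocessing as mp
from multiprocessing.connection import Connection
from typing import cast

def _echo_worker(conn: Connection) -> None:
    # Hypothetical worker: reply once with whatever it receives.
    conn.send(conn.recv())

def run_once(value: int) -> int:
    ctx = mp.get_context('spawn')
    ctx = cast(mp.context.DefaultContext, ctx)  # narrow the stub type for mypy
    conn, conn_worker = ctx.Pipe(duplex=True)
    p = ctx.Process(target=_echo_worker, args=(conn_worker,))
    p.start()
    conn.send(value)
    result = conn.recv()
    p.join()
    return result

if __name__ == '__main__':
    assert run_once(42) == 42
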
497 changes: 403 additions & 94 deletions stubs/torch/__init__.pyi

Large diffs are not rendered by default.

11 changes: 6 additions & 5 deletions stubs/torch/cuda/__init__.pyi
@@ -26,7 +26,7 @@ _device_t = Union[_device, int]
def check_error(res: int) -> None: ...
def device_count() -> int: ...
def empty_cache() -> None: ...
def synchronize(device: _device_t = ...) -> None: ...
def synchronize(device: _device_t) -> None: ...
def set_device(device: _device_t) -> None: ...
def get_device_capability(device: Optional[_device_t]=...) -> Tuple[int, int]: ...
def get_device_name(device: Optional[_device_t]=...) -> str: ...
@@ -40,6 +40,11 @@ def max_memory_cached(device: Optional[_device_t]=...) -> int: ...
def reset_max_memory_cached(device: Optional[_device_t]=...) -> None: ...
def cudart() -> ctypes.CDLL: ...
def find_cuda_windows_lib() -> Optional[ctypes.CDLL]: ...
#MODIFIED BY TORCHGPIPE
from .. import ByteTensor
def set_rng_state(new_state: ByteTensor, device: _device_t = ...) -> None: ...
def get_rng_state(device: _device_t = ...) -> ByteTensor: ...
#END

#MODIFIED BY TORCHGPIPE
from typing import Any
@@ -61,8 +66,4 @@ class stream:

def current_stream(device: Optional[_device_t]) -> Stream: ...
def default_stream(device: Optional[_device_t]) -> Stream: ...

from .. import ByteTensor
def set_rng_state(new_state: ByteTensor, device: _device_t = ...) -> None: ...
def get_rng_state(device: _device_t = ...) -> ByteTensor: ...
#END
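
A minimal sketch, assuming PyTorch 1.4 with CUDA available, of the calls the relocated stubs above describe: get_rng_state() snapshots the CUDA RNG as a ByteTensor and set_rng_state() restores it, so repeated sampling is reproducible.

import torch

if torch.cuda.is_available():
    state = torch.cuda.get_rng_state()   # ByteTensor snapshot of the CUDA RNG
    a = torch.randn(3, device='cuda')
    torch.cuda.set_rng_state(state)      # restore the snapshot
    b = torch.randn(3, device='cuda')
    assert torch.equal(a, b)             # same state, same samples
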
14 changes: 2 additions & 12 deletions stubs/torch/nn/__init__.pyi
@@ -1,16 +1,6 @@
from .modules import *
from .modules import *
from .parameter import Parameter as Parameter
from .parallel import DataParallel as DataParallel
from . import init as init
from . import utils as utils

#MODIFIED BY TORCHGPIPE
from .. import Tensor
class Flatten(Module):
start_dim: int
end_dim: int
def forward(self, input: Tensor) -> Tensor: ... # type: ignore

class Identity(Module):
def forward(self, input: Tensor) -> Tensor: ... # type: ignore
#END
from . import functional as functional
10 changes: 7 additions & 3 deletions stubs/torch/nn/common_types.pyi
@@ -11,14 +11,18 @@ _scalar_or_tuple_any_t = Union[T, Tuple[T, ...]]
_scalar_or_tuple_1_t = Union[T, Tuple[T]]
_scalar_or_tuple_2_t = Union[T, Tuple[T, T]]
_scalar_or_tuple_3_t = Union[T, Tuple[T, T, T]]
_scalar_or_tuple_4_t = Union[T, Tuple[T, T, T, T]]
_scalar_or_tuple_5_t = Union[T, Tuple[T, T, T, T, T]]
_scalar_or_tuple_6_t = Union[T, Tuple[T, T, T, T, T, T]]

# For arguments which represent size parameters (eg, kernel size, padding)
_size_any_t = _scalar_or_tuple_any_t[int]
_size_1_t = _scalar_or_tuple_1_t[int]
#MODIFIED BY TORCHGPIPE
_size_2_t = Union[int, Tuple[int, int], Tuple[int, int, int, int]]
#END
_size_2_t = _scalar_or_tuple_2_t[int]
_size_3_t = _scalar_or_tuple_3_t[int]
_size_4_t = _scalar_or_tuple_4_t[int]
_size_5_t = _scalar_or_tuple_5_t[int]
_size_6_t = _scalar_or_tuple_6_t[int]

# For arguments that represent a ratio to adjust each dimension of an input with (eg, upsampling parameters)
_ratio_2_t = _scalar_or_tuple_2_t[float]
3 changes: 3 additions & 0 deletions stubs/torch/nn/functional.pyi
@@ -137,6 +137,9 @@ def rrelu(input: Tensor, lower: float = ..., upper: float = ..., training: bool
inplace: bool = ...) -> Tensor: ...


def gelu(input: Any): ...


def hardshrink(input: Tensor, lambd: float = ...) -> Tensor: ...


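
A minimal sketch, assuming PyTorch 1.4: with the stub above, calls to F.gelu type-check.

import torch
import torch.nn.functional as F

x = torch.randn(4)
y = F.gelu(x)   # Gaussian Error Linear Unit, applied elementwise
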
16 changes: 10 additions & 6 deletions stubs/torch/nn/modules/__init__.pyi
@@ -1,9 +1,9 @@
from .module import Module as Module
from .activation import CELU as CELU, ELU as ELU, GLU as GLU, Hardshrink as Hardshrink, Hardtanh as Hardtanh, \
LeakyReLU as LeakyReLU, LogSigmoid as LogSigmoid, LogSoftmax as LogSoftmax, PReLU as PReLU, RReLU as RReLU, \
ReLU as ReLU, ReLU6 as ReLU6, SELU as SELU, Sigmoid as Sigmoid, Softmax as Softmax, Softmax2d as Softmax2d, \
Softmin as Softmin, Softplus as Softplus, Softshrink as Softshrink, Softsign as Softsign, Tanh as Tanh, \
Tanhshrink as Tanhshrink, Threshold as Threshold
from .activation import CELU as CELU, ELU as ELU, GLU as GLU, GELU as GELU, Hardshrink as Hardshrink, \
Hardtanh as Hardtanh, LeakyReLU as LeakyReLU, LogSigmoid as LogSigmoid, LogSoftmax as LogSoftmax, PReLU as PReLU, \
RReLU as RReLU, ReLU as ReLU, ReLU6 as ReLU6, SELU as SELU, Sigmoid as Sigmoid, Softmax as Softmax, \
Softmax2d as Softmax2d, Softmin as Softmin, Softplus as Softplus, Softshrink as Softshrink, Softsign as Softsign, \
Tanh as Tanh, Tanhshrink as Tanhshrink, Threshold as Threshold
from .adaptive import AdaptiveLogSoftmaxWithLoss as AdaptiveLogSoftmaxWithLoss
from .batchnorm import BatchNorm1d as BatchNorm1d, BatchNorm2d as BatchNorm2d, BatchNorm3d as BatchNorm3d, \
SyncBatchNorm as SyncBatchNorm
@@ -17,7 +17,7 @@ from .dropout import AlphaDropout as AlphaDropout, Dropout as Dropout, Dropout2d
from .fold import Fold as Fold, Unfold as Unfold
from .instancenorm import InstanceNorm1d as InstanceNorm1d, InstanceNorm2d as InstanceNorm2d, \
InstanceNorm3d as InstanceNorm3d
from .linear import Bilinear as Bilinear, Linear as Linear
from .linear import Bilinear as Bilinear, Identity as Identity, Linear as Linear
from .loss import BCELoss as BCELoss, BCEWithLogitsLoss as BCEWithLogitsLoss, CTCLoss as CTCLoss, \
CosineEmbeddingLoss as CosineEmbeddingLoss, CrossEntropyLoss as CrossEntropyLoss, \
HingeEmbeddingLoss as HingeEmbeddingLoss, KLDivLoss as KLDivLoss, L1Loss as L1Loss, MSELoss as MSELoss, \
@@ -44,3 +44,7 @@ from .rnn import GRU as GRU, GRUCell as GRUCell, LSTM as LSTM, LSTMCell as LSTMC
from .sparse import Embedding as Embedding, EmbeddingBag as EmbeddingBag
from .upsampling import Upsample as Upsample, UpsamplingBilinear2d as UpsamplingBilinear2d, \
UpsamplingNearest2d as UpsamplingNearest2d

#MODIFIED BY TORCHGPIPE
from .modules.flatten import Flatten as Flatten
#END
5 changes: 5 additions & 0 deletions stubs/torch/nn/modules/activation.pyi
@@ -100,6 +100,11 @@ class GLU(Module):
def __call__(self, input: Tensor) -> Tensor: ... # type: ignore


class GELU(Module):
def forward(self, input: Tensor) -> Tensor: ... # type: ignore
def __call__(self, input: Tensor) -> Tensor: ... # type: ignore


class Hardshrink(Module):
lambd: float = ...

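
A minimal sketch, assuming PyTorch 1.4: the GELU stub above lets models built from nn.GELU type-check.

import torch
import torch.nn as nn

mlp = nn.Sequential(nn.Linear(16, 32), nn.GELU(), nn.Linear(32, 16))
out = mlp(torch.randn(2, 16))
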
10 changes: 5 additions & 5 deletions stubs/torch/nn/modules/container.pyi
@@ -1,5 +1,5 @@
from .module import Module
from typing import Any, Optional, Union, overload, TypeVar, Iterable, Tuple, Mapping
from typing import Any, Optional, Union, overload, TypeVar, Iterable, Tuple, Mapping, Iterator
from collections import OrderedDict
from ... import Tensor
from .. import Parameter
@@ -56,7 +56,7 @@ class ModuleList(Module):

def __len__(self) -> int: ...

def __iter__(self) -> Iterable[Module]: ...
def __iter__(self) -> Iterator[Module]: ...

def __iadd__(self: T, modules: Iterable[Module]) -> T: ...

@@ -78,7 +78,7 @@ class ModuleDict(Module):

def __len__(self) -> int: ...

def __iter__(self) -> Iterable[str]: ...
def __iter__(self) -> Iterator[str]: ...

def __contains__(self, key: str) -> bool: ...

@@ -110,7 +110,7 @@ class ParameterList(Module):

def __len__(self) -> int: ...

def __iter__(self) -> Iterable[Parameter]: ...
def __iter__(self) -> Iterator[Parameter]: ...

def __iadd__(self: T, parameters: Iterable[Parameter]) -> T: ...

@@ -132,7 +132,7 @@ class ParameterDict(Module):

def __len__(self) -> int: ...

def __iter__(self) -> Iterable[str]: ...
def __iter__(self) -> Iterator[str]: ...

def __contains__(self, key: str) -> bool: ...

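
A minimal sketch of why Iterator (rather than Iterable) is the right return type for __iter__: iter() over a container module yields an object that next() accepts, which Iterable alone does not guarantee to a type checker.

import torch.nn as nn

layers = nn.ModuleList([nn.Linear(4, 4), nn.ReLU()])
it = iter(layers)   # Iterator[Module] under the stub above
first = next(it)    # valid on an Iterator; an Iterable has no __next__
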
10 changes: 10 additions & 0 deletions stubs/torch/nn/modules/flatten.pyi
@@ -0,0 +1,10 @@
from typing import Any
from .module import Module

class Flatten(Module):
__constants__: Any = ...
start_dim: Any = ...
end_dim: Any = ...
def __init__(self, start_dim: int = ..., end_dim: int = ...) -> None: ...
def forward(self, input: Any): ... # type: ignore
def __call__(self, input: Any): ... # type: ignore
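
A minimal sketch, assuming PyTorch 1.4: nn.Flatten, now stubbed above, replaces the hand-written Flatten entry that previously lived in nn/__init__.pyi.

import torch
import torch.nn as nn

head = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 10))
logits = head(torch.randn(2, 3, 8, 8))   # flattened to (2, 192) before Linear
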
9 changes: 9 additions & 0 deletions stubs/torch/nn/modules/linear.pyi
@@ -3,6 +3,15 @@ from .. import Parameter
from ... import Tensor


class Identity(Module):

def __init__(self) -> None: ...

def forward(self, input: Tensor) -> Tensor: ... # type: ignore

def __call__(self, input: Tensor) -> Tensor: ... # type: ignore


class Linear(Module):
in_features: int = ...
out_features: int = ...
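
A minimal sketch: nn.Identity, declared above, is a no-op module that simply returns its input, handy as a placeholder layer.

import torch
import torch.nn as nn

head = nn.Identity()
x = torch.randn(2, 5)
assert torch.equal(head(x), x)
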
10 changes: 5 additions & 5 deletions stubs/torch/nn/modules/module.pyi
@@ -31,7 +31,7 @@ class Module(Generic[T_co]):

def apply(self: T, fn: Callable[['Module'], None]) -> T: ...

def cuda(self: T, device: Union[int, device]) -> T: ...
def cuda(self: T, device: Optional[Union[int, device]] = ...) -> T: ...

def cpu(self: T) -> T: ...

@@ -95,10 +95,6 @@ class Module(Generic[T_co]):
def named_modules(self, memo: Optional[Set['Module']] = ..., prefix: str = ...) -> Iterator[
Tuple[str, 'Module']]: ...

#MODIFIED BY TORCHGPIPE
training: bool
#END

def train(self: T, mode: bool = ...) -> T: ...

def eval(self: T) -> T: ...
@@ -108,3 +104,7 @@ class Module(Generic[T_co]):
def share_memory(self: T) -> T: ...

def extra_repr(self) -> str: ...

#MODIFIED BY TORCHGPIPE
training: bool
#END
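
A minimal sketch, assuming a CUDA build: with the Optional default added to the stub, .cuda() may be called with or without an explicit device.

import torch
import torch.nn as nn

if torch.cuda.is_available():
    m = nn.Linear(4, 4).cuda()    # current device
    m = nn.Linear(4, 4).cuda(0)   # explicit device index
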
38 changes: 19 additions & 19 deletions stubs/torch/nn/modules/padding.pyi
@@ -1,6 +1,6 @@
from .module import Module
from ... import Tensor
from ..common_types import _size_1_t, _size_2_t, _size_3_t
from ..common_types import _size_2_t, _size_4_t, _size_6_t


class _ConstantPadNd(Module):
@@ -14,21 +14,21 @@ class _ConstantPadNd(Module):


class ConstantPad1d(_ConstantPadNd):
padding: _size_1_t = ...
padding: _size_2_t = ...

def __init__(self, padding: _size_1_t, value: float) -> None: ...
def __init__(self, padding: _size_2_t, value: float) -> None: ...


class ConstantPad2d(_ConstantPadNd):
padding: _size_2_t = ...
padding: _size_4_t = ...

def __init__(self, padding: _size_2_t, value: float) -> None: ...
def __init__(self, padding: _size_4_t, value: float) -> None: ...


class ConstantPad3d(_ConstantPadNd):
padding: _size_3_t = ...
padding: _size_6_t = ...

def __init__(self, padding: _size_3_t, value: float) -> None: ...
def __init__(self, padding: _size_6_t, value: float) -> None: ...


class _ReflectionPadNd(Module):
@@ -39,15 +39,15 @@ class _ReflectionPadNd(Module):


class ReflectionPad1d(_ReflectionPadNd):
padding: _size_1_t = ...
padding: _size_2_t = ...

def __init__(self, padding: _size_1_t) -> None: ...
def __init__(self, padding: _size_2_t) -> None: ...


class ReflectionPad2d(_ReflectionPadNd):
padding: _size_2_t = ...
padding: _size_4_t = ...

def __init__(self, padding: _size_2_t) -> None: ...
def __init__(self, padding: _size_4_t) -> None: ...


class _ReplicationPadNd(Module):
@@ -58,24 +58,24 @@ class _ReplicationPadNd(Module):


class ReplicationPad1d(_ReplicationPadNd):
padding: _size_1_t = ...
padding: _size_2_t = ...

def __init__(self, padding: _size_1_t) -> None: ...
def __init__(self, padding: _size_2_t) -> None: ...


class ReplicationPad2d(_ReplicationPadNd):
padding: _size_2_t = ...
padding: _size_4_t = ...

def __init__(self, padding: _size_2_t) -> None: ...
def __init__(self, padding: _size_4_t) -> None: ...


class ReplicationPad3d(_ReplicationPadNd):
padding: _size_3_t = ...
padding: _size_6_t = ...

def __init__(self, padding: _size_3_t) -> None: ...
def __init__(self, padding: _size_6_t) -> None: ...


class ZeroPad2d(ConstantPad2d):
padding: _size_2_t = ...
padding: _size_4_t = ...

def __init__(self, padding: _size_2_t) -> None: ...
def __init__(self, padding: _size_4_t) -> None: ...
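
A minimal sketch of the widened padding types above: 1-d padding is (left, right), 2-d is (left, right, top, bottom), and 3-d adds (front, back), matching the _size_2_t/_size_4_t/_size_6_t aliases.

import torch
import torch.nn as nn

pad1d = nn.ConstantPad1d((1, 2), value=0.0)
pad2d = nn.ZeroPad2d((1, 2, 3, 4))
pad3d = nn.ReplicationPad3d((1, 1, 2, 2, 3, 3))
y = pad2d(torch.randn(1, 3, 8, 8))   # height 8 + 3 + 4, width 8 + 1 + 2
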
2 changes: 1 addition & 1 deletion stubs/torch/nn/parallel/__init__.pyi
@@ -1,5 +1,5 @@
from .data_parallel import DataParallel as DataParallel, data_parallel as data_parallel
from .distributed import DistributedDataParallel as DistributedDataParallel
from .parallel_apply import parallel_apply as parallel_apply
from .replicate import replicate as repliace
from .replicate import replicate as replicate
from .scatter_gather import gather as gather, scatter as scatter
3 changes: 3 additions & 0 deletions stubs/torch/nn/parameter.pyi
@@ -1,4 +1,7 @@
from .. import Tensor
import builtins

class Parameter(Tensor):
def __init__(self, data: Tensor, requires_grad: builtins.bool): ...

...
11 changes: 9 additions & 2 deletions stubs/torch/optim/lr_scheduler.pyi
@@ -1,4 +1,4 @@
from typing import Iterable, Any, Optional
from typing import Iterable, Any, Optional, Callable
from .optimizer import Optimizer

class _LRScheduler:
@@ -35,4 +35,11 @@ class ReduceLROnPlateau:
def __init__(self, optimizer: Optimizer, mode: str=..., factor: float=..., patience: int=..., verbose: bool=..., threshold: float=..., threshold_mode: str=..., cooldown: int=..., min_lr: float=..., eps: float=...) -> None: ...
def step(self, metrics: Any, epoch: Optional[int]=...) -> None: ...
def state_dict(self) -> dict: ...
def load_state_dict(self, state_dict: dict): ...
def load_state_dict(self, state_dict: dict): ...

class CyclicLR(_LRScheduler):
def __init__(self, optimizer: Optimizer, base_lr: float=..., max_lr: float=..., step_size_up: int=..., step_size_down: int=..., mode: str=..., gamma: float=..., scale_fn: Optional[Callable[[float], float]]=..., scale_mode: str=..., cycle_momentum: bool=..., base_momentum: float=..., max_momentum: float=..., last_epoch: int=...) -> None: ...

class CosineAnnealingWarmRestarts(_LRScheduler):
def __init__(self, optimizer: Optimizer, T_0: int=..., T_mult: int=..., eta_min: int=..., last_epoch: int=...) -> None: ...
def step(self, epoch: Optional[int] = ...) -> None: ...
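
A minimal sketch, assuming PyTorch 1.4: the scheduler stubs above let CyclicLR (and CosineAnnealingWarmRestarts) be constructed and stepped without mypy errors. CyclicLR cycles momentum by default, so the optimizer needs a momentum setting.

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 4)
opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
sched = optim.lr_scheduler.CyclicLR(opt, base_lr=0.01, max_lr=0.1)

for _ in range(10):
    opt.step()
    sched.step()
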
