add type annotations to torch.nn.modules.fold (pytorch#49479)
Summary:
closes gh-49478

Fixes pytorch#49478

Pull Request resolved: pytorch#49479

Reviewed By: mruberry

Differential Revision: D25723838

Pulled By: walterddr

fbshipit-source-id: 45c4cbd6f147b6dc4a5f5419c17578c49c201022
guilhermeleobas authored and hwangdeyu committed Jan 14, 2021
1 parent 38cacf2 commit 8d40359
Showing 2 changed files with 5 additions and 8 deletions.
3 changes: 0 additions & 3 deletions mypy.ini
@@ -76,9 +76,6 @@ ignore_errors = True
 [mypy-torch.nn.modules.conv]
 ignore_errors = True
 
-[mypy-torch.nn.modules.fold]
-ignore_errors = True
-
 [mypy-torch.nn.modules.module]
 ignore_errors = True
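
With the per-module override removed, mypy now type-checks torch.nn.modules.fold along with the rest of the codebase. A minimal sketch of verifying this through mypy's Python API (the file path is real, but the exact flags the repo's lint job passes may differ):

import sys
# Sketch only: assumes mypy is installed and this is run from the repo root.
from mypy import api

# api.run returns (stdout, stderr, exit_status); exit status 0 means no type errors.
stdout, stderr, exit_status = api.run(["--config-file", "mypy.ini",
                                       "torch/nn/modules/fold.py"])
sys.stdout.write(stdout)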
10 changes: 5 additions & 5 deletions torch/nn/functional.pyi.in
@@ -1,7 +1,7 @@
 from torch import Tensor
 from torch.types import _size
 from typing import Any, Optional, Tuple, Dict, List, Callable, Sequence, Union
-from .common_types import _ratio_any_t, _size_1_t, _size_2_t, _size_3_t, _size_2_opt_t, _size_3_opt_t
+from .common_types import _ratio_any_t, _size_any_t, _size_1_t, _size_2_t, _size_3_t, _size_2_opt_t, _size_3_opt_t
 
 # 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys.
 # It is standards-track but not in `typing` yet. We leave this hear to be uncommented once the feature
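
The newly imported _size_any_t is what lets the stubs below accept either a bare int or a tuple. A paraphrased sketch of how torch/nn/common_types.py builds the alias (the real file derives several such aliases from the same generic helper):

from typing import Tuple, TypeVar, Union

T = TypeVar("T")
# Either a scalar or a homogeneous tuple of that scalar type.
_scalar_or_tuple_any_t = Union[T, Tuple[T, ...]]
_size_any_t = _scalar_or_tuple_any_t[int]  # Union[int, Tuple[int, ...]]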
@@ -335,12 +335,12 @@ def normalize(input: Tensor, p: float = ..., dim: int = ..., eps: float = ...,
 def assert_int_or_pair(arg: Any, arg_name: Any, message: Any) -> None: ...
 
 
-def unfold(input: Tensor, kernel_size: _size, dilation: _size = ..., padding: _size = ...,
-           stride: _size = ...) -> Tensor: ...
+def unfold(input: Tensor, kernel_size: _size_any_t, dilation: _size_any_t = ..., padding: _size_any_t = ...,
+           stride: _size_any_t = ...) -> Tensor: ...
 
 
-def fold(input: Tensor, output_size: _size, kernel_size: _size, dilation: _size = ..., padding: _size = ...,
-         stride: _size = ...) -> Tensor: ...
+def fold(input: Tensor, output_size: _size_any_t, kernel_size: _size_any_t, dilation: _size_any_t = ..., padding: _size_any_t = ...,
+         stride: _size_any_t = ...) -> Tensor: ...
 
 
 def multi_head_attention_forward(query: Tensor,
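
The looser annotation matches how torch.nn.functional.unfold and fold are actually called at runtime: both accept a single int or a tuple for their size-like arguments. A small usage sketch (shapes chosen arbitrarily):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 10, 12)

# Both spellings are valid at runtime; under the old `_size` annotation
# (a tuple/list-like type) mypy rejected the int form, while `_size_any_t`
# accepts both.
patches = F.unfold(x, kernel_size=3)      # int form
same = F.unfold(x, kernel_size=(3, 3))    # tuple form
assert torch.equal(patches, same)         # shape (1, 27, 80) in both cases

# fold scatters the patches back onto the output grid, summing overlaps,
# so fold(unfold(x)) equals x only when the windows do not overlap.
y = F.fold(patches, output_size=(10, 12), kernel_size=3)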
