Skip to content

Commit

Permalink
Test nn.AdaptiveAvgPoolXd (#5615)
Browse files Browse the repository at this point in the history
* Fix the incorrect backward kernel by using 'cuda::atomic::Add'

* Support the 'NoneType' annotation

* Support objects of 'collections.abc.Iterable' as 'output_size'

* Test with all cases of 'output_size'
  • Loading branch information
Tianyu Zhao committed Jul 27, 2021
1 parent ba4e97e commit 0415486
Show file tree
Hide file tree
Showing 3 changed files with 36 additions and 31 deletions.
51 changes: 23 additions & 28 deletions oneflow/python/nn/modules/adaptive_pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,27 +16,23 @@
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.nn.common_types import _size_1_t
from oneflow.python.nn.modules.utils import _single, _pair, _triple


def _generate_output_size(input_size, output_size):
new_output_size = []
if isinstance(output_size, int):
for _ in range(len(input_size) - 2):
new_output_size.append(output_size)
elif isinstance(output_size, tuple):
assert len(input_size) - 2 == len(
output_size
), f"The length of 'output_size' does not match the input size, {len(input_size) - 2} expected"
for i in range(len(output_size)):
if output_size[i] is None:
new_output_size.append(input_size[i + 2])
else:
assert isinstance(
output_size[i], int
), "numbers in 'output_size' should be integer"
new_output_size.append(output_size[i])
else:
raise ValueError("invalid 'output_size', 'int' or 'tuple' expected")
assert len(input_size) - 2 == len(
output_size
), f"the length of 'output_size' does not match the input size, {len(input_size) - 2} expected"
for i in range(len(output_size)):
if output_size[i] is None:
new_output_size.append(input_size[i + 2])
else:
assert isinstance(
output_size[i], int
), "numbers in 'output_size' should be integer"
new_output_size.append(output_size[i])
return tuple(new_output_size)


Expand Down Expand Up @@ -68,19 +64,16 @@ class AdaptiveAvgPool1d(Module):
"""

def __init__(self, output_size) -> None:
def __init__(self, output_size: _size_1_t) -> None:
super().__init__()
self.output_size = output_size
assert output_size is not None
self.output_size = _single(output_size)

def forward(self, x):
assert len(x.shape) == 3
if isinstance(self.output_size, tuple):
new_output_size = self.output_size[0]
elif isinstance(self.output_size, int):
new_output_size = self.output_size
else:
raise ValueError("'output_size' should be integer or tuple")
return flow.F.adaptive_avg_pool1d(x, output_size=(new_output_size,))
assert len(self.output_size) == 1, f"the length of 'output_size' does not match the input size, 1 expected"
assert isinstance(self.output_size[0], int), "numbers in 'output_size' should be integer"
return flow.F.adaptive_avg_pool1d(x, output_size=self.output_size)


@oneflow_export("adaptive_avg_pool1d")
Expand Down Expand Up @@ -142,7 +135,8 @@ class AdaptiveAvgPool2d(Module):

def __init__(self, output_size) -> None:
super().__init__()
self.output_size = output_size
assert output_size is not None
self.output_size = _pair(output_size)

def forward(self, x):
assert len(x.shape) == 4
Expand Down Expand Up @@ -209,7 +203,8 @@ class AdaptiveAvgPool3d(Module):

def __init__(self, output_size) -> None:
super().__init__()
self.output_size = output_size
assert output_size is not None
self.output_size = _triple(output_size)

def forward(self, x):
assert len(x.shape) == 5
Expand Down
13 changes: 10 additions & 3 deletions oneflow/python/test/modules/test_adaptive_pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,17 @@
import numpy as np

import oneflow.experimental as flow
from oneflow.python.nn.common_types import _size_1_t
from typing import Union, Tuple

from test_util import GenArgList
from automated_test_util import *

# Alias for the type of None, used to build Optional-style Unions below.
NoneType = type(None)
# NOTE(review): unlike the annotations in PyTorch, the 'output_size' argument
# as a whole may not be None here; only individual tuple entries may be None
# (meaning "keep that input dimension") — presumably mirroring the runtime
# behavior of 'torch.nn.AdaptiveAvgPoolXd'. TODO confirm against the modules.
_size_2_opt_t_not_none = Union[int, Tuple[Union[int, NoneType], Union[int, NoneType]]]
_size_3_opt_t_not_none = Union[int, Tuple[Union[int, NoneType], Union[int, NoneType], Union[int, NoneType]]]

# TODO: auto test


Expand Down Expand Up @@ -864,7 +871,7 @@ def _test_adaptive_avgpool3d_dhw_backward(test_case, device):
class TestAdaptiveAvgPool(flow.unittest.TestCase):
@autotest()
def test_adaptive_avgpool1d(test_case):
m = torch.nn.AdaptiveAvgPool1d(output_size=random(1, 5).to(int))
m = torch.nn.AdaptiveAvgPool1d(output_size=random().to(_size_1_t))
m.train(random())
device = random_device()
m.to(device)
Expand All @@ -874,7 +881,7 @@ def test_adaptive_avgpool1d(test_case):

@autotest()
def test_adaptive_avgpool2d(test_case):
m = torch.nn.AdaptiveAvgPool2d(output_size=random(1, 5).to(int))
m = torch.nn.AdaptiveAvgPool2d(output_size=random().to(_size_2_opt_t_not_none))
m.train(random())
device = random_device()
m.to(device)
Expand All @@ -884,7 +891,7 @@ def test_adaptive_avgpool2d(test_case):

@autotest()
def test_adaptive_avgpool3d(test_case):
m = torch.nn.AdaptiveAvgPool3d(output_size=random(1, 5).to(int))
m = torch.nn.AdaptiveAvgPool3d(output_size=random().to(_size_3_opt_t_not_none))
m.train(random())
device = random_device()
m.to(device)
Expand Down
3 changes: 3 additions & 0 deletions oneflow/python/test_utils/automated_test_util/generators.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import torch

py_tuple = tuple
NoneType = type(None)

TEST_MODULE = 0
TEST_FLOW = 1
Expand Down Expand Up @@ -247,6 +248,8 @@ def _generate(self, annotation):
val = float(rng.random() * (high - low) + low)
elif annotation == bool:
val = random_util.choice([True, False])
elif annotation == NoneType:
val = None
else:
raise NotImplementedError(
f"Not implemented annotation {annotation} in random"
Expand Down

0 comments on commit 0415486

Please sign in to comment.