Skip to content

Commit

Permalink
[Relay] Fixed bug in attribute parsing for pool layers. (apache#5582)
Browse files Browse the repository at this point in the history
* Fixed pooling bug.

* Added tests and fixed more cases.
  • Loading branch information
jwfromm authored and Trevor Morris committed Jun 18, 2020
1 parent 0f43d53 commit 1bb757a
Show file tree
Hide file tree
Showing 4 changed files with 87 additions and 15 deletions.
32 changes: 27 additions & 5 deletions python/tvm/relay/op/nn/nn.py
Expand Up @@ -19,7 +19,7 @@
from tvm.relay import expr

from . import _make
from .util import get_pad_tuple2d, get_pad_tuple3d
from .util import get_pad_tuple1d, get_pad_tuple2d, get_pad_tuple3d


def conv1d(data,
Expand Down Expand Up @@ -601,10 +601,11 @@ def max_pool1d(data,
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size,)
if isinstance(strides, int):
strides = (strides,)
if isinstance(padding, int):
padding = (padding,)
padding = get_pad_tuple1d(padding)
return _make.max_pool1d(data, pool_size, strides, padding,
layout, ceil_mode)

Expand Down Expand Up @@ -661,6 +662,11 @@ def max_pool2d(data,
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides)
padding = get_pad_tuple2d(padding)
return _make.max_pool2d(data, pool_size, strides, padding,
layout, ceil_mode)

Expand Down Expand Up @@ -709,6 +715,11 @@ def max_pool3d(data,
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides, strides)
padding = get_pad_tuple3d(padding)
return _make.max_pool3d(data, pool_size, strides, padding,
layout, ceil_mode)

Expand Down Expand Up @@ -761,10 +772,11 @@ def avg_pool1d(data,
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size,)
if isinstance(strides, int):
strides = (strides,)
if isinstance(padding, int):
padding = (padding,)
padding = get_pad_tuple1d(padding)
return _make.avg_pool1d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)

Expand Down Expand Up @@ -826,6 +838,11 @@ def avg_pool2d(data,
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides)
padding = get_pad_tuple2d(padding)
return _make.avg_pool2d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)

Expand Down Expand Up @@ -878,6 +895,11 @@ def avg_pool3d(data,
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides, strides)
padding = get_pad_tuple3d(padding)
return _make.avg_pool3d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)

Expand Down
31 changes: 31 additions & 0 deletions python/tvm/relay/op/nn/util.py
Expand Up @@ -19,6 +19,37 @@
from tvm.ir import container


def get_pad_tuple1d(padding):
    """Common code to get the 1 dimensional pad option
    Parameters
    ----------
    padding : Union[int, Tuple[int, ...]]
        Padding size
    Returns
    -------
    pad_left : int
        Padding size on left
    pad_right : int
        Padding size on right.
    """
    # Normalize TVM container arrays to plain Python lists first.
    if isinstance(padding, container.Array):
        padding = list(padding)
    # A bare int pads both sides symmetrically.
    if isinstance(padding, int):
        total_pad = padding * 2
    elif isinstance(padding, (tuple, list)):
        if len(padding) == 2:
            # Explicit (left, right) pair: pass straight through.
            return padding[0], padding[1]
        if len(padding) == 1:
            total_pad = padding[0] * 2
        else:
            raise ValueError("Size of padding can only be 1 or 2")
    else:
        raise ValueError("Unknown padding option %s" % padding)
    # Split the total padding, biasing the extra unit (if odd) to the left.
    left = (total_pad + 1) // 2
    return left, total_pad - left


def get_pad_tuple2d(padding):
"""Common code to get the pad option
Parameters
Expand Down
24 changes: 17 additions & 7 deletions tests/python/relay/test_op_level2.py
Expand Up @@ -747,7 +747,7 @@ def test_upsampling3d_infer_type():
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32")

def _test_pool2d(opfunc, reffunc):
def _test_pool2d(opfunc, reffunc, pool_size=(2, 2), strides=(2, 2), padding=(0, 0)):
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1))
Expand All @@ -758,7 +758,7 @@ def _test_pool2d(opfunc, reffunc):
dtype = "float32"
dshape = (1, 3, 28, 28)
x = relay.var("x", shape=dshape)
y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
y = opfunc(x, pool_size=pool_size, strides=strides, padding=padding)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5))
Expand Down Expand Up @@ -815,7 +815,9 @@ def _test_global_pool2d(opfunc, reffunc):

def test_pool2d():
_test_pool2d(relay.nn.max_pool2d, np.max)
_test_pool2d(relay.nn.max_pool2d, np.max, pool_size=2, strides=2, padding=0)
_test_pool2d(relay.nn.avg_pool2d, np.mean)
_test_pool2d(relay.nn.avg_pool2d, np.mean, pool_size=2, strides=2, padding=0)
_test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32')
_test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16')
_test_global_pool2d(relay.nn.global_max_pool2d, np.max)
Expand All @@ -824,7 +826,7 @@ def test_pool2d():

def test_pool1d():

def _test_pool1d(opfunc):
def _test_pool1d(opfunc, pool_size=(2,), strides=(2,), padding=(0, 0)):
n, c, w = te.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "float32"))
y = opfunc(x, pool_size=(1,))
Expand All @@ -836,7 +838,7 @@ def _test_pool1d(opfunc):
dshape = (1, 3, 32)
x = relay.var("x", shape=dshape)
pool_type = 'max' if 'max' in str(opfunc) else 'avg'
y = opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0))
y = opfunc(x, pool_size=pool_size, strides=strides, padding=padding)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,),
Expand All @@ -847,12 +849,18 @@ def _test_pool1d(opfunc):
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)

_test_pool1d(relay.nn.max_pool1d)
_test_pool1d(relay.nn.max_pool1d, pool_size=2, strides=2, padding=0)
_test_pool1d(relay.nn.avg_pool1d)
_test_pool1d(relay.nn.avg_pool1d, pool_size=2, strides=2, padding=0)


def test_pool3d():

def _test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16, 16)):
def _test_pool3d(opfunc,
pool_size=(2, 2, 2),
strides=(2, 2, 2),
padding=(0, 0, 0, 0, 0, 0),
out_shape=(1, 3, 16, 16, 16)):
n, c, d, h, w = te.size_var("n"), 10, 5, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1, 1))
Expand All @@ -864,14 +872,14 @@ def _test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16, 16
dshape = (1, 3, 32, 32, 32)
x = relay.var("x", shape=dshape)
pool_type = 'max' if 'max' in str(opfunc) else 'avg'
y = opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding)
y = opfunc(x, pool_size=pool_size, strides=strides, padding=padding)
func = relay.Function([x], y)
# check output shape
f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape))
assert out_shape == f_out_shape, \
"Output shape mismatch. expected {}, actual {}".format(out_shape, f_out_shape)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2, 2),
ref_res = topi.testing.pool3d_ncdhw_python(data, pool_size, strides,
padding, out_shape, pool_type, False)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
Expand All @@ -882,10 +890,12 @@ def _test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16, 16
_test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))
_test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))
_test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))
_test_pool3d(relay.nn.max_pool3d, pool_size=2, padding=0, strides=2)
_test_pool3d(relay.nn.avg_pool3d)
_test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))
_test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))
_test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))
_test_pool3d(relay.nn.avg_pool3d, pool_size=2, padding=0, strides=2)


def test_avg_pool2d_no_count_pad():
Expand Down
15 changes: 12 additions & 3 deletions topi/python/topi/testing/pool3d_python.py
Expand Up @@ -27,9 +27,18 @@ def pool3d_ncdhw_python(np_data, kernel,
ceil_mode=False, dtype="float32"):
"""baseline for max_pool3d and avg_pool3d, default layout is "NCDHW"""
in_n, in_c, in_d, in_h, in_w = in_shape = np_data.shape
k_d, k_h, k_w = kernel
s_d, s_h, s_w = strides
pf, pt, pl, pk, pb, pr = padding
if isinstance(kernel, int):
k_d = k_h = k_w = kernel
else:
k_d, k_h, k_w = kernel
if isinstance(strides, int):
s_d = s_h = s_w = strides
else:
s_d, s_h, s_w = strides
if isinstance(padding, int):
pf = pt = pl = pk = pb = pr = padding
else:
pf, pt, pl, pk, pb, pr = padding

if ceil_mode:
assert out_shape[2] == int(math.ceil(float(in_shape[2] - k_d + pf + pk) / s_d) + 1)
Expand Down

0 comments on commit 1bb757a

Please sign in to comment.