Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refine module test using auto test by yaochi #5484

Merged
merged 18 commits into from
Jul 16, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 2 additions & 2 deletions oneflow/python/nn/modules/masked_fill.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def forward(self, input, mask):
@oneflow_export("masked_fill")
@register_tensor_op("masked_fill")
@experimental_api
def masked_fill_op(tensor, mask, value):
def masked_fill_op(input, mask, value):
r"""
Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is True.
The shape of :attr:`mask` must be broadcastable with the shape of the underlying tensor.
Expand Down Expand Up @@ -72,7 +72,7 @@ def masked_fill_op(tensor, mask, value):
# [-1.9009, 8.7654, 8.7654, 8.7654]]], dtype=oneflow.float32)

"""
return MaskedFill(value)(tensor, mask)
return MaskedFill(value)(input, mask)


if __name__ == "__main__":
Expand Down
140 changes: 70 additions & 70 deletions oneflow/python/nn/modules/math_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,13 +66,13 @@ def forward(self, x, y):
@oneflow_export("mul")
@register_tensor_op("mul")
@experimental_api
def _mul(x, y):
r"""Computes the multiplication of x by y for each element, scalar and broadcast promotation are supported.
def _mul(input, other):
r"""Computes the multiplication of input by other for each element; scalar and broadcast promotion are supported.

The formula is:

.. math::
out = x \times y
out = input \times other

For example:

Expand All @@ -83,40 +83,40 @@ def _mul(x, y):
>>> flow.enable_eager_execution()

# element-wise multiply
>>> x = flow.Tensor(np.random.randn(2,3))
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.mul(x,y).numpy()
>>> input = flow.Tensor(np.random.randn(2,3))
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.mul(input,other).numpy()
>>> out.shape
(2, 3)

# scalar multiply
>>> x = 5
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.mul(x,y).numpy()
>>> input = 5
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.mul(input,other).numpy()
>>> out.shape
(2, 3)

# broadcast multiply
>>> x = flow.Tensor(np.random.randn(1,1))
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.mul(x,y).numpy()
>>> input = flow.Tensor(np.random.randn(1,1))
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.mul(input,other).numpy()
>>> out.shape
(2, 3)

"""

if isinstance(x, (int, float)):
return ScalarMul(x)(y)
elif isinstance(y, (int, float)):
return ScalarMul(y)(x)
elif x.shape == y.shape:
return ElementwiseMul()(x, y)
elif x.shape == (1,):
return ScalarMulByTensor()(y, x)
elif y.shape == (1,):
return ScalarMulByTensor()(x, y)
if isinstance(input, (int, float)):
return ScalarMul(input)(other)
elif isinstance(other, (int, float)):
return ScalarMul(other)(input)
elif input.shape == other.shape:
return ElementwiseMul()(input, other)
elif input.shape == (1,):
return ScalarMulByTensor()(other, input)
elif other.shape == (1,):
return ScalarMulByTensor()(input, other)
else:
return BroadcastMul()(x, y)
return BroadcastMul()(input, other)


class Variance(Module):
Expand Down Expand Up @@ -207,12 +207,12 @@ def forward(self, x):
@oneflow_export("sub")
@register_tensor_op("sub")
@experimental_api
def _sub(x, y):
r"""Computes the subtraction of x by y for each element, scalar and broadcast promotation are supported.
def _sub(input, other):
r"""Computes the subtraction of input by other for each element; scalar and broadcast promotion are supported.
The formula is:

.. math::
out = x - y
out = input - other

For example:

Expand All @@ -223,39 +223,39 @@ def _sub(x, y):
>>> flow.enable_eager_execution()

# element-wise subtract
>>> x = flow.Tensor(np.random.randn(2,3))
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.sub(x,y).numpy()
>>> input = flow.Tensor(np.random.randn(2,3))
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.sub(input,other).numpy()
>>> out.shape
(2, 3)

# scalar subtract
>>> x = 5
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.sub(x,y).numpy()
>>> input = 5
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.sub(input,other).numpy()
>>> out.shape
(2, 3)

# broadcast subtract
>>> x = flow.Tensor(np.random.randn(1,1))
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.sub(x,y).numpy()
>>> input = flow.Tensor(np.random.randn(1,1))
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.sub(input,other).numpy()
>>> out.shape
(2, 3)

"""

if isinstance(x, (int, float)):
return ScalarAdd(x)(ScalarMul(-1)(y))
elif isinstance(y, (int, float)):
return ScalarAdd(-1 * y)(x)
elif x.shape == y.shape:
if isinstance(input, (int, float)):
return ScalarAdd(input)(ScalarMul(-1)(other))
elif isinstance(other, (int, float)):
return ScalarAdd(-1 * other)(input)
elif input.shape == other.shape:
# TODO: add element-wise op
return BroadcastSub()(x, y)
elif y.shape == (1,):
return ScalarSubByTensor()(x, y)
return BroadcastSub()(input, other)
elif other.shape == (1,):
return ScalarSubByTensor()(input, other)
else:
return BroadcastSub()(x, y)
return BroadcastSub()(input, other)


class BroadcastDiv(Module):
Expand All @@ -277,16 +277,16 @@ def forward(self, x, scalar):
@oneflow_export("div")
@register_tensor_op("div")
@experimental_api
def _div(x, y):
r"""Computes the division of x by y for each element, scalar and broadcast promotation are supported.
def _div(input, other):
r"""Computes the division of input by other for each element; scalar and broadcast promotion are supported.
The formula is:

.. math::
out = \frac{X}{Y}
out = \frac{input}{other}

Args:
x (Union[int, float, flow.Tensor]): X.
y (Union[int, float, flow.Tensor]): Y.
input (Union[int, float, flow.Tensor]): input.
other (Union[int, float, flow.Tensor]): other.

For example:

Expand All @@ -297,42 +297,42 @@ def _div(x, y):
>>> flow.enable_eager_execution()

# element-wise divide
>>> x = flow.Tensor(np.random.randn(2,3))
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.div(x,y).numpy()
>>> input = flow.Tensor(np.random.randn(2,3))
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.div(input,other).numpy()
>>> out.shape
(2, 3)

# scalar divide
>>> x = 5
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.div(x,y).numpy()
>>> input = 5
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.div(input,other).numpy()
>>> out.shape
(2, 3)

# broadcast divide
>>> x = flow.Tensor(np.random.randn(1,1))
>>> y = flow.Tensor(np.random.randn(2,3))
>>> out = flow.div(x,y).numpy()
>>> input = flow.Tensor(np.random.randn(1,1))
>>> other = flow.Tensor(np.random.randn(2,3))
>>> out = flow.div(input,other).numpy()
>>> out.shape
(2, 3)

"""

if isinstance(x, (int, float)):
return ScalarMul(x)(flow.experimental.reciprocal(y))
elif isinstance(y, (int, float)):
if y == 0 or y == 0.0:
y = 0.0
if isinstance(input, (int, float)):
return ScalarMul(input)(flow.experimental.reciprocal(other))
elif isinstance(other, (int, float)):
if other == 0 or other == 0.0:
other = 0.0
else:
y = 1.0 / (float(y))
return ScalarMul(y)(x)
elif x.shape == y.shape:
return BroadcastDiv()(x, y)
elif y.shape == (1,):
return ScalarDivByTensor()(x, y)
other = 1.0 / (float(other))
return ScalarMul(other)(input)
elif input.shape == other.shape:
return BroadcastDiv()(input, other)
elif other.shape == (1,):
return ScalarDivByTensor()(input, other)
else:
return BroadcastDiv()(x, y)
return BroadcastDiv()(input, other)


class Reciprocal(Module):
Expand Down
29 changes: 29 additions & 0 deletions oneflow/python/test/modules/test_div.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

import oneflow.experimental as flow
from test_util import GenArgList
from automated_test_util import *


def _test_div_impl(test_case, shape, device):
Expand Down Expand Up @@ -78,6 +79,34 @@ def test_div(test_case):
for arg in GenArgList(arg_dict):
_test_div_impl(test_case, *arg)

def test_div_against_pytorch(test_case):
    """Compare oneflow's div op against PyTorch via the automated test utility.

    Exercises both the functional form (``flow.div``) and the tensor-method
    form, on both CPU and CUDA devices, with two argument combinations:
    tensor / tensor (matching shapes) and tensor / scalar.

    NOTE(review): this was originally named ``test_sub_against_pytorch`` — a
    copy-paste slip from test_sub.py, since this file is test_div.py and the
    op under test is "div". Renamed to match; unittest discovers tests by the
    ``test_`` prefix, so discovery is unaffected.
    """
    arg_dict = OrderedDict()
    # Run both the free-function and the Tensor-method entry points.
    arg_dict["test_type"] = [test_flow_against_pytorch, test_tensor_against_pytorch]
    arg_dict["device"] = ["cpu", "cuda"]
    arg_dict["op"] = ["div"]
    for arg in GenArgList(arg_dict):
        # Case 1: element-wise tensor / tensor division with matching shapes.
        arg[0](
            test_case,
            arg[2],
            extra_annotations={"other": flow.Tensor},
            extra_generators={
                "input": random_tensor(ndim=2, dim0=2, dim1=3),
                "other": random_tensor(ndim=2, dim0=2, dim1=3),
            },
            device=arg[1],
        )

        # Case 2: tensor / scalar division (scalar drawn from [0, 5)).
        arg[0](
            test_case,
            arg[2],
            extra_annotations={"other": float},
            extra_generators={
                "input": random_tensor(ndim=2, dim0=2, dim1=3),
                "other": random(0, 5),
            },
            device=arg[1],
        )


if __name__ == "__main__":
    # Standard entry point: discover and run all tests in this module
    # when the file is executed directly.
    unittest.main()