Add the max, min, maximum, minimum api for the API 2.0
* Add the max, min, maximum, minimum api for the API 2.0, test=develop
wawltor committed Aug 12, 2020
1 parent 13b80d9 commit 9c17b3c
Showing 9 changed files with 533 additions and 165 deletions.
11 changes: 8 additions & 3 deletions paddle/fluid/operators/elementwise/elementwise_op.h
@@ -82,7 +82,13 @@ class ElementwiseOp : public framework::OperatorWithKernel {
     auto y_dims = ctx->GetInputDim("Y");
     int max_dim = std::max(x_dims.size(), y_dims.size());
     int axis = ctx->Attrs().Get<int>("axis");
-    axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
+    PADDLE_ENFORCE_EQ((axis >= (-1 * max_dim)) && (axis < max_dim), true,
+                      platform::errors::InvalidArgument(
+                          "The axis range must be [%s, %s), but axis is %s. "
+                          "Please set the axis again.",
+                          -1 * max_dim, max_dim, axis));
+    axis = (axis < 0 ? (std::abs(x_dims.size() - y_dims.size()) + axis + 1)
+                     : axis);
     std::vector<int> x_dims_array(max_dim);
     std::vector<int> y_dims_array(max_dim);
     std::vector<int> out_dims_array(max_dim);
@@ -132,8 +138,7 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
         "Y.dimension must be a subsequence of x.dimension. And axis "
         "is the start dimension index "
         "for broadcasting Y onto X. ")
-        .SetDefault(-1)
-        .EqualGreaterThan(-1);
+        .SetDefault(-1);
     AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
         .SetDefault(false);
     AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
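For reference, a minimal Python sketch of the axis handling introduced above (the normalize_axis helper is hypothetical and not part of the commit): a negative axis is first validated against [-max_dim, max_dim) and then shifted to the position its non-negative counterpart would occupy.

def normalize_axis(x_rank, y_rank, axis):
    # Mirrors the C++ logic added to ElementwiseOp above.
    max_dim = max(x_rank, y_rank)
    assert -max_dim <= axis < max_dim, "axis out of range"
    return abs(x_rank - y_rank) + axis + 1 if axis < 0 else axis

# Broadcasting a rank-2 Y onto a rank-4 X: axis=-2 and axis=1 pick the same
# start dimension, which is what the test_broadcast_axis cases below rely on.
print(normalize_axis(4, 2, -2))  # 1
print(normalize_axis(4, 2, 1))   # 1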
4 changes: 2 additions & 2 deletions python/paddle/__init__.py
@@ -134,8 +132,6 @@
 from .tensor.math import elementwise_add #DEFINE_ALIAS
 from .tensor.math import elementwise_div #DEFINE_ALIAS
 from .tensor.math import elementwise_floordiv #DEFINE_ALIAS
-from .tensor.math import elementwise_max #DEFINE_ALIAS
-from .tensor.math import elementwise_min #DEFINE_ALIAS
 from .tensor.math import elementwise_mod #DEFINE_ALIAS
 from .tensor.math import elementwise_pow #DEFINE_ALIAS
 from .tensor.math import elementwise_sub #DEFINE_ALIAS
@@ -164,7 +162,9 @@
 from .tensor.math import tanh #DEFINE_ALIAS
 from .tensor.math import elementwise_sum #DEFINE_ALIAS
 from .tensor.math import max #DEFINE_ALIAS
+from .tensor.math import maximum #DEFINE_ALIAS
 from .tensor.math import min #DEFINE_ALIAS
+from .tensor.math import minimum #DEFINE_ALIAS
 from .tensor.math import mm #DEFINE_ALIAS
 from .tensor.math import div #DEFINE_ALIAS
 from .tensor.math import multiply #DEFINE_ALIAS
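For orientation, a minimal usage sketch of the four APIs exported here, based on the tests added below (dygraph mode; the input values and printed results are illustrative only, not taken from the commit):

import numpy as np
import paddle

paddle.disable_static()
x = paddle.to_variable(np.array([[1., 5.], [3., 2.]]).astype("float32"))
y = paddle.to_variable(np.array([[2., 4.], [1., 6.]]).astype("float32"))

# Element-wise comparison between two tensors.
print(paddle.maximum(x, y).numpy())  # [[2. 5.] [3. 6.]]
print(paddle.minimum(x, y).numpy())  # [[1. 4.] [1. 2.]]

# Reduction of a single tensor along an axis.
print(paddle.max(x, axis=0).numpy())  # [3. 5.]
print(paddle.min(x, axis=1).numpy())  # [1. 2.]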
69 changes: 69 additions & 0 deletions python/paddle/fluid/tests/unittests/test_max_op.py
@@ -0,0 +1,69 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core


class ApiMaxTest(unittest.TestCase):
    def setUp(self):
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()

    def test_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.nn.data("data", shape=[10, 10], dtype="float32")
            result_max = paddle.max(x=data, axis=1)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.rand(10, 10).astype(np.float32)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
            self.assertEqual((res == np.max(input_data, axis=1)).all(), True)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.nn.data("data", shape=[10, 10], dtype="int64")
            result_max = paddle.max(x=data, axis=0)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
            self.assertEqual((res == np.max(input_data, axis=0)).all(), True)

    def test_errors(self):
        paddle.enable_static()

        def test_input_type():
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):
                data = np.random.rand(10, 10)
                result_max = paddle.max(x=data, axis=0)

        self.assertRaises(TypeError, test_input_type)

    def test_imperative_api(self):
        paddle.disable_static()
        np_x = np.array([10, 10]).astype('float64')
        x = paddle.to_variable(np_x)
        z = paddle.max(x, axis=0)
        np_z = z.numpy()
        z_expected = np.array(np.max(np_x, axis=0))
        self.assertEqual((np_z == z_expected).all(), True)
80 changes: 80 additions & 0 deletions python/paddle/fluid/tests/unittests/test_maximum_op.py
@@ -0,0 +1,80 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core


class ApiMaximumTest(unittest.TestCase):
    def setUp(self):
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()

        self.input_x = np.random.rand(10, 15).astype("float32")
        self.input_y = np.random.rand(10, 15).astype("float32")
        self.input_z = np.random.rand(15).astype("float32")

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32")
            data_y = paddle.nn.data("y", shape=[10, 15], dtype="float32")
            result_max = paddle.maximum(data_x, data_y)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"x": self.input_x,
                                 "y": self.input_y},
                           fetch_list=[result_max])
            self.assertEqual((res == np.maximum(self.input_x, self.input_y)).all(),
                             True)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32")
            data_z = paddle.nn.data("z", shape=[15], dtype="float32")
            result_max = paddle.maximum(data_x, data_z, axis=1)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"x": self.input_x,
                                 "z": self.input_z},
                           fetch_list=[result_max])
            self.assertEqual((res == np.maximum(self.input_x, self.input_z)).all(),
                             True)

    def test_dynamic_api(self):
        paddle.disable_static()
        x = paddle.to_variable(self.input_x)
        y = paddle.to_variable(self.input_y)
        z = paddle.maximum(x, y)
        np_z = z.numpy()
        z_expected = np.array(np.maximum(self.input_x, self.input_y))
        self.assertEqual((np_z == z_expected).all(), True)

    def test_broadcast_axis(self):
        paddle.disable_static()
        np_x = np.random.rand(5, 4, 3, 2).astype("float64")
        np_y = np.random.rand(4, 3).astype("float64")

        x = paddle.to_variable(np_x)
        y = paddle.to_variable(np_y)
        result_1 = paddle.maximum(x, y, axis=1)
        result_2 = paddle.maximum(x, y, axis=-2)
        self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
69 changes: 69 additions & 0 deletions python/paddle/fluid/tests/unittests/test_min_op.py
@@ -0,0 +1,69 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core


class ApiMinTest(unittest.TestCase):
    def setUp(self):
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()

    def test_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.nn.data("data", shape=[10, 10], dtype="float32")
            result_min = paddle.min(x=data, axis=1)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.rand(10, 10).astype(np.float32)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
            self.assertEqual((res == np.min(input_data, axis=1)).all(), True)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.nn.data("data", shape=[10, 10], dtype="int64")
            result_min = paddle.min(x=data, axis=0)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_min])
            self.assertEqual((res == np.min(input_data, axis=0)).all(), True)

    def test_errors(self):
        paddle.enable_static()

        def test_input_type():
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):
                data = np.random.rand(10, 10)
                result_min = paddle.min(x=data, axis=0)

        self.assertRaises(TypeError, test_input_type)

    def test_imperative_api(self):
        paddle.disable_static()
        np_x = np.array([10, 10]).astype('float64')
        x = paddle.to_variable(np_x)
        z = paddle.min(x, axis=0)
        np_z = z.numpy()
        z_expected = np.array(np.min(np_x, axis=0))
        self.assertEqual((np_z == z_expected).all(), True)
80 changes: 80 additions & 0 deletions python/paddle/fluid/tests/unittests/test_minimum_op.py
@@ -0,0 +1,80 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core


class ApiMinimumTest(unittest.TestCase):
    def setUp(self):
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()

        self.input_x = np.random.rand(10, 15).astype("float32")
        self.input_y = np.random.rand(10, 15).astype("float32")
        self.input_z = np.random.rand(15).astype("float32")

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32")
            data_y = paddle.nn.data("y", shape=[10, 15], dtype="float32")
            result_min = paddle.minimum(data_x, data_y)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"x": self.input_x,
                                 "y": self.input_y},
                           fetch_list=[result_min])
            self.assertEqual((res == np.minimum(self.input_x, self.input_y)).all(),
                             True)

        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32")
            data_z = paddle.nn.data("z", shape=[15], dtype="float32")
            result_min = paddle.minimum(data_x, data_z, axis=1)
            exe = paddle.static.Executor(self.place)
            res, = exe.run(feed={"x": self.input_x,
                                 "z": self.input_z},
                           fetch_list=[result_min])
            self.assertEqual((res == np.minimum(self.input_x, self.input_z)).all(),
                             True)

    def test_dynamic_api(self):
        paddle.disable_static()
        x = paddle.to_variable(self.input_x)
        y = paddle.to_variable(self.input_y)
        z = paddle.minimum(x, y)
        np_z = z.numpy()
        z_expected = np.array(np.minimum(self.input_x, self.input_y))
        self.assertEqual((np_z == z_expected).all(), True)

    def test_broadcast_axis(self):
        paddle.disable_static()
        np_x = np.random.rand(5, 4, 3, 2).astype("float64")
        np_y = np.random.rand(4, 3).astype("float64")

        x = paddle.to_variable(np_x)
        y = paddle.to_variable(np_y)
        result_1 = paddle.minimum(x, y, axis=1)
        result_2 = paddle.minimum(x, y, axis=-2)
        self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
(Diffs for the remaining 3 changed files are not shown here.)
