diff --git a/benchmark/python/ffi/benchmark_ffi.py b/benchmark/python/ffi/benchmark_ffi.py
index 42b9fcc16d7f..3f82b186423b 100644
--- a/benchmark/python/ffi/benchmark_ffi.py
+++ b/benchmark/python/ffi/benchmark_ffi.py
@@ -60,6 +60,14 @@ def prepare_workloads():
     OpArgMngr.add_workload("tensordot", pool['2x2'], pool['2x2'], ((1, 0), (0, 1)))
     OpArgMngr.add_workload("kron", pool['2x2'], pool['2x2'])
     OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2'])
+    OpArgMngr.add_workload("random.shuffle", pool['3'])
+    OpArgMngr.add_workload("equal", pool['2x2'], pool['2x2'])
+    OpArgMngr.add_workload("not_equal", pool['2x2'], pool['2x2'])
+    OpArgMngr.add_workload("less", pool['2x2'], pool['2x2'])
+    OpArgMngr.add_workload("greater_equal", pool['2x2'], pool['2x2'])
+    OpArgMngr.add_workload("less_equal", pool['2x2'], pool['2x2'])
+    OpArgMngr.add_workload("maximum", pool['2x2'], pool['2x2'])
+    OpArgMngr.add_workload("minimum", pool['2x2'], pool['2x2'])
     OpArgMngr.add_workload("sum", pool['2x2'], axis=0, keepdims=True, out=pool['1x2'])
     OpArgMngr.add_workload("std", pool['2x2'], axis=0, ddof=0, keepdims=True, out=pool['1x2'])
     OpArgMngr.add_workload("var", pool['2x2'], axis=0, ddof=1, keepdims=True, out=pool['1x2'])
diff --git a/python/mxnet/ndarray/numpy/_op.py b/python/mxnet/ndarray/numpy/_op.py
index a311dc1b80fc..614a8cb4fadb 100644
--- a/python/mxnet/ndarray/numpy/_op.py
+++ b/python/mxnet/ndarray/numpy/_op.py
@@ -4536,7 +4536,9 @@ def maximum(x1, x2, out=None, **kwargs):
     -------
     out : mxnet.numpy.ndarray or scalar
         The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars."""
-    return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)
+    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
+        return _np.maximum(x1, x2, out=out)
+    return _api_internal.maximum(x1, x2, out)
 
 
 @set_module('mxnet.ndarray.numpy')
@@ -4576,7 +4578,9 @@ def minimum(x1, x2, out=None, **kwargs):
     -------
     out : mxnet.numpy.ndarray or scalar
         The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars."""
-    return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)
+    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
+        return _np.minimum(x1, x2, out=out)
+    return _api_internal.minimum(x1, x2, out)
 
 
 @set_module('mxnet.ndarray.numpy')
@@ -6612,7 +6616,9 @@ def equal(x1, x2, out=None):
     >>> np.equal(1, np.ones(1))
     array([ True])
     """
-    return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)
+    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
+        return _np.equal(x1, x2, out=out)
+    return _api_internal.equal(x1, x2, out)
 
 
 @set_module('mxnet.ndarray.numpy')
@@ -6644,7 +6650,10 @@ def not_equal(x1, x2, out=None):
     >>> np.not_equal(1, np.ones(1))
     array([False])
     """
-    return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)
+    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
+        return _np.not_equal(x1, x2, out=out)
+    return _api_internal.not_equal(x1, x2, out)
+
 
 
 @set_module('mxnet.ndarray.numpy')
@@ -6709,7 +6718,9 @@ def less(x1, x2, out=None):
     >>> np.less(1, np.ones(1))
     array([False])
     """
-    return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)
+    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
+        return _np.less(x1, x2, out=out)
+    return _api_internal.less(x1, x2, out)
 
 
 @set_module('mxnet.ndarray.numpy')
@@ -6741,8 +6752,10 @@ def greater_equal(x1, x2, out=None):
     >>> np.greater_equal(1, np.ones(1))
     array([True])
     """
-    return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,
-                         _npi.less_equal_scalar, out)
+    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
+        return _np.greater_equal(x1, x2, out=out)
+    return _api_internal.greater_equal(x1, x2, out)
+
 
 
 @set_module('mxnet.ndarray.numpy')
@@ -6774,8 +6787,9 @@ def less_equal(x1, x2, out=None):
     >>> np.less_equal(1, np.ones(1))
     array([True])
     """
-    return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,
-                         _npi.greater_equal_scalar, out)
+    if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
+        return _np.less_equal(x1, x2, out=out)
+    return _api_internal.less_equal(x1, x2, out)
 
 
 @set_module('mxnet.ndarray.numpy')
diff --git a/python/mxnet/ndarray/numpy/random.py b/python/mxnet/ndarray/numpy/random.py
index f6e5bce00e75..4f5f024f9236 100644
--- a/python/mxnet/ndarray/numpy/random.py
+++ b/python/mxnet/ndarray/numpy/random.py
@@ -990,7 +990,7 @@ def shuffle(x):
            [3., 4., 5.],
            [0., 1., 2.]])
     """
-    _npi.shuffle(x, out=x)
+    _api_internal.shuffle(x, x)
 
 
 def laplace(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
diff --git a/src/api/operator/numpy/np_elemwise_broadcast_logic_op.cc b/src/api/operator/numpy/np_elemwise_broadcast_logic_op.cc
new file mode 100644
index 000000000000..f0ca4081b2c8
--- /dev/null
+++ b/src/api/operator/numpy/np_elemwise_broadcast_logic_op.cc
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file np_elemwise_broadcast_logic_op.cc
+ * \brief Implementation of the API of functions in src/operator/numpy/np_elemwise_broadcast_logic_op.cc
+ */
+#include <mxnet/api_registry.h>
+#include <mxnet/runtime/packed_func.h>
+#include "../utils.h"
+#include "../ufunc_helper.h"
+
+namespace mxnet {
+
+MXNET_REGISTER_API("_npi.equal")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_equal");
+  const nnvm::Op* op_scalar = Op::Get("_npi_equal_scalar");
+  UFuncHelper(args, ret, op, op_scalar, nullptr);
+});
+
+MXNET_REGISTER_API("_npi.not_equal")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_not_equal");
+  const nnvm::Op* op_scalar = Op::Get("_npi_not_equal_scalar");
+  UFuncHelper(args, ret, op, op_scalar, nullptr);
+});
+
+MXNET_REGISTER_API("_npi.less")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_less");
+  const nnvm::Op* op_scalar = Op::Get("_npi_less_scalar");
+  const nnvm::Op* op_rscalar = Op::Get("_npi_greater_scalar");
+  UFuncHelper(args, ret, op, op_scalar, op_rscalar);
+});
+
+MXNET_REGISTER_API("_npi.greater_equal")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_greater_equal");
+  const nnvm::Op* op_scalar = Op::Get("_npi_greater_equal_scalar");
+  const nnvm::Op* op_rscalar = Op::Get("_npi_less_equal_scalar");
+  UFuncHelper(args, ret, op, op_scalar, op_rscalar);
+});
+
+MXNET_REGISTER_API("_npi.less_equal")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_less_equal");
+  const nnvm::Op* op_scalar = Op::Get("_npi_less_equal_scalar");
+  const nnvm::Op* op_rscalar = Op::Get("_npi_greater_equal_scalar");
+  UFuncHelper(args, ret, op, op_scalar, op_rscalar);
+});
+
+} // namespace mxnet
diff --git a/src/api/operator/random/shuffle_op.cc b/src/api/operator/random/shuffle_op.cc
new file mode 100644
index 000000000000..222451cb0f3b
--- /dev/null
+++ b/src/api/operator/random/shuffle_op.cc
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file shuffle_op.cc
+ * \brief Implementation of the API of functions in src/operator/random/shuffle_op.cc
+ */
+#include <mxnet/api_registry.h>
+#include <mxnet/runtime/packed_func.h>
+#include "../utils.h"
+#include "../../../operator/elemwise_op_common.h"
+
+namespace mxnet {
+
+MXNET_REGISTER_API("_npi.shuffle")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_shuffle");
+  nnvm::NodeAttrs attrs;
+
+  NDArray* inputs[1];
+  int num_inputs = 1;
+
+  if (args[0].type_code() != kNull) {
+    inputs[0] = args[0].operator mxnet::NDArray *();
+  }
+
+  attrs.op = op;
+
+  NDArray* out = args[1].operator mxnet::NDArray*();
+  NDArray** outputs = out == nullptr ? nullptr : &out;
+  int num_outputs = out != nullptr;
+  auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, outputs);
+  if (out) {
+    *ret = PythonArg(1);
+  } else {
+    *ret = ndoutputs[0];
+  }
+});
+
+} // namespace mxnet
diff --git a/src/api/operator/tensor/elemwise_binary_broadcast_op_extended.cc b/src/api/operator/tensor/elemwise_binary_broadcast_op_extended.cc
new file mode 100644
index 000000000000..f25e30a8b081
--- /dev/null
+++ b/src/api/operator/tensor/elemwise_binary_broadcast_op_extended.cc
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file elemwise_binary_broadcast_op_extended.cc
+ * \brief Implementation of the API of functions in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc
+ */
+#include <mxnet/api_registry.h>
+#include <mxnet/runtime/packed_func.h>
+#include "../utils.h"
+#include "../ufunc_helper.h"
+
+namespace mxnet {
+
+MXNET_REGISTER_API("_npi.maximum")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_maximum");
+  const nnvm::Op* op_scalar = Op::Get("_npi_maximum_scalar");
+  UFuncHelper(args, ret, op, op_scalar, nullptr);
+});
+
+MXNET_REGISTER_API("_npi.minimum")
+.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
+  using namespace runtime;
+  const nnvm::Op* op = Op::Get("_npi_minimum");
+  const nnvm::Op* op_scalar = Op::Get("_npi_minimum_scalar");
+  UFuncHelper(args, ret, op, op_scalar, nullptr);
+});
+
+} // namespace mxnet
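
Note (not part of the patch): a minimal usage sketch of the front-end functions whose dispatch this diff reroutes onto the new FFI calls, assuming a build containing the changes above and NumPy semantics enabled via npx.set_np(); the comments mirror the branches added in the Python wrappers.

    from mxnet import np, npx

    npx.set_np()                      # enable NumPy-compatible array semantics

    a = np.array([[1., 2.], [3., 4.]])
    b = np.array([[2., 2.], [2., 2.]])

    np.maximum(a, b)                  # ndarray, ndarray -> _api_internal.maximum
    np.less(a, 3)                     # ndarray, scalar  -> scalar op via UFuncHelper
    np.greater_equal(2, a)            # scalar, ndarray  -> reflected scalar op
    np.equal(1, 1)                    # both scalars     -> falls back to official NumPy

    x = np.arange(6.)
    np.random.shuffle(x)              # shuffles x in place through _api_internal.shuffle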