
[OP] Topk and arange + Update submodules (#4565)

* initial add topk arange

fix arange

Put back ones and zeros

fix

fix default ctx

* fix lint

* Update Submodules

* Update MShadow

* fix warning

* fix pylint

* style fix
1 parent 3a820bf · commit 9f9c135bbb8853351c927caf5c53e5a9524f156f · @sxjscience committed on GitHub Jan 7, 2017
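For orientation, the sketch below shows how the ndarray helpers touched by this commit are expected to behave once it is merged; the concrete values and shapes are illustrative, not taken from the diff.

    import mxnet as mx

    # arange gains a `repeat` argument: each generated value is emitted `repeat` times
    a = mx.nd.arange(0, 3, step=1.0, repeat=2)   # expected: [0, 0, 1, 1, 2, 2]

    # zeros/ones now dispatch to the internal _zeros/_ones operators instead of
    # allocating with empty() and filling from Python
    z = mx.nd.zeros((2, 3), ctx=mx.cpu(0))
    o = mx.nd.ones((2, 3))

    print(a.asnumpy(), z.asnumpy(), o.asnumpy())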
@@ -1049,9 +1049,11 @@ def zeros(shape, ctx=None, dtype=mx_real_t):
    out: Array
        The created NDArray.
    """
-    arr = empty(shape, ctx, dtype)
-    arr[:] = 0.0
-    return arr
+    if ctx is None:
+        ctx = Context.default_ctx
+    # pylint: disable= no-member, protected-access
+    return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype)
+    # pylint: enable= no-member, protected-access

def ones(shape, ctx=None, dtype=mx_real_t):
    """Create a new NDArray filled with 1, with specified shape.
@@ -1068,9 +1070,11 @@ def ones(shape, ctx=None, dtype=mx_real_t):
    out: Array
        The created NDArray.
    """
-    arr = empty(shape, ctx, dtype)
-    arr[:] = 1.0
-    return arr
+    if ctx is None:
+        ctx = Context.default_ctx
+    # pylint: disable= no-member, protected-access
+    return _internal._ones(shape=shape, ctx=ctx, dtype=dtype)
+    # pylint: enable= no-member, protected-access

def full(shape, val, ctx=None, dtype=mx_real_t):
    """Create a new NDArray filled with given value, with specified shape.
@@ -1174,6 +1178,39 @@ def concatenate(arrays, axis=0, always_copy=True):
    return ret

+# pylint: disable= no-member, protected-access, too-many-arguments
+def arange(start, stop=None, step=1.0, repeat=1, ctx=None, dtype=mx_real_t):
+    """MXNet ndarray equivalent of numpy.arange: evenly spaced values within an interval.
+    See also https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html.
+
+    Parameters
+    ----------
+    start : number, optional
+        Start of interval. The interval includes this value. The default start value is 0.
+    stop : number, optional
+        End of interval. The interval does not include this value.
+    step : number, optional
+        Spacing between values.
+    repeat : number, optional
+        The number of times each element is repeated. E.g. with repeat=3,
+        the element a will be repeated three times --> a, a, a.
+    ctx : Context, optional
+        The context of the NDArray, defaults to the current default context.
+    dtype : type, optional
+        The value type of the NDArray, defaults to np.float32.
+
+    Returns
+    -------
+    out : NDArray
+        The created NDArray
+    """
+    if ctx is None:
+        ctx = Context.default_ctx
+    return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
+                             dtype=dtype, ctx=str(ctx))
+# pylint: enable= no-member, protected-access, too-many-arguments
+
+
def load(fname):
    """Load ndarray from binary file.
@@ -1186,3 +1186,68 @@ def hypot(left, right):
        return _numpy.hypot(left, right)
    else:
        raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
+
+
+def zeros(shape, dtype=_numpy.float32):
+    """Create a Tensor filled with zeros, similar to numpy.zeros.
+    See also https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html.
+
+    Parameters
+    ----------
+    shape : int or sequence of ints
+        Shape of the new array.
+    dtype : type, optional
+        The value type of the NDArray, defaults to np.float32.
+
+    Returns
+    -------
+    out : Symbol
+        The created Symbol
+    """
+    return _internal._zeros(shape=shape, dtype=dtype)
+
+
+def ones(shape, dtype=_numpy.float32):
+    """Create a Tensor filled with ones, similar to numpy.ones.
+    See also https://docs.scipy.org/doc/numpy/reference/generated/numpy.ones.html.
+
+    Parameters
+    ----------
+    shape : int or sequence of ints
+        Shape of the new array.
+    dtype : type, optional
+        The value type of the NDArray, defaults to np.float32.
+
+    Returns
+    -------
+    out : Symbol
+        The created Symbol
+    """
+    return _internal._ones(shape=shape, dtype=dtype)
+
+
+def arange(start, stop=None, step=1.0, repeat=1, name=None, dtype=_numpy.float32):
+    """MXNet Symbol equivalent of numpy.arange: evenly spaced values within an interval.
+    See also https://docs.scipy.org/doc/numpy/reference/generated/numpy.arange.html.
+
+    Parameters
+    ----------
+    start : number
+        Start of interval. The interval includes this value. The default start value is 0.
+    stop : number, optional
+        End of interval. The interval does not include this value.
+    step : number, optional
+        Spacing between values.
+    repeat : int, optional
+        The number of times each element is repeated. E.g. with repeat=3,
+        the element a will be repeated three times --> a, a, a.
+    dtype : type, optional
+        The value type of the NDArray, defaults to np.float32.
+
+    Returns
+    -------
+    out : Symbol
+        The created Symbol
+    """
+    return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
+                             name=name, dtype=dtype)
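The symbol-side versions above mirror the ndarray API but are evaluated lazily; since zeros/ones/arange symbols take no input variables, a graph built from them can be bound without argument arrays. A minimal sketch (shapes and names are illustrative):

    import mxnet as mx

    idx = mx.sym.arange(0, 6, step=1.0, repeat=2, name='idx')   # 12 values: 0,0,1,1,...,5,5
    out = idx + mx.sym.zeros(shape=(12,))

    # no input variables, so bind with an empty argument dict
    ex = out.bind(mx.cpu(), {})
    print(ex.forward()[0].asnumpy())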
@@ -1,129 +0,0 @@
-/*!
- * Copyright (c) 2015 by Contributors
- * \file block_grad-inl.h
- * \brief
- * \author Bing Xu
-*/
-#ifndef MXNET_OPERATOR_BLOCK_GRAD_INL_H_
-#define MXNET_OPERATOR_BLOCK_GRAD_INL_H_
-#include <dmlc/logging.h>
-#include <mxnet/operator.h>
-#include <cstring>
-#include <map>
-#include <string>
-#include <vector>
-#include <utility>
-#include "./mshadow_op.h"
-#include "./operator_common.h"
-
-namespace mxnet {
-namespace op {
-
-namespace blockgrad {
-enum BlockGradientOpInputs {kData};
-enum BlockGradientOpOutputs {kOut};
-} // namespace blockgrad
-
-template<typename xpu, typename DType>
-class BlockGradientOp : public Operator {
- public:
- virtual void Forward(const OpContext &ctx,
- const std::vector<TBlob> &in_data,
- const std::vector<OpReqType> &req,
- const std::vector<TBlob> &out_data,
- const std::vector<TBlob> &aux_args) {
- using namespace mshadow;
- using namespace mshadow::expr;
- CHECK_EQ(in_data.size(), 1);
- CHECK_EQ(out_data.size(), 1);
- Stream<xpu> *s = ctx.get_stream<xpu>();
- Tensor<xpu, 2, DType> data = in_data[blockgrad::kData].FlatTo2D<xpu, DType>(s);
- Tensor<xpu, 2, DType> out = out_data[blockgrad::kOut].FlatTo2D<xpu, DType>(s);
- out = F<mshadow_op::identity>(data);
- }
-
- virtual void Backward(const OpContext &ctx,
- const std::vector<TBlob> &out_grad,
- const std::vector<TBlob> &in_data,
- const std::vector<TBlob> &out_data,
- const std::vector<OpReqType> &req,
- const std::vector<TBlob> &in_grad,
- const std::vector<TBlob> &aux_args) {
- using namespace mshadow;
- using namespace mshadow::expr;
- Stream<xpu> *s = ctx.get_stream<xpu>();
- Tensor<xpu, 2, DType> grad = in_grad[blockgrad::kData].FlatTo2D<xpu, DType>(s);
- grad = 0.f;
- }
-}; // class BlockGradientOp
-
-template<typename xpu>
-Operator *CreateOp(int dtype);
-
-#if DMLC_USE_CXX11
-class BlockGradientProp : public OperatorProperty {
- public:
- void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {}
-
- std::map<std::string, std::string> GetParams() const override {
- return std::map<std::string, std::string>();
- }
-
- bool InferShape(std::vector<TShape> *in_shape,
- std::vector<TShape> *out_shape,
- std::vector<TShape> *aux_shape) const override {
- using namespace mshadow;
- CHECK_EQ(in_shape->size(), 1);
- const TShape &dshape = in_shape->at(blockgrad::kData);
- if (dshape.ndim() == 0) return false;
- out_shape->clear();
- out_shape->push_back(dshape);
- return true;
- }
-
- bool InferType(std::vector<int> *in_type,
- std::vector<int> *out_type,
- std::vector<int> *aux_type) const override {
- CHECK_EQ(in_type->size(), 1);
- int dtype = (*in_type)[0];
- CHECK_NE(dtype, -1) << "Input must have specified type";
- out_type->clear();
- out_type->push_back(dtype);
- return true;
- }
-
- OperatorProperty* Copy() const override {
- return new BlockGradientProp();
- }
-
- std::string TypeString() const override {
- return "BlockGrad";
- }
-
- std::vector<int> DeclareBackwardDependency(
- const std::vector<int> &out_grad,
- const std::vector<int> &in_data,
- const std::vector<int> &out_data) const override {
- return {};
- }
-
- std::vector<std::pair<int, void*> > ForwardInplaceOption(
- const std::vector<int> &in_data,
- const std::vector<void*> &out_data) const override {
- return {{in_data[blockgrad::kData], out_data[blockgrad::kOut]}};
- }
-
- Operator* CreateOperator(Context ctx) const override {
- LOG(FATAL) << "Not Implemented";
- return NULL;
- }
-
- Operator* CreateOperatorEx(Context ctx, std::vector<TShape> *in_shape,
- std::vector<int> *in_type) const override;
-}; // class BlockGradientProperty
-
-#endif // DMLC_USE_CXX11
-} // namespace op
-} // namespace mxnet
-
-#endif // MXNET_OPERATOR_BLOCK_GRAD_INL_H_
@@ -1,35 +0,0 @@
-/*!
- * Copyright (c) 2015 by Contributors
- * \file block_grad.cc
- * \brief
- * \author Bing Xu
-*/
-#include "./block_grad-inl.h"
-
-namespace mxnet {
-namespace op {
-template<>
-Operator *CreateOp<cpu>(int dtype) {
- Operator *op = NULL;
- MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
- op = new BlockGradientOp<cpu, DType>();
- });
- return op;
-}
-
-Operator *BlockGradientProp::CreateOperatorEx(Context ctx, std::vector<TShape> *in_shape,
- std::vector<int> *in_type) const {
- std::vector<TShape> out_shape, aux_shape;
- std::vector<int> out_type, aux_type;
- CHECK(InferType(in_type, &out_type, &aux_type));
- CHECK(InferShape(in_shape, &out_shape, &aux_shape));
- DO_BIND_DISPATCH(CreateOp, in_type->at(0));
-}
-
-MXNET_REGISTER_OP_PROPERTY(BlockGrad, BlockGradientProp)
-.describe("Get output from a symbol and pass 0 gradient back")
-.add_argument("data", "Symbol", "Input data.");
-
-} // namespace op
-} // namespace mxnet
-
@@ -1,22 +0,0 @@
-/*!
- * Copyright (c) 2015 by Contributors
- * \file block_grad.cc
- * \brief
- * \author Bing Xu
-*/
-#include "./block_grad-inl.h"
-
-namespace mxnet {
-namespace op {
-template<>
-Operator *CreateOp<gpu>(int dtype) {
- Operator *op = NULL;
- MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
- op = new BlockGradientOp<gpu, DType>();
- });
- return op;
-}
-
-} // namespace op
-} // namespace mxnet
-
@@ -9,11 +9,19 @@ namespace mxnet {
namespace op {
MXNET_OPERATOR_REGISTER_REDUCE_AXIS(argmax)
.MXNET_DESCRIBE("Compute argmax")
-.set_attr<FCompute>("FCompute<cpu>", SearchAxisCompute<cpu, mshadow::red::maximum>);
+.set_attr<FCompute>("FCompute<cpu>", SearchAxisCompute<cpu, mshadow::red::maximum>)
+.set_attr<nnvm::FGradient>("FGradient",
+ [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+ return MakeGradNode("_zeros", n, {}, {});
+});
MXNET_OPERATOR_REGISTER_REDUCE_AXIS(argmin)
.MXNET_DESCRIBE("Compute argmin")
-.set_attr<FCompute>("FCompute<cpu>", SearchAxisCompute<cpu, mshadow::red::minimum>);
+.set_attr<FCompute>("FCompute<cpu>", SearchAxisCompute<cpu, mshadow::red::minimum>)
+.set_attr<nnvm::FGradient>("FGradient",
+ [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+ return MakeGradNode("_zeros", n, {}, {});
+});
// Legacy support
NNVM_REGISTER_OP(argmax_channel)
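The FGradient attributes registered above make argmax/argmin emit an all-zero gradient instead of failing when they sit inside a differentiated graph. A hedged sketch of the intended effect from the Python side (shapes and values are assumptions, not from the diff):

    import numpy as np
    import mxnet as mx

    data = mx.sym.Variable('data')
    am = mx.sym.argmax(data, axis=1)          # output shape (2,)

    ex = am.simple_bind(mx.cpu(), data=(2, 3))
    ex.forward(is_train=True, data=np.arange(6).reshape(2, 3))
    ex.backward(out_grads=mx.nd.ones((2,)))
    print(ex.grad_dict['data'].asnumpy())     # expected: zeros((2, 3)), per the _zeros FGradient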
@@ -26,6 +26,15 @@ NNVM_REGISTER_OP(_backward_copy)
})
.set_attr<FCompute>("FCompute<cpu>", IdentityCompute<cpu>);
+MXNET_OPERATOR_REGISTER_UNARY(BlockGrad)
+.MXNET_DESCRIBE("Get output from a symbol and pass 0 gradient back")
+.set_attr<FCompute>("FCompute<cpu>", IdentityCompute<cpu>)
+.set_attr<nnvm::FGradient>("FGradient",
+ [](const nnvm::NodePtr& n, const std::vector<nnvm::NodeEntry>& ograds) {
+ // pass back zero gradient
+ return MakeGradNode("_zeros", n, {}, {});
+});
+
// identity output as first input, but attributes are constrained to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_num_inputs(2)
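With the registration above, BlockGrad is the identity in the forward pass and returns a zero gradient in the backward pass, replacing the Operator-based implementation deleted earlier in this commit. A small illustrative sketch (values are assumptions):

    import numpy as np
    import mxnet as mx

    x = mx.sym.Variable('x')
    y = mx.sym.BlockGrad(x) * 2.0 + x   # gradient can only flow through the `+ x` term

    ex = y.simple_bind(mx.cpu(), x=(3,))
    ex.forward(is_train=True, x=np.array([1.0, 2.0, 3.0]))
    ex.backward(out_grads=mx.nd.ones((3,)))
    print(ex.grad_dict['x'].asnumpy())  # expected: [1. 1. 1.], the BlockGrad branch contributes 0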
@@ -15,6 +15,9 @@ NNVM_REGISTER_OP(_copy)
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", IdentityCompute<gpu>);
+NNVM_REGISTER_OP(BlockGrad)
+.set_attr<FCompute>("FCompute<gpu>", IdentityCompute<gpu>);
+
// identity output as first input, but attributes are constrained to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", IdentityCompute<gpu>);