Skip to content
This repository was archived by the owner on Nov 17, 2023. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions python/mxnet/ndarray.py
Original file line number Diff line number Diff line change
Expand Up @@ -2019,7 +2019,7 @@ def negative(arr):
return multiply(arr, -1.0)

def load(fname):
"""Load array from file.
"""Loads an array from file.

See more details in ``save``.

Expand Down Expand Up @@ -2053,7 +2053,7 @@ def load(fname):


def save(fname, data):
"""Save a list of arrays of a str->array dict into file.
"""Saves a list of arrays or a dict of str->array to file.

Examples of filenames:

Expand All @@ -2066,7 +2066,7 @@ def save(fname, data):
fname : str
The filename.
data : list of ``NDArray`` or dict of str to ``NDArray``
The data for saving.
The data to save.

Examples
--------
Expand Down
2 changes: 1 addition & 1 deletion python/mxnet/random.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from ._ndarray_internal import _sample_gennegbinomial as generalized_negative_binomial

def seed(seed_state):
"""Seed the random number generators in MXNet.
"""Seeds the random number generators in MXNet.

This seed will affect behavior of functions in this module.
It also affects the results from executors that contain random numbers
Expand Down
5 changes: 2 additions & 3 deletions src/operator/activation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -65,10 +65,9 @@ Operator *ActivationProp::CreateOperatorEx(Context ctx, std::vector<TShape> *in_
DMLC_REGISTER_PARAMETER(ActivationParam);

MXNET_REGISTER_OP_PROPERTY(Activation, ActivationProp)
.describe(R"code(Elementwise activation function.
The activation operations are applied element-wise to each array elements.
.describe(R"code(Applies an activation function element-wise to the input.

The following types are supported:
The following activation functions are supported:

- `relu`: Rectified Linear Unit, :math:`y = max(x, 0)`
- `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}`
Expand Down
2 changes: 1 addition & 1 deletion src/operator/bilinear_sampler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ MXNET_REGISTER_OP_PROPERTY(BilinearSampler, BilinearSamplerProp)
.add_argument("grid", "NDArray-or-Symbol", "Input grid to the BilinearsamplerOp."
"grid has two channels: x_src, y_src")
.add_arguments(BilinearSamplerParam::__FIELDS__())
.describe("Apply bilinear sampling to input feature map,"
.describe("Applies bilinear sampling to input feature map,"
" which is the key of \"[NIPS2015] Spatial Transformer Networks\"\n "
"output[batch, channel, y_dst, x_dst] = G(data[batch, channel, y_src, x_src)\n "
"x_dst, y_dst enumerate all spatial locations in output\n "
Expand Down
2 changes: 1 addition & 1 deletion src/operator/concat.cc
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ Operator* ConcatProp::CreateOperatorEx(Context ctx, std::vector<TShape> *in_shap
DMLC_REGISTER_PARAMETER(ConcatParam);

MXNET_REGISTER_OP_PROPERTY(Concat, ConcatProp)
.describe(R"code(Join input arrays along the given axis.
.describe(R"code(Joins input arrays along a given axis.

.. note:: `Concat` is deprecated. Use `concat` instead.

Expand Down
2 changes: 1 addition & 1 deletion src/operator/correlation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ Operator* CorrelationProp::CreateOperator(Context ctx) const {
}
DMLC_REGISTER_PARAMETER(CorrelationParam);
MXNET_REGISTER_OP_PROPERTY(Correlation, CorrelationProp)
.describe("Apply correlation to inputs")
.describe("Applies correlation to inputs.")
.add_argument("data1", "NDArray-or-Symbol", "Input data1 to the correlation.")
.add_argument("data2", "NDArray-or-Symbol", "Input data2 to the correlation.")
.add_arguments(CorrelationParam::__FIELDS__());
Expand Down
2 changes: 1 addition & 1 deletion src/operator/deconvolution.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ MXNET_REGISTER_OP_PROPERTY(Deconvolution, DeconvolutionProp)
.add_argument("weight", "NDArray-or-Symbol", "Weight matrix.")
.add_argument("bias", "NDArray-or-Symbol", "Bias parameter.")
.add_arguments(DeconvolutionParam::__FIELDS__())
.describe("Apply deconvolution to input then add a bias.");
.describe("Applies deconvolution to input and adds a bias.");

} // namespace op
} // namespace mxnet
2 changes: 1 addition & 1 deletion src/operator/dropout.cc
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ Operator *DropoutProp::CreateOperatorEx(Context ctx, std::vector<TShape> *in_sha
DMLC_REGISTER_PARAMETER(DropoutParam);

MXNET_REGISTER_OP_PROPERTY(Dropout, DropoutProp)
.describe(R"(Apply dropout to input.
.describe(R"(Applies dropout to input.
During training, each element of the input is randomly set to zero with probability p.
And then the whole tensor is rescaled by 1/(1-p) to keep the expectation the same as
before applying dropout. During the test time, this behaves as an identity map.
Expand Down
2 changes: 1 addition & 1 deletion src/operator/fully_connected.cc
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ Operator *FullyConnectedProp::CreateOperatorEx(Context ctx, std::vector<TShape>
DMLC_REGISTER_PARAMETER(FullyConnectedParam);

MXNET_REGISTER_OP_PROPERTY(FullyConnected, FullyConnectedProp)
.describe(R"code(Apply a linear transformation: :math:`Y = XW^T + b`.
.describe(R"code(Applies a linear transformation: :math:`Y = XW^T + b`.

Shapes:

Expand Down
2 changes: 1 addition & 1 deletion src/operator/grid_generator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ MXNET_REGISTER_OP_PROPERTY(GridGenerator, GridGeneratorProp)
.describe("if transformation type is affine, data is affine matrix : (batch, 6)")
.describe("if transformation type is warp, data is optical flow : (batch, 2, h, w)")
.add_arguments(GridGeneratorParam::__FIELDS__())
.describe("generate sampling grid for bilinear sampling.");
.describe("Generates sampling grid for bilinear sampling.");

} // namespace op
} // namespace mxnet
2 changes: 1 addition & 1 deletion src/operator/leaky_relu.cc
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ Operator *LeakyReLUProp::CreateOperator(Context ctx) const {
DMLC_REGISTER_PARAMETER(LeakyReLUParam);

MXNET_REGISTER_OP_PROPERTY(LeakyReLU, LeakyReLUProp)
.describe(R"code(Leaky ReLu activation
.describe(R"code(Applies leaky ReLU activation element-wise to the input.

The following types are supported:

Expand Down
2 changes: 1 addition & 1 deletion src/operator/lrn.cc
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ DMLC_REGISTER_PARAMETER(LRNParam);
MXNET_REGISTER_OP_PROPERTY(LRN, LocalResponseNormProp)
.add_argument("data", "NDArray-or-Symbol", "Input data to the ConvolutionOp.")
.add_arguments(LRNParam::__FIELDS__())
.describe("Apply convolution to input then add a bias.");
.describe("Applies convolution to input and then adds a bias.");

} // namespace op
} // namespace mxnet
2 changes: 1 addition & 1 deletion src/operator/nn/softmax.cc
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_softmax)
mxnet_op::softmax_bwd>);

MXNET_OPERATOR_REGISTER_UNARY(log_softmax)
.describe(R"code(Compute the log softmax of the input.
.describe(R"code(Computes the log softmax of the input.
This is equivalent to computing softmax followed by log.

Examples::
Expand Down
4 changes: 2 additions & 2 deletions src/operator/pad.cc
Original file line number Diff line number Diff line change
Expand Up @@ -400,9 +400,9 @@ Operator *PadProp::CreateOperatorEx(Context ctx, std::vector<TShape> *in_shape,
DMLC_REGISTER_PARAMETER(PadParam);

MXNET_REGISTER_OP_PROPERTY(Pad, PadProp)
.describe(R"code(Pad an array.
.describe(R"code(Pads an array.

Only supports 4-D and 5-D input array.
Only supports 4-D and 5-D input arrays.

)code" ADD_FILELINE)
.add_argument("data", "NDArray-or-Symbol", "An n-dimensional input tensor.")
Expand Down
2 changes: 1 addition & 1 deletion src/operator/pooling.cc
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ Operator* PoolingProp::CreateOperatorEx(Context ctx, std::vector<TShape> *in_sha
DMLC_REGISTER_PARAMETER(PoolingParam);

MXNET_REGISTER_OP_PROPERTY(Pooling, PoolingProp)
.describe(R"code(Perform pooling on the input.
.describe(R"code(Performs pooling on the input.

The shapes for 1-D pooling are

Expand Down
6 changes: 3 additions & 3 deletions src/operator/regression_output.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ Operator *RegressionOutputProp<type>::CreateOperator(Context ctx) const {
DMLC_REGISTER_PARAMETER(RegressionOutputParam);

MXNET_REGISTER_OP_PROPERTY(LinearRegressionOutput, RegressionOutputProp<reg_enum::kLinear>)
.describe(R"code(LinearRegressionOutput computes and optimizes for squared loss.
.describe(R"code(Computes and optimizes for squared loss.

.. note::
Use the LinearRegressionOutput as the final output layer of a net.
Expand All @@ -48,7 +48,7 @@ The parameter `grad_scale` can be used to change this scale to `grad_scale/n`.
.add_arguments(RegressionOutputParam::__FIELDS__());

MXNET_REGISTER_OP_PROPERTY(MAERegressionOutput, RegressionOutputProp<reg_enum::kMAE>)
.describe(R"code(MAERegressionOutput function computes mean absolute error.
.describe(R"code(Computes mean absolute error of the input.

MAE is a risk metric corresponding to the expected value of the absolute error.

Expand All @@ -69,7 +69,7 @@ The parameter `grad_scale` can be used to change this scale to `grad_scale/n`.
.add_arguments(RegressionOutputParam::__FIELDS__());

MXNET_REGISTER_OP_PROPERTY(LogisticRegressionOutput, RegressionOutputProp<reg_enum::kLogistic>)
.describe(R"code(LogisticRegressionOutput applies a logistic function to the input.
.describe(R"code(Applies a logistic function to the input.

The logistic function, also known as the sigmoid function, is computed as
:math:`\frac{1}{1+exp(-x)}`.
Expand Down
2 changes: 1 addition & 1 deletion src/operator/rnn.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ Operator *RNNProp::CreateOperatorEx(Context ctx,
DMLC_REGISTER_PARAMETER(RNNParam);

MXNET_REGISTER_OP_PROPERTY(RNN, RNNProp)
.describe("Apply a recurrent layer to input.")
.describe("Applies a recurrent layer to input.")
.add_argument("data", "NDArray-or-Symbol", "Input data to RNN")
.add_argument("parameters", "NDArray-or-Symbol",
"Vector of all RNN trainable parameters concatenated")
Expand Down
2 changes: 1 addition & 1 deletion src/operator/slice_channel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ Operator* SliceChannelProp::CreateOperatorEx(Context ctx,
DMLC_REGISTER_PARAMETER(SliceChannelParam);

MXNET_REGISTER_OP_PROPERTY(SliceChannel, SliceChannelProp)
.describe(R"code(Split an array along a particular axis into multiple sub-arrays.
.describe(R"code(Splits an array along a particular axis into multiple sub-arrays.

Assume the input array has shape ``(d_0, ..., d_n)`` and we slice it into *m*
(``num_outputs=m``) subarrays along axis *k*, then we will obtain a list of *m*
Expand Down
2 changes: 1 addition & 1 deletion src/operator/softmax_activation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ Operator *SoftmaxActivationProp::CreateOperator(Context ctx) const {
DMLC_REGISTER_PARAMETER(SoftmaxActivationParam);

MXNET_REGISTER_OP_PROPERTY(SoftmaxActivation, SoftmaxActivationProp)
.describe("Apply softmax activation to input. This is intended for internal layers. "
.describe("Applies softmax activation to input. This is intended for internal layers. "
"For output (loss layer) please use SoftmaxOutput. If mode=instance, "
"this operator will compute a softmax for each instance in the batch; "
"this is the default mode. If mode=channel, this operator will compute "
Expand Down
2 changes: 1 addition & 1 deletion src/operator/softmax_output.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ Operator *SoftmaxOutputProp::CreateOperatorEx(Context ctx, std::vector<TShape> *
DMLC_REGISTER_PARAMETER(SoftmaxOutputParam);

MXNET_REGISTER_OP_PROPERTY(SoftmaxOutput, SoftmaxOutputProp)
.describe(R"code(Softmax with logit loss.
.describe(R"code(Computes softmax with logit loss.

In the forward pass, the softmax output is returned. Assume the input data has
shape *(n,k)*, then the output will have the same shape as the input, which is computed by
Expand Down
2 changes: 1 addition & 1 deletion src/operator/spatial_transformer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ MXNET_REGISTER_OP_PROPERTY(SpatialTransformer, SpatialTransformerProp)
"localisation net, the output dim should be 6 when transform_type "
is affine. You should initialize the weight and bias with identity transform.")
.add_arguments(SpatialTransformerParam::__FIELDS__())
.describe("Apply spatial transformer to input feature map.");
.describe("Applies a spatial transformer to input feature map.");

} // namespace op
} // namespace mxnet
2 changes: 1 addition & 1 deletion src/operator/swapaxis.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ DMLC_REGISTER_PARAMETER(SwapAxisParam);
MXNET_REGISTER_OP_PROPERTY(SwapAxis, SwapAxisProp)
.add_argument("data", "NDArray-or-Symbol", "Input array.")
.add_arguments(SwapAxisParam::__FIELDS__())
.describe(R"code(Interchange two axes of an array.
.describe(R"code(Interchanges two axes of an array.

Examples::

Expand Down
6 changes: 3 additions & 3 deletions src/operator/tensor/broadcast_reduce_op_value.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ DMLC_REGISTER_PARAMETER(BroadcastAxesParam);
DMLC_REGISTER_PARAMETER(BroadcastToParam);

inline std::string get_reduce_axes_description(const std::string& op_name, int line) {
std::string doc = R"code(Compute the __op__ of array elements over given axes.
std::string doc = R"code(Computes the __op__ of array elements over given axes.


Defined in )code";
Expand Down Expand Up @@ -56,7 +56,7 @@ MXNET_OPERATOR_REGISTER_REDUCE_BACKWARD(_backward_prod)
.set_attr<FCompute>("FCompute<cpu>", ReduceAxesBackwardUseInOut< cpu, mshadow_op::rdiv>);

MXNET_OPERATOR_REGISTER_REDUCE(nansum)
.describe(R"code(Compute the sum of array elements over given axes treating Not a Numbers ``NaN`` as zero.
.describe(R"code(Computes the sum of array elements over given axes treating Not a Numbers (``NaN``) as zero.

)code" ADD_FILELINE)
.set_attr<FCompute>("FCompute<cpu>", ReduceAxesCompute<cpu, mshadow_op::nansum>)
Expand All @@ -67,7 +67,7 @@ MXNET_OPERATOR_REGISTER_REDUCE_BACKWARD(_backward_nansum)
.set_attr<FCompute>("FCompute<cpu>", ReduceAxesBackwardUseInOut<cpu, mshadow_op::nansum_grad>);

MXNET_OPERATOR_REGISTER_REDUCE(nanprod)
.describe(R"code(Compute the product of array elements over given axes treating Not a Numbers ``NaN`` as one.
.describe(R"code(Computes the product of array elements over given axes treating Not a Numbers (``NaN``) as one.

)code" ADD_FILELINE)
.set_attr<FCompute>("FCompute<cpu>", ReduceAxesCompute<cpu, mshadow_op::nanprod>)
Expand Down
2 changes: 1 addition & 1 deletion src/operator/tensor/elemwise_sum.cc
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ bool ElementWiseSumType(const nnvm::NodeAttrs& attrs,

NNVM_REGISTER_OP(add_n)
.add_alias("ElementWiseSum")
.describe(R"doc(Add all input arguments element-wise.
.describe(R"doc(Adds all input arguments element-wise.

.. math::
add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n
Expand Down
4 changes: 2 additions & 2 deletions src/operator/tensor/elemwise_unary_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ NNVM_REGISTER_OP(_identity_with_attr_like_rhs)

NNVM_REGISTER_OP(Cast)
.add_alias("cast")
.describe(R"code(Casts all elements of the input to the new type.
.describe(R"code(Casts all elements of the input to a new type.

.. note:: ``Cast`` is deprecated. Use ``cast`` instead.

Expand Down Expand Up @@ -325,7 +325,7 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_log)

// sin
MXNET_OPERATOR_REGISTER_UNARY(sin)
.describe(R"code(Computes the element-wise sine of the input.
.describe(R"code(Computes the element-wise sine of the input array.

The input should be in radians (:math:`2\pi` rad equals 360 degrees).

Expand Down
19 changes: 9 additions & 10 deletions src/operator/tensor/matrix_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ DMLC_REGISTER_PARAMETER(ReverseParam);

NNVM_REGISTER_OP(Reshape)
.add_alias("reshape")
.describe(R"code(Reshapes the input array into a new shape.
.describe(R"code(Reshapes the input array.

.. note:: ``Reshape`` is deprecated, use ``reshape``

Expand Down Expand Up @@ -138,7 +138,7 @@ Example::
.add_argument("data", "NDArray-or-Symbol", "Input array.");

NNVM_REGISTER_OP(transpose)
.describe(R"code(Permute the dimensions of an array.
.describe(R"code(Permutes the dimensions of an array.

Examples::

Expand Down Expand Up @@ -196,7 +196,7 @@ Examples::


NNVM_REGISTER_OP(expand_dims)
.describe(R"code(Insert a new axis with size 1 into the array shape
.describe(R"code(Inserts a new axis of size 1 into the array shape

For example, given ``x`` with shape ``(2,3,4)``, then ``expand_dims(x, axis=1)``
will return a new array with shape ``(2,1,3,4)``.
Expand All @@ -218,7 +218,7 @@ will return a new array with shape ``(2,1,3,4)``.

NNVM_REGISTER_OP(slice)
.add_alias("crop")
.describe(R"code(Slice a continuous region of the array.
.describe(R"code(Slices a contiguous region of the array.

.. note:: ``crop`` is deprecated. Use ``slice`` instead.

Expand Down Expand Up @@ -300,7 +300,7 @@ NNVM_REGISTER_OP(_crop_assign_scalar)
.add_arguments(SimpleCropAssignScalarParam::__FIELDS__());

NNVM_REGISTER_OP(slice_axis)
.describe(R"code(Slice along a given axis.
.describe(R"code(Slices along a given axis.

Returns an array slice along a given `axis` starting from the `begin` index
to the `end` index.
Expand Down Expand Up @@ -422,7 +422,7 @@ NNVM_REGISTER_OP(_backward_batch_dot)
.set_attr<FCompute>("FCompute<cpu>", BatchDotBackward_<cpu>);

NNVM_REGISTER_OP(clip)
.describe(R"code(Clip (limit) the values in an array.
.describe(R"code(Clips (limits) the values in an array.

Given an interval, values outside the interval are clipped to the interval edges.
Clipping ``x`` between `a_min` and `a_max` would be::
Expand Down Expand Up @@ -454,7 +454,7 @@ NNVM_REGISTER_OP(_backward_clip)
.set_attr<FCompute>("FCompute<cpu>", ClipGrad_<cpu>);

NNVM_REGISTER_OP(repeat)
.describe(R"code(Repeat elements of an array.
.describe(R"code(Repeats elements of an array.

By default, ``repeat`` flattens the input array into 1-D and then repeats the
elements::
Expand Down Expand Up @@ -500,7 +500,7 @@ NNVM_REGISTER_OP(_backward_repeat)
.set_attr<FCompute>("FCompute<cpu>", RepeatOpBackward<cpu>);

NNVM_REGISTER_OP(tile)
.describe(R"code(Repeat the whole array by multiple times.
.describe(R"code(Repeats the whole array multiple times.

If ``reps`` has length *d* and the input array has dimension *n*, there are
three cases:
Expand Down Expand Up @@ -557,8 +557,7 @@ NNVM_REGISTER_OP(_backward_tile)
.set_attr<FCompute>("FCompute<cpu>", TileOpBackward<cpu>);

NNVM_REGISTER_OP(reverse)
.describe(R"code(Reverse the order of elements in an array along given axis.
The shape of the array is preserved.
.describe(R"code(Reverses the order of elements along given axis while preserving array shape.

Note: reverse and flip are equivalent. We use reverse in the following examples.

Expand Down
2 changes: 1 addition & 1 deletion src/operator/upsampling.cc
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ Operator* UpSamplingProp::CreateOperatorEx(Context ctx, std::vector<TShape> *in_
DMLC_REGISTER_PARAMETER(UpSamplingParam);

MXNET_REGISTER_OP_PROPERTY(UpSampling, UpSamplingProp)
.describe("Perform nearest neighboor/bilinear up sampling to inputs")
.describe("Performs nearest neighbor/bilinear up sampling to inputs")
.add_argument("data", "NDArray-or-Symbol[]", "Array of tensors to upsample")
.add_arguments(UpSamplingParam::__FIELDS__())
.set_key_var_num_args("num_args");
Expand Down