-
Notifications
You must be signed in to change notification settings - Fork 5.5k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Update some doc about API reference. #11495
Changes from all commits
ea4d445
e7816db
7045775
b58eec8
ccf743b
573009d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -36,11 +36,12 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { | |
void Apply() override { | ||
AddAttr<float>("mean", | ||
"(float, default 0.0) " | ||
"mean of random tensor.") | ||
"The mean (or center) of the gaussian distribution.") | ||
.SetDefault(.0f); | ||
AddAttr<float>("std", | ||
"(float, default 1.0) " | ||
"std of random tensor.") | ||
"The standard deviation (std, or spread) of the " | ||
"gaussian distribution.") | ||
.SetDefault(1.0f); | ||
AddAttr<int>("seed", | ||
"(int, default 0) " | ||
|
@@ -55,9 +56,11 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { | |
.SetDefault(framework::proto::VarType::FP32); | ||
|
||
AddComment(R"DOC( | ||
GaussianRandom Operator. | ||
|
||
Used to initialize tensors with gaussian random generator. | ||
The default mean of the distribution is 0.0 and the default standard | ||
deviation (std) of the distribution is 1.0. Users can set mean and std | ||
by input arguments. | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
)DOC"); | ||
} | ||
}; | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -364,8 +364,7 @@ def dynamic_lstm(input, | |
cell_activation(str): The activation for cell output. Choices = ["sigmoid", | ||
"tanh", "relu", "identity"], default "tanh". | ||
candidate_activation(str): The activation for candidate hidden state. | ||
Choices = ["sigmoid", "tanh", | ||
"relu", "identity"], | ||
Choices = ["sigmoid", "tanh", "relu", "identity"], | ||
default "tanh". | ||
dtype(str): Data type. Choices = ["float32", "float64"], default "float32". | ||
name(str|None): A name for this layer(optional). If set None, the layer | ||
|
@@ -540,27 +539,31 @@ def dynamic_lstmp(input, | |
cell_activation(str): The activation for cell output. Choices = ["sigmoid", | ||
"tanh", "relu", "identity"], default "tanh". | ||
candidate_activation(str): The activation for candidate hidden state. | ||
Choices = ["sigmoid", "tanh", | ||
"relu", "identity"], | ||
Choices = ["sigmoid", "tanh", "relu", "identity"], | ||
default "tanh". | ||
proj_activation(str): The activation for projection output. | ||
Choices = ["sigmoid", "tanh", | ||
"relu", "identity"], | ||
Choices = ["sigmoid", "tanh", "relu", "identity"], | ||
default "tanh". | ||
dtype(str): Data type. Choices = ["float32", "float64"], default "float32". | ||
name(str|None): A name for this layer(optional). If set None, the layer | ||
will be named automatically. | ||
|
||
Returns: | ||
tuple: The projection of hidden state, and cell state of LSTMP. The \ | ||
shape of projection is (T x P), for the cell state which is \ | ||
(T x D), and both LoD is the same with the `input`. | ||
tuple: A tuple of two output variables: the projection of the hidden \ | ||
state, and the cell state of LSTMP. The shape of the projection is \ | ||
(T x P), that of the cell state is (T x D), and both have the same \ | ||
LoD as the `input`. | ||
|
||
Examples: | ||
|
||
.. code-block:: python | ||
|
||
dict_dim, emb_dim = 128, 64 | ||
data = fluid.layers.data(name='sequence', shape=[1], | ||
dtype='int32', lod_level=1) | ||
emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) | ||
hidden_dim, proj_dim = 512, 256 | ||
fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4, | ||
fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4, | ||
act=None, bias_attr=None) | ||
proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out, | ||
size=hidden_dim * 4, | ||
|
@@ -626,10 +629,10 @@ def dynamic_gru(input, | |
candidate_activation='tanh', | ||
h_0=None): | ||
""" | ||
**Dynamic GRU Layer** | ||
**Gated Recurrent Unit (GRU) Layer** | ||
|
||
Refer to `Empirical Evaluation of Gated Recurrent Neural Networks on | ||
Sequence Modeling <https://arxiv.org/abs/1412.3555>`_ | ||
Sequence Modeling <https://arxiv.org/abs/1412.3555>`_ . | ||
|
||
The formula is as follows: | ||
|
||
|
@@ -676,17 +679,25 @@ def dynamic_gru(input, | |
Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid". | ||
candidate_activation(str): The activation for candidate hidden state. | ||
Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh". | ||
h_0 (Variable): The hidden output of the first time step. | ||
h_0 (Variable): The initial hidden state. If not set, it defaults to | ||
zero. This is a tensor with shape (N x D), where N is the number of | ||
total time steps of the input mini-batch feature and D is the hidden | ||
size. | ||
|
||
Returns: | ||
Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \ | ||
and lod is the same with the input. | ||
and the sequence length is the same as the input. | ||
|
||
Examples: | ||
|
||
.. code-block:: python | ||
|
||
dict_dim, emb_dim = 128, 64 | ||
data = fluid.layers.data(name='sequence', shape=[1], | ||
dtype='int32', lod_level=1) | ||
emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) | ||
hidden_dim = 512 | ||
x = fluid.layers.fc(input=data, size=hidden_dim * 3) | ||
x = fluid.layers.fc(input=emb, size=hidden_dim * 3) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
hidden = fluid.layers.dynamic_gru(input=x, dim=hidden_dim) | ||
""" | ||
|
||
|
@@ -924,13 +935,13 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None): | |
|
||
Drop or keep each element of `x` independently. Dropout is a regularization | ||
technique for reducing overfitting by preventing neuron co-adaptation during | ||
training. The dropout operator randomly set (according to the given dropout | ||
training. The dropout operator randomly sets (according to the given dropout | ||
probability) the outputs of some units to zero, while others remain | ||
unchanged. | ||
|
||
Args: | ||
x (Variable): The input tensor. | ||
dropout_prob (float): Probability of setting units to zero. | ||
x (Variable): The input tensor variable. | ||
dropout_prob (float): Probability of setting units to zero. | ||
is_test (bool): A flag indicating whether it is in test phase or not. | ||
seed (int): A Python integer used to create random seeds. If this | ||
parameter is set to None, a random seed is used. | ||
|
@@ -940,13 +951,14 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None): | |
will be named automatically. | ||
|
||
Returns: | ||
Variable: A tensor variable. | ||
Variable: A tensor variable with the same shape as `x`. | ||
|
||
Examples: | ||
|
||
.. code-block:: python | ||
|
||
x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") | ||
droped = fluid.layers.dropout(input=x, dropout_rate=0.5) | ||
x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") | ||
droped = fluid.layers.dropout(x, dropout_prob=0.5) | ||
""" | ||
|
||
helper = LayerHelper('dropout', **locals()) | ||
|
@@ -2990,32 +3002,33 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None): | |
norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes | ||
|
||
.. math:: | ||
y = \frac{x}{ \sqrt{\sum {x^2} + epsilon }} | ||
|
||
y = \\frac{x}{ \sqrt{\sum {x^2} + epsilon }} | ||
|
||
For `x` with more dimensions, this layer independently normalizes each 1-D | ||
slice along dimension `axis`. | ||
|
||
Args: | ||
x(Variable|list): The input tensor to l2_normalize layer. | ||
axis(int): The axis on which to apply normalization. If `axis < 0`, | ||
axis(int): The axis on which to apply normalization. If `axis < 0`, \ | ||
the dimension to normalization is rank(X) + axis. -1 is the | ||
last dimension. | ||
epsilon(float): The epsilon value is used to avoid division by zero, | ||
epsilon(float): The epsilon value is used to avoid division by zero, \ | ||
the default value is 1e-12. | ||
name(str|None): A name for this layer(optional). If set None, the layer | ||
name(str|None): A name for this layer(optional). If set None, the layer \ | ||
will be named automatically. | ||
|
||
|
||
Returns: | ||
Variable: The output tensor variable. | ||
Variable: The output tensor variable has the same shape as `x`. | ||
|
||
Examples: | ||
|
||
.. code-block:: python | ||
|
||
data = fluid.layers.data(name="data", | ||
shape=(3, 17, 13), | ||
dtype="float32") | ||
normed = fluid.layers.l2_normalize(x=data, axis=1) | ||
data = fluid.layers.data(name="data", | ||
shape=(3, 17, 13), | ||
dtype="float32") | ||
normed = fluid.layers.l2_normalize(x=data, axis=1) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
""" | ||
|
||
if len(x.shape) == 1: | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -497,11 +497,27 @@ def save_combine(x, file_path, overwrite=True): | |
Saves a list of variables into a single file. | ||
|
||
Args: | ||
x(list): A list of Tensor/LoDTensor to be saved together in a single file. | ||
x(list): A list of Tensor/LoDTensor variables to be saved together in | ||
a single file. | ||
file_path(str): The file path where variables will be saved. | ||
overwrite(bool): Whether or not cover the given file when it has already | ||
overwrite(bool): Whether or not to overwrite the given file if it | ||
already exists. If it's set to 'False' and the file exists, a runtime | ||
error will be thrown. | ||
|
||
Returns: | ||
There is no return value. | ||
|
||
Examples: | ||
|
||
.. code-block:: python | ||
|
||
v1 = fluid.layers.data(name="data", | ||
shape=(4, 6), | ||
dtype="float32") | ||
v2 = fluid.layers.data(name="data", | ||
shape=(6, 8, 4), | ||
dtype="float32") | ||
normed = fluid.layers.save_combine([v1, v2], file_path="output") | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 该op在内部用,感觉用户不太需要,就没有暴露出来。 |
||
""" | ||
helper = LayerHelper("save_combine", **locals()) | ||
helper.append_op( | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.