rand API: remove out, device, stop_gradient; add name #25246

Merged: 5 commits, Jul 7, 2020
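For orientation before the diff: a minimal sketch of the `rand` call this PR reworks, in dygraph mode. The `from paddle import rand` path mirrors the updated unit tests; the pre-PR keyword names (`out`, `device`, `stop_gradient`) come from the PR title only, so treat the "before" comment as an assumption rather than exact history.

```python
import paddle.fluid as fluid
from paddle import rand  # assumed import path, matching the updated tests

with fluid.dygraph.guard():
    # Before this PR the signature carried extra control arguments, roughly:
    #   rand(shape=[3, 4], out=..., device='gpu', stop_gradient=True)
    # After this PR those are removed and only `name` is added:
    x = rand([3, 4])             # default dtype float32
    y = rand([3, 4], 'float64')  # dtype passed positionally, as in the tests
```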
2 changes: 1 addition & 1 deletion paddle/fluid/operators/gaussian_random_op.cc
@@ -98,7 +98,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {

       return;
     }
-    if (!(ctx->HasInput("ShapeTensor") && !ctx->HasInputs("ShapeTensorList"))) {
+    if (!ctx->HasInput("ShapeTensor") && !ctx->HasInputs("ShapeTensorList")) {
       PADDLE_ENFORCE_GT(
           shape.size(), 0UL,
           platform::errors::InvalidArgument(
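The one-line C++ change above is a logic fix, not a cleanup: the `shape.size()` guard should fire only when neither `ShapeTensor` nor `ShapeTensorList` is provided. A quick truth-table sketch in plain Python shows how the two boolean forms differ:

```python
# old guard: !(HasInput("ShapeTensor") && !HasInputs("ShapeTensorList"))
# new guard: !HasInput("ShapeTensor") && !HasInputs("ShapeTensorList")
for has_shape_tensor in (False, True):
    for has_shape_tensor_list in (False, True):
        old = not (has_shape_tensor and not has_shape_tensor_list)
        new = (not has_shape_tensor) and (not has_shape_tensor_list)
        print(has_shape_tensor, has_shape_tensor_list, old, new)
# The old form also fires whenever ShapeTensorList is present; the new form
# fires only for (False, False), i.e. when no shape input is wired up at all.
```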
112 changes: 41 additions & 71 deletions python/paddle/fluid/layers/nn.py
@@ -10486,29 +10486,24 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
            # [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32)
     """

-    helper = LayerHelper('gaussian_random', **locals())
-    out = helper.create_variable_for_type_inference(dtype)
-    if not isinstance(shape, (list, tuple, Variable)):
-        raise TypeError(
-            "The type of 'shape' in fill_constant must be Variable, list or tuple, but "
-            "received %s." % (type(shape)))
-    c_dtype = convert_np_dtype_to_dtype_(dtype)
+    check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random')
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+    check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random')

-    inputs = {}
     attrs = {
         'mean': mean,
         'std': std,
         'seed': seed,
-        'dtype': c_dtype,
+        'dtype': dtype,
         'use_mkldnn': False
     }

+    inputs = {}
     utils._get_shape_tensor_inputs(
-        inputs=inputs,
-        helper=helper,
-        attrs=attrs,
-        shape=shape,
-        op_type='gaussian_random')
+        inputs=inputs, attrs=attrs, shape=shape, op_type='gaussian_random')

+    helper = LayerHelper('gaussian_random', **locals())
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='gaussian_random',
         inputs=inputs,
@@ -14861,7 +14856,8 @@ def gather_tree(ids, parents):


 @templatedoc()
-def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
+def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
+                   name=None):
     """
     This OP initializes a variable with random values sampled from a
     uniform distribution in the range [min, max).
@@ -14876,18 +14872,24 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0):
            result=[[0.8505902, 0.8397286]]

     Args:
-        shape (list|tuple|Variable): The shape of the output Tensor, if the shape is a list or tuple,
-               its elements can be an integer
-               or a Tensor with the shape [1], and the type of the Tensor must be int32 or int64.
-               If the shape is a Variable, it is a 1-D Tensor, and the type of the Tensor must be int32 or int64.
-        dtype(np.dtype|core.VarDesc.VarType|str, optional): The type of the output Tensor. Supported data types: float32, float64.
-            Default: float32.
-        min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
-        max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
-        seed (int, optional): Random seed used for generating samples. 0 means use a
-            seed generated by the system. Note that if seed is not 0, this
-            operator will always generate the same random numbers every time.
-            Default 0.
+        shape (list|tuple|Variable): The shape of the output Tensor, if the
+            shape is a list or tuple, its elements can be an integer or a
+            Tensor with the shape [1], and the type of the Tensor must be
+            int32 or int64. If the shape is a Variable, it is a 1-D Tensor, and
+            the type of the Tensor must be int32 or int64.
+        dtype(np.dtype|core.VarDesc.VarType|str, optional): The type of the
+            output Tensor. Supported data types: float32, float64. Default: float32.
+        min (float, optional): The lower bound on the range of random values
+            to generate, the min is included in the range. Default -1.0.
+        max (float, optional): The upper bound on the range of random values
+            to generate, the max is excluded in the range. Default 1.0.
+        seed (int, optional): Random seed used for generating samples. 0 means
+            use a seed generated by the system. Note that if seed is not 0,
+            this operator will always generate the same random numbers every
+            time. Default 0.
+        name(str, optional): The default value is None. Normally there is no
+            need for user to set this property. For more information, please
+            refer to :ref:`api_guide_Name`.

     Returns:
         Variable: A Tensor of the specified shape filled with uniform_random values.
@@ -14917,62 +14919,30 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
         var_shape_int32 = fluid.data(name='var_shape_int32', shape=[2], dtype="int32")
         result_4 = fluid.layers.uniform_random(var_shape_int32)

-
-
     """
-    check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random')
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
-    check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform_random')

-    def get_new_shape_tensor(list_shape):
-        new_shape_tensor = []
-        for dim in list_shape:
-            if isinstance(dim, Variable):
-                dim.stop_gradient = True
-                new_shape_tensor.append(dim)
-            else:
-                assert (isinstance(dim, int))
-                temp_out = helper.create_variable_for_type_inference('int64')
-                fill_constant([1], 'int64', dim, force_cpu=True, out=temp_out)
-                new_shape_tensor.append(temp_out)
-        return new_shape_tensor
+    if in_dygraph_mode():
+        shape = utils._convert_shape_to_list(shape)
+        return core.ops.uniform_random('shape', shape, 'min',
+                                       float(min), 'max',
+                                       float(max), 'seed', seed, 'dtype', dtype)

-    def get_attr_shape(list_shape):
-        unk_dim_idx = -1
-        attrs_shape = []
-        for dim_idx, dim_size in enumerate(list_shape):
-            if isinstance(dim_size, Variable):
-                attrs_shape.append(-1)
-            else:
-                attrs_shape.append(dim_size)
-                assert dim_size > 0, (
-                    "Each dimension size given in shape must not be negative "
-                    "except one unknown dimension.")
-        return attrs_shape
+    check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random')
+    check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform_random')

-    helper = LayerHelper("uniform_random", **locals())
     inputs = dict()
     attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
-    if in_dygraph_mode():
-        attrs['shape'] = shape
-    else:
-        if isinstance(shape, Variable):
-            shape.stop_gradient = True
-            inputs["ShapeTensor"] = shape
-        elif isinstance(shape, (list, tuple)):
-            assert len(shape) > 0, (
-                "The size of argument(shape) can't be zero.")
-            attrs["shape"] = get_attr_shape(shape)
-            if utils._contain_var(shape):
-                inputs['ShapeTensorList'] = get_new_shape_tensor(shape)
+    utils._get_shape_tensor_inputs(
+        inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random')

+    helper = LayerHelper("uniform_random", **locals())
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="uniform_random", inputs=inputs, attrs=attrs,
         outputs={"Out": out})

-    return helper.append_activation(out)
+    return out


 def unbind(input, axis=0):
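Taken together, the `uniform_random` changes split the op into a dygraph fast path (shape normalized to a plain list, then dispatched straight through `core.ops`) and a static-graph path that delegates all shape plumbing to `utils._get_shape_tensor_inputs`. A hedged usage sketch against the 1.8-era `fluid` API, assuming both modes behave as the diff suggests:

```python
import paddle.fluid as fluid

# Dygraph: shape is converted by _convert_shape_to_list and the op is
# invoked directly via core.ops, bypassing LayerHelper entirely.
with fluid.dygraph.guard():
    x = fluid.layers.uniform_random([2, 3], min=-1.0, max=1.0)

# Static graph: shape may mix Python ints with 1-element Variables; the
# shared helper routes them into attrs['shape'] and ShapeTensorList.
main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    dim = fluid.layers.fill_constant([1], "int64", 4)
    y = fluid.layers.uniform_random(shape=[dim, 3], name='uniform_out')
```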
15 changes: 3 additions & 12 deletions python/paddle/fluid/layers/tensor.py
@@ -685,12 +685,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
         attrs['str_value'] = str(float(value))

     if in_dygraph_mode():
-        if isinstance(shape, (list, tuple)):
-            shape = list(
-                map(lambda x: x.numpy()[0] if isinstance(x, Variable) else x,
-                    shape))
-        else:
-            shape = list(shape.numpy().astype(int))
+        shape = utils._convert_shape_to_list(shape)
         if out is None:
             out = _varbase_creator(dtype=dtype)
@@ -719,12 +714,8 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
                     'fill_constant')

     helper = LayerHelper("fill_constant", **locals())
-    inputs = utils._get_shape_tensor_inputs(
-        inputs=inputs,
-        helper=helper,
-        attrs=attrs,
-        shape=shape,
-        op_type='fill_constant')
+    utils._get_shape_tensor_inputs(
+        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant')

     if out is None:
         out = helper.create_variable_for_type_inference(dtype=dtype)
27 changes: 20 additions & 7 deletions python/paddle/fluid/layers/utils.py
@@ -282,7 +282,7 @@ def _contain_var(list_or_tuple):
     return False


-def _get_shape_tensor_inputs(inputs, helper, attrs, shape, op_type):
+def _get_shape_tensor_inputs(inputs, attrs, shape, op_type):
     from .tensor import fill_constant, cast

     def _get_attr_shape(list_shape):
def _get_attr_shape(list_shape):
Expand All @@ -295,7 +295,7 @@ def _get_attr_shape(list_shape):
return attr_shape

def _get_shape_tensor(list_shape):
new_shape_tensor = []
shape_tensor_list = []
for idx, dim in enumerate(list_shape):
if isinstance(dim, Variable):
dim.stop_gradient = True
@@ -305,11 +305,11 @@ def _get_shape_tensor(list_shape):
                     '(When type of shape in' + op_type + 'is list or tuple.)')
                 if convert_dtype(dim.dtype) == 'int64':
                     dim = cast(x=dim, dtype='int32')
-                new_shape_tensor.append(dim)
+                shape_tensor_list.append(dim)
             else:
                 temp_out = fill_constant([1], 'int32', dim, force_cpu=True)
-                new_shape_tensor.append(temp_out)
-        return new_shape_tensor
+                shape_tensor_list.append(temp_out)
+        return shape_tensor_list

     if isinstance(shape, Variable):
         shape.stop_gradient = True
@@ -325,8 +325,8 @@ def _get_shape_tensor(list_shape):
         attrs["shape"] = _get_attr_shape(shape)
         if _contain_var(shape):
             inputs['ShapeTensorList'] = _get_shape_tensor(shape)
-
-    return inputs
+    else:
+        raise TypeError("Shape only supports Variable, or list, or tuple.")


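For readers skimming the diff, the whole contract of `_get_shape_tensor_inputs` after this PR can be restated as a self-contained sketch (`FakeVar` is a hypothetical stand-in for a fluid `Variable`; the real helper additionally builds int32 tensors and casts int64 dims). Note that the function now fills `inputs`/`attrs` in place and returns nothing, which is why the `fill_constant` call site above dropped its `inputs = ...` binding:

```python
class FakeVar:
    """Hypothetical stand-in for fluid.Variable, for illustration only."""

def dispatch_shape(inputs, attrs, shape):
    if isinstance(shape, FakeVar):
        inputs['ShapeTensor'] = shape        # whole shape as one 1-D tensor
    elif isinstance(shape, (list, tuple)):
        # Variable dims become -1 placeholders in the static shape attr...
        attrs['shape'] = [-1 if isinstance(d, FakeVar) else d for d in shape]
        # ...and if any are present, the live values travel as tensor inputs.
        if any(isinstance(d, FakeVar) for d in shape):
            inputs['ShapeTensorList'] = list(shape)
    else:
        raise TypeError("Shape only supports Variable, or list, or tuple.")

inputs, attrs = {}, {}
dispatch_shape(inputs, attrs, [2, FakeVar(), 4])
print(attrs['shape'])  # [2, -1, 4]
```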
def _convert_to_tensor_list(old_list, dtype="int32"):
Expand All @@ -345,3 +345,16 @@ def _convert_to_tensor_list(old_list, dtype="int32"):
temp_out = fill_constant([1], dtype, ele, force_cpu=True)
new_list_tensor.append(temp_out)
return new_list_tensor


def _convert_shape_to_list(shape):
"""
Convert shape(list, tuple, variable) to list in imperative mode
"""
if isinstance(shape, (list, tuple)):
shape = list(
map(lambda x: x.numpy()[0] if isinstance(x, Variable) else x,
shape))
else:
shape = list(shape.numpy().astype(int))
return shape
58 changes: 30 additions & 28 deletions python/paddle/fluid/tests/unittests/test_rand_op.py
@@ -47,71 +47,73 @@ def test_dtype():

         self.assertRaises(TypeError, test_dtype)

-        def test_shape_list():
-            rand(shape=[2.])
-
-        self.assertRaises(TypeError, test_shape_list)
-
-        def test_shape_list2():
-            rand(shape=[2, 3.])
-
-        self.assertRaises(TypeError, test_shape_list2)
-
-        def test_device():
-            rand(shape=[3, 4], device='device')
-
-        self.assertRaises(ValueError, test_device)
-

 class TestRandOp(unittest.TestCase):
     """
     This class tests the common usages of the rand op.
     """

-    def test_run(self):
-        use_cuda = False
+    def run_net(self, use_cuda=False):
         place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
         exe = fluid.Executor(place)

         train_program = fluid.Program()
         startup_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
-            result_1 = rand(shape=[3, 4])
+            result_0 = rand([3, 4])
+            result_1 = rand([3, 4], 'float64')

             dim_1 = fluid.layers.fill_constant([1], "int64", 3)
             dim_2 = fluid.layers.fill_constant([1], "int32", 5)
             result_2 = rand(shape=[dim_1, dim_2])

             var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
             result_3 = rand(var_shape)

             var_shape_int32 = fluid.data(
                 name='var_shape_int32', shape=[2], dtype="int32")
             result_4 = rand(var_shape_int32)

         exe.run(startup_program)

         x1 = np.array([3, 2]).astype('int64')
         x2 = np.array([4, 3]).astype('int32')
-        ret = exe.run(train_program,
-                      feed={"var_shape": x1,
-                            "var_shape_int32": x2},
-                      fetch_list=[result_1, result_2, result_3, result_4])
+        ret = exe.run(
+            train_program,
+            feed={"var_shape": x1,
+                  "var_shape_int32": x2},
+            fetch_list=[result_0, result_1, result_2, result_3, result_4])
+
+    def test_run(self):
+        self.run_net(False)
+        if core.is_compiled_with_cuda():
+            self.run_net(True)


 class TestRandOpForDygraph(unittest.TestCase):
     """
     This class tests the common usages of the rand op.
     """

-    def test_run(self):
-        use_cuda = False
-        with fluid.dygraph.guard():
-            rand(shape=[3, 4])
+    def run_net(self, use_cuda=False):
+        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+        with fluid.dygraph.guard(place):
+            rand([3, 4])
+
+            rand([3, 4], 'float64')

             dim_1 = fluid.layers.fill_constant([1], "int64", 3)
             dim_2 = fluid.layers.fill_constant([1], "int32", 5)
             rand(shape=[dim_1, dim_2])

             var_shape = fluid.dygraph.to_variable(np.array([3, 4]))
             rand(var_shape)

+    def test_run(self):
+        self.run_net(False)
+        if core.is_compiled_with_cuda():
+            self.run_net(True)


 if __name__ == "__main__":
     unittest.main()