randint API: remove out, device, stop_gradient; add name #25433

Merged
merged 5 commits on Jul 15, 2020
Changes from 1 commit
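
For orientation before the diffs, a minimal sketch of the call style this change moves to, using only arguments that survive it (low, high, shape, dtype and the optional name). The signature is the one shown in the python/paddle/tensor/random.py diff below; the returned values are random.

    import paddle

    paddle.enable_imperative()

    # Draws integers from the half-open interval [low, high); shape, dtype and name are optional.
    x = paddle.randint(low=-5, high=5, shape=[3, 4], dtype='int64')
    print(x.numpy())  # a 3x4 array of ints in [-5, 5)
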
82 changes: 31 additions & 51 deletions python/paddle/fluid/tests/unittests/test_randint_op.py
@@ -17,12 +17,9 @@
import unittest
import numpy as np
from op_test import OpTest

import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import paddle
from paddle.fluid import core
from paddle import Program, program_guard


def output_hist(out):
@@ -56,25 +53,10 @@ def verify_output(self, outs):

class TestRandintOpError(unittest.TestCase):
def test_errors(self):
main_prog = Program()
start_prog = Program()
with program_guard(main_prog, start_prog):

def test_shape():
shape = np.array([2, 3])
paddle.randint(5, shape=shape, dtype='int32')

self.assertRaises(TypeError, test_shape)

def test_dtype():
paddle.randint(5, shape=[32, 32], dtype='float32')

self.assertRaises(TypeError, test_dtype)

def test_low_high():
paddle.randint(low=5, high=5, shape=[32, 32], dtype='int32')

self.assertRaises(ValueError, test_low_high)
with program_guard(Program(), Program()):
self.assertRaises(TypeError, paddle.randint, 5, shape=np.array([2]))
self.assertRaises(TypeError, paddle.randint, 5, dtype='float32')
self.assertRaises(ValueError, paddle.randint, 5, 5)


class TestRandintOp_attr_tensorlist(OpTest):
@@ -127,46 +109,44 @@ def verify_output(self, outs):
# Test python API
class TestRandintAPI(unittest.TestCase):
def test_api(self):
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
with program_guard(Program(), Program()):
# results are from [0, 5).
output1 = paddle.randint(5)
out1 = paddle.randint(5)
# shape is a list and dtype is 'int32'
output2 = paddle.randint(
out2 = paddle.randint(
low=-100, high=100, shape=[64, 64], dtype='int32')
# shape is a tuple and dtype is 'int64'
output3 = paddle.randint(
out3 = paddle.randint(
low=-100, high=100, shape=(32, 32, 3), dtype='int64')
# shape is a tensorlist and dtype is 'float32'
dim_1 = fluid.layers.fill_constant([1], "int64", 32)
dim_2 = fluid.layers.fill_constant([1], "int32", 50)
output4 = paddle.randint(
low=-100, high=100, shape=[dim_1, 5], dtype='int32')
dim_1 = paddle.fill_constant([1], "int64", 32)
dim_2 = paddle.fill_constant([1], "int32", 50)
out4 = paddle.randint(
low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32')
# shape is a tensor and dtype is 'float64'
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
output5 = paddle.randint(
var_shape = paddle.nn.data(
name='var_shape', shape=[2], dtype="int64")
out5 = paddle.randint(
low=1, high=1000, shape=var_shape, dtype='int64')

place = fluid.CPUPlace()
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)

exe.run(startup_program)
place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
exe = paddle.Executor(place)
outs = exe.run(
train_program,
feed={'var_shape': np.array([100, 100]).astype('int64')},
fetch_list=[output1, output2, output3, output4, output5])
fetch_list=[out1, out2, out3, out4, out5])


class TestRandintDygraphMode(unittest.TestCase):
def test_check_output(self):
with fluid.dygraph.guard():
x = paddle.randint(10, shape=[10], dtype="int32")
x_np = x.numpy()
for i in range(10):
self.assertTrue((x_np[i] >= 0 and x_np[i] < 10))
class TestRandintImperative(unittest.TestCase):
def test_api(self):
n = 10
with paddle.imperative.guard():
x1 = paddle.randint(n, shape=[10], dtype="int32")
x2 = paddle.tensor.randint(n)
x3 = paddle.tensor.random.randint(n)
for i in [x1, x2, x3]:
for j in i.numpy().tolist():
self.assertTrue((j >= 0 and j < n))


if __name__ == "__main__":
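
The trimmed TestRandintOpError above reduces the old closure-based checks to three direct assertRaises calls. A standalone sketch of the same three failure modes, run in static-graph mode the way the test does (hypothetical driver code; the test asserts only the exception types, not the messages):

    import numpy as np
    import paddle
    from paddle import Program, program_guard

    with program_guard(Program(), Program()):
        try:
            paddle.randint(5, shape=np.array([2]))  # shape must be a list, tuple or Variable
        except TypeError as e:
            print('shape check:', type(e).__name__)
        try:
            paddle.randint(5, dtype='float32')      # dtype must be int32 or int64
        except TypeError as e:
            print('dtype check:', type(e).__name__)
        try:
            paddle.randint(5, 5)                    # requires low < high
        except ValueError as e:
            print('low/high check:', type(e).__name__)
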
217 changes: 78 additions & 139 deletions python/paddle/tensor/random.py
@@ -37,172 +37,111 @@
]


def randint(low,
high=None,
shape=None,
out=None,
dtype=None,
device=None,
stop_gradient=False,
seed=0,
name=None):
def randint(low=0, high=None, shape=[1], dtype=None, name=None):
"""
:alias_main: paddle.randint
:alias: paddle.randint,paddle.tensor.randint,paddle.tensor.random.randint

This function returns a Tensor filled with random integers from the "discrete uniform" distribution of the
specified data type in the interval [low, high). If high is None (the default), then results are from [0, low).
This function returns a Tensor filled with random integers from the
"discrete uniform" distribution of the specified data type in the interval
[low, high). If high is None (the default), then results are from [0, low).

Args:
low (int): The lower bound on the range of random values to generate, the low is included in the range.
(unless high=None, in which case this parameter is one above the highest such integer).
high (int, optional): The upper bound on the range of random values to generate, the high is excluded
in the range. Default None(see above for behavior if high=None).
shape (list|tuple|Variable, optional): The shape of the output Tensor, if the shape is a list or tuple,
its elements can be an integer
or a Tensor with the shape [1], and the type of the Tensor must be int32 or int64.
If the shape is a Variable, it is a 1-D Tensor, and the type of the Tensor must be
int32 or int64. Default is None, in which case the shape is [1].
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
if out is None, a new Variable will be created to store the result.
dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output Tensor
which can be int32, int64, if dtype is `None`, the data
type of created Tensor is `int64`
device(str, optional): This parameter specifies that the Tensor is created
on the GPU or CPU.
stop_gradient(bool, optional): Indicating if we stop gradient from current(out) Variable,
default value is False.
seed (int, optional): Random seed used for permute samples. If seed is
equal to 0, it means use a seed generated by the system. Note that
if seed is not 0, this operator will always generate the same random
permutation every time. Default: 0.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
low (int): The lower bound on the range of random values to generate,
the low is included in the range.(unless high=None, in which case
this parameter is one above the highest such integer). Default is 0.
high (int, optional): The upper bound on the range of random values to
generate, the high is excluded in the range. Default is None(see
above for behavior if high=None).
shape (list|tuple|Variable, optional): The shape of the output Tensor,
if the shape is a list or tuple, its elements can be an integer or
a Tensor with the shape [1], and the type of the Tensor must be
int32 or int64. If the shape is a Variable, it is a 1-D Tensor,
and the type of the Tensor must be int32 or int64. Default is None.
dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the
output Tensor which can be int32, int64. If dtype is `None`, the
data type of created Tensor is `int64`
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.

Returns:
Variable: A Tensor of the specified shape filled with random integers.

Raises:
TypeError: Randint's low must be less than high.
ValueError: Randint's low must be less than high.
TypeError: shape's type must be list, tuple or Variable.
TypeError: dtype must be int32 or int64.

Examples:
.. code-block:: python

import paddle
import paddle.fluid as fluid
import paddle
import numpy as np

paddle.enable_imperative()

# example 1:
# attr shape is a list which doesn't contain tensor Variable.
result_1 = paddle.randint(low=-5, high=5, shape=[3, 4], dtype="int64")

# example 2:
# attr shape is a list which contains tensor Variable.
dim_1 = fluid.layers.fill_constant([1],"int64",3)
dim_2 = fluid.layers.fill_constant([1],"int32",5)
result_2 = paddle.randint(low=-5, high=5, shape=[dim_1, dim_2], dtype="int32")

# example 3:
# attr shape is a Variable, the data type must be int64 or int32.
var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
result_3 = paddle.randint(low=-5, high=5, shape=var_shape, dtype="int32")
var_shape_int32 = fluid.data(name='var_shape_int32', shape=[2], dtype="int32")
result_4 = paddle.randint(low=-5, high=5, shape=var_shape_int32, dtype="int64")

# example 4:
# Input only one parameter
# low=0, high=10, shape=[1], dtype='int64'
result_4 = paddle.randint(10)
"""

def get_new_shape_tensor(list_shape):
new_shape_tensor = []
for dim in list_shape:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_shape_tensor.append(dim)
else:
assert isinstance(dim, int) or isinstance(dim, long)
temp_out = helper.create_variable_for_type_inference('int64')
fill_constant([1], 'int64', dim, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
return new_shape_tensor

def get_attr_shape(list_shape):
unk_dim_idx = -1
attrs_shape = []
for dim_idx, dim_size in enumerate(list_shape):
if isinstance(dim_size, Variable):
attrs_shape.append(-1)
else:
attrs_shape.append(dim_size)
assert dim_size > 0, (
"Each dimension size given in shape must not be negative "
"except one unknown dimension.")
return attrs_shape
# example 1:
# attr shape is a list which doesn't contain tensor Variable.
result_1 = paddle.randint(low=-5, high=5, shape=[3])
# [0 -3 2]

# example 2:
# attr shape is a list which contains tensor Variable.
dim_1 = fluid.layers.fill_constant([1],"int64",2)
dim_2 = fluid.layers.fill_constant([1],"int32",3)
result_2 = paddle.randint(low=-5, high=5, shape=[dim_1, dim_2], dtype="int32")
print(result_2.numpy())
# [[ 0 -1 -3]
# [ 4 -2 0]]

# example 3:
# attr shape is a Variable
var_shape = paddle.imperative.to_variable(np.array([3]))
result_3 = paddle.randint(low=-5, high=5, shape=var_shape)
# [-2 2 3]

# example 4:
# data type is int32
result_4 = paddle.randint(low=-5, high=5, shape=[3], dtype='int32')
# [-5 4 -4]

# example 5:
# Input only one parameter
# low=0, high=10, shape=[1], dtype='int64'
result_5 = paddle.randint(10)
# [7]

"""
if high is None:
high = low
low = 0
if dtype is None:
dtype = 'int64'
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')

inputs = dict()
attrs = dict()

if shape is None:
shape = [1]
assert len(shape) > 0, ("The size of argument(shape) can't be zero.")

helper = LayerHelper("randint", **locals())
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)

if in_dygraph_mode():
attrs['shape'] = shape
else:
if isinstance(shape, Variable):
shape.stop_gradient = True
inputs["ShapeTensor"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, (
"The size of argument(shape) can't be zero.")
if utils._contain_var(shape):
inputs['ShapeTensorList'] = get_new_shape_tensor(shape)
else:
attrs["shape"] = get_attr_shape(shape)
check_type(shape, 'shape', (list, tuple, Variable), 'randint')
shape = utils._convert_shape_to_list(shape)
return core.ops.randint('shape', shape, 'low', low, 'high', high,
'seed', 0, 'dtype', dtype)

if high is None:
high = low
low = 0
attrs['low'] = low
attrs['high'] = high
attrs['seed'] = seed
if (low >= high):
check_type(shape, 'shape', (list, tuple, Variable), 'randint')
check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
if low >= high:
raise ValueError(
"randint's low must less then high, but received low = {0}, "
"high = {1}".format(low, high))

if out is None:
if name is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
out = helper.create_variable(
name=name, dtype=dtype, persistable=False)
else:
check_dtype(dtype, 'dtype',
convert_dtype(out.dtype), 'randint',
"(The dtype in randint must be the same with out's dtype.)")
attrs['dtype'] = out.dtype
out.stop_gradient = stop_gradient
inputs = dict()
attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype}
utils._get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='randint')

if device is None:
helper.append_op(
type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs)
else:
with device_guard(device):
helper.append_op(
type='randint',
inputs=inputs,
outputs={'Out': out},
attrs=attrs)
helper = LayerHelper("randint", **locals())
out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out


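
Finally, a sketch of the static-graph path that the rewritten function and the updated TestRandintAPI exercise together: the shape is supplied as a fed 1-D int64 tensor and the program is run with the paddle.Executor spelling used in the test (the fed shape [3, 4] is illustrative):

    import numpy as np
    import paddle
    from paddle import Program, program_guard
    from paddle.fluid import core

    with program_guard(Program(), Program()):
        # the output shape comes from a 1-D int64 tensor fed at run time
        var_shape = paddle.nn.data(name='var_shape', shape=[2], dtype='int64')
        out = paddle.randint(low=1, high=1000, shape=var_shape, dtype='int64')

        place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        exe = paddle.Executor(place)
        result, = exe.run(feed={'var_shape': np.array([3, 4]).astype('int64')},
                          fetch_list=[out])
        print(result.shape)  # (3, 4)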