Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[3] Update the precision of some op tests from fp32 to fp64 #21847

Merged
merged 5 commits into from
Dec 20, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_sequence_concat.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ def setLoD(self):
self.out_lod = [19, 11]

def setUp(self):
x1 = np.random.random(size=(10, 80)).astype('float32')
x2 = np.random.random(size=(20, 80)).astype('float32')
x1 = np.random.random(size=(10, 80)).astype('float64')
x2 = np.random.random(size=(20, 80)).astype('float64')
self.setLoD()

out = np.concatenate((x1[0:self.lod1[0]], x2[0:self.lod2[0]],
Expand Down
28 changes: 14 additions & 14 deletions python/paddle/fluid/tests/unittests/test_sequence_expand.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@

class TestSequenceExpand(OpTest):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [3, 40]).astype('float32')
y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32')
x_data = np.random.uniform(0.1, 1, [3, 40]).astype('float64')
y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float64')
y_lod = [[1, 3, 4]]
self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}

Expand Down Expand Up @@ -80,56 +80,56 @@ def test_check_grad(self):

class TestSequenceExpandCase1(TestSequenceExpand):
    """Variant with a two-level y_lod and ref_level set to 1."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float64')
        y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float64')
        y_lod = [[2, 3], [2, 2, 3, 3, 3]]
        self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}
        self.attrs = {'ref_level': 1}


class TestSequenceExpandCase2(TestSequenceExpand):
    """Variant with 3-D inputs, LoD on X, and ref_level set to 0."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        x_data = np.random.uniform(0.1, 1, [1, 2, 2]).astype('float64')
        x_lod = [[1]]
        y_data = np.random.uniform(0.1, 1, [2, 2, 2]).astype('float64')
        y_lod = [[2], [1, 1]]
        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
        self.attrs = {'ref_level': 0}


class TestSequenceExpandCase3(TestSequenceExpand):
    """Variant where both X and Y carry a single-level LoD."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float64')
        x_lod = [[1, 1, 1, 1]]
        y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float64')
        y_lod = [[2, 2, 2, 2]]
        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}


class TestSequenceExpandCase4(TestSequenceExpand):
    """Variant that reshapes a flat sample into [5, 2] before expanding."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        data = np.random.uniform(0.1, 1, [5 * 2, 1])
        x_data = np.array(data).reshape([5, 2]).astype('float64')
        x_lod = [[2, 3]]
        y_data = np.random.uniform(0.1, 1, [5, 1]).astype('float64')
        y_lod = [[2], [2, 3]]
        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}


class TestSequenceExpandCase5(TestSequenceExpand):
    """Variant whose y_lod contains a zero-length sequence, ref_level 1."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        x_data = np.random.uniform(0.1, 1, [6, 1]).astype('float64')
        y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float64')
        y_lod = [[2, 4], [2, 2, 3, 0, 3, 3]]
        self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}
        self.attrs = {'ref_level': 1}


class TestSequenceExpandCase6(TestSequenceExpand):
    """Variant with zero-length sequences in both x_lod and y_lod."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float64')
        x_lod = [[1, 1, 0, 1, 1]]
        y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float64')
        y_lod = [[0, 2, 4, 2, 0]]
        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}

Expand Down
16 changes: 8 additions & 8 deletions python/paddle/fluid/tests/unittests/test_sequence_expand_as.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ def setUp(self):
self.compute()

def set_data(self):
x_data = np.random.uniform(0.1, 1, [3, 40]).astype('float32')
y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32')
x_data = np.random.uniform(0.1, 1, [3, 40]).astype('float64')
y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float64')
y_lod = [[1, 3, 4]]
self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}

Expand Down Expand Up @@ -57,27 +57,27 @@ def test_check_grad(self):

class TestSequenceExpandAsCase1(TestSequenceExpandAs):
    """Variant with LoD on X and a zero-length sequence inside y_lod."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float64')
        x_lod = [[2, 3]]
        y_data = np.random.uniform(0.1, 1, [10, 1]).astype('float64')
        y_lod = [[2, 2, 0, 3, 3]]
        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}


class TestSequenceExpandAsCase2(TestSequenceExpandAs):
    """Variant whose y_lod has zero-length sequences at both ends."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float64')
        x_lod = [[2, 3]]
        y_data = np.random.uniform(0.1, 1, [10, 1]).astype('float64')
        y_lod = [[0, 4, 0, 6, 0]]
        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}


class TestSequenceExpandAsCase3(TestSequenceExpandAs):
    """Variant with 3-D inputs and single-sequence LoDs."""

    def set_data(self):
        # Stale fp32 duplicate assignments (diff residue) removed; keep the
        # intended float64 inputs.
        x_data = np.random.uniform(0.1, 1, [1, 2, 2]).astype('float64')
        x_lod = [[1]]
        y_data = np.random.uniform(0.1, 1, [2, 2, 2]).astype('float64')
        y_lod = [[2]]
        self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}

Expand Down
16 changes: 8 additions & 8 deletions python/paddle/fluid/tests/unittests/test_sequence_pad_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def set_attr(self):
self.x_len_lod = [[2, 3, 4, 3]]
self.pad_value = [1.0]
self.padded_length = -1
self.dtype = 'float32'
self.dtype = 'float64'

def set_data(self):
x_data = np.random.uniform(0.1, 0.5, self.x_shape).astype(self.dtype)
Expand Down Expand Up @@ -84,7 +84,7 @@ def set_attr(self):
self.x_len_lod = [[2, 3, 4, 3]]
self.pad_value = [1.0, 2.0, 3.0, 4.0]
self.padded_length = -1
self.dtype = 'float32'
self.dtype = 'float64'


class TestSequencePadOp3(TestSequencePadOp):
Expand All @@ -93,7 +93,7 @@ def set_attr(self):
self.x_len_lod = [[2, 3, 4, 3]]
self.pad_value = [1.0]
self.padded_length = 7
self.dtype = 'float32'
self.dtype = 'float64'


class TestSequencePadOp4(TestSequencePadOp):
Expand All @@ -102,7 +102,7 @@ def set_attr(self):
self.x_len_lod = [[2, 3, 4, 3]]
self.pad_value = [1.0, 2.0, 3.0, 4.0]
self.padded_length = 7
self.dtype = 'float32'
self.dtype = 'float64'


class TestSequencePadOp5(TestSequencePadOp):
Expand All @@ -111,7 +111,7 @@ def set_attr(self):
self.x_len_lod = [[2, 3, 4, 3]]
self.pad_value = [1.0]
self.padded_length = -1
self.dtype = 'float32'
self.dtype = 'float64'


class TestSequencePadOp6(TestSequencePadOp):
Expand All @@ -120,7 +120,7 @@ def set_attr(self):
self.x_len_lod = [[2, 3, 4, 3]]
self.pad_value = [[1.0, 2.0], [3.0, 4.0]]
self.padded_length = -1
self.dtype = 'float32'
self.dtype = 'float64'


class TestSequencePadOp7(TestSequencePadOp):
Expand All @@ -129,7 +129,7 @@ def set_attr(self):
self.x_len_lod = [[2, 3, 4, 3]]
self.pad_value = [1.0]
self.padded_length = 7
self.dtype = 'float32'
self.dtype = 'float64'


class TestSequencePadOp8(TestSequencePadOp):
Expand All @@ -138,7 +138,7 @@ def set_attr(self):
self.x_len_lod = [[0, 8, 0, 4, 0]]
self.pad_value = [1.0]
self.padded_length = 10
self.dtype = 'float32'
self.dtype = 'float64'


if __name__ == '__main__':
Expand Down
12 changes: 6 additions & 6 deletions python/paddle/fluid/tests/unittests/test_sequence_reshape.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ class TestSequenceReshape(OpTest):
def init_data(self):
self.dimension = 12
self.x_lod = [[4, 1, 3, 3]]
self.x = np.random.uniform(0.1, 1, [11, 24]).astype('float32')
self.x = np.random.uniform(0.1, 1, [11, 24]).astype('float64')

def setUp(self):
self.init_data()
Expand All @@ -42,7 +42,7 @@ def compute_output(self, x, x_lod, dimension):
offset = (seq_len * x_width) / dimension
assert int(offset) * dimension == seq_len * x_width
out_lod[0].append(int(offset))
out = np.zeros(shape=(sum(out_lod[0]), dimension)).astype('float32')
out = np.zeros(shape=(sum(out_lod[0]), dimension)).astype('float64')
out.ravel()[:] = x.ravel()[:]
return out, out_lod

Expand All @@ -57,28 +57,28 @@ class TestSequenceReshape_reduce(TestSequenceReshape):
def init_data(self):
self.dimension = 24
self.x_lod = [[4, 2, 2, 4]]
self.x = np.random.uniform(0.1, 1, [12, 12]).astype('float32')
self.x = np.random.uniform(0.1, 1, [12, 12]).astype('float64')


class TestSequenceReshape_same(TestSequenceReshape):
    """Case where the target dimension equals the input width (no-op reshape)."""

    def init_data(self):
        # Stale fp32 duplicate assignment (diff residue) removed; keep the
        # intended float64 input.
        self.dimension = 12
        self.x_lod = [[4, 2, 2, 4]]
        self.x = np.random.uniform(0.1, 1, [12, 12]).astype('float64')


class TestSequenceReshape_reduce_seq_len0(TestSequenceReshape):
    """Reduce case whose LoD contains zero-length sequences."""

    def init_data(self):
        # Stale fp32 duplicate assignment (diff residue) removed; keep the
        # intended float64 input.
        self.dimension = 24
        self.x_lod = [[0, 6, 0, 2, 4]]
        self.x = np.random.uniform(0.1, 1, [12, 12]).astype('float64')


class TestSequenceReshape_reduce_seq_len0_case1(TestSequenceReshape):
    """Second zero-length-sequence layout for the reduce case."""

    def init_data(self):
        # Stale fp32 duplicate assignment (diff residue) removed; keep the
        # intended float64 input.
        self.dimension = 24
        self.x_lod = [[0, 2, 8, 2, 0]]
        self.x = np.random.uniform(0.1, 1, [12, 12]).astype('float64')


if __name__ == '__main__':
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,12 @@ def init_lod(self):
def setUp(self):
self.op_type = "sequence_scatter"

X_data = np.random.uniform(0.1, 1.0, [3, 6]).astype('float32')
X_data = np.random.uniform(0.1, 1.0, [3, 6]).astype('float64')
Ids_data = np.array([[0], [1], [2], [5], [4], [3], [0], [1], [3], [2],
[5], [4]]).astype('int64')
Ids_lod = self.init_lod()

Updates_data = np.random.uniform(0.1, 1.0, [12, 1]).astype('float32')
Updates_data = np.random.uniform(0.1, 1.0, [12, 1]).astype('float64')
Updates_lod = Ids_lod

Out_data = np.copy(X_data)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,9 @@ def setUp(self):
self.use_cudnn = False
self.init_op_type()

x = np.random.uniform(0.1, 1, (110, 1)).astype("float32")
x = np.random.uniform(0.1, 1, (110, 1)).astype("float64")
self.init_lod()
out = np.zeros((110, 1)).astype("float32")
out = np.zeros((110, 1)).astype("float64")
offset = 0
for i in range(len(self.lod[0])):
if (self.lod[0][i] == 0):
Expand Down
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ class TestSequenceUnpadOp(OpTest):
def init(self):
self.length = [2, 3, 4]
self.x_shape = (3, 40)
self.dtype = "float32"
self.dtype = "float64"

def compute(self):
assert len(self.length) == self.x_shape[0]
Expand Down Expand Up @@ -58,7 +58,7 @@ class TestSequenceUnpadOp2(TestSequenceUnpadOp):
def init(self):
self.length = [2, 3, 4]
self.x_shape = (3, 5, 4, 3)
self.dtype = "float32"
self.dtype = "float64"


class TestSequenceUnpadOp3(TestSequenceUnpadOp):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,9 @@ def setUp(self):
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype("float32")),
.astype("float64")),
'Label': np.random.randint(0, 2, (batch_size, num_classes))
.astype("float32")
.astype("float64")
}

# Fw Pass is implemented as elementwise sigmoid followed by
Expand Down Expand Up @@ -65,9 +65,9 @@ def setUp(self):
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype("float32")),
.astype("float64")),
'Label': np.random.randint(-1, 2, (batch_size, num_classes))
.astype("float32")
.astype("float64")
}
self.attrs = {'ignore_index': ignore_index, }
# Fw Pass is implemented as elementwise sigmoid followed by
Expand Down Expand Up @@ -98,9 +98,9 @@ def setUp(self):
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype("float32")),
.astype("float64")),
'Label': np.random.uniform(0, 1, (batch_size, num_classes))
.astype("float32")
.astype("float64")
}

# Fw Pass is implemented as elementwise sigmoid followed by
Expand All @@ -127,9 +127,9 @@ def setUp(self):
self.inputs = {
'X': logit(
np.random.uniform(0, 1, (batch_size, num_classes))
.astype("float32")),
.astype("float64")),
'Label': np.random.randint(-1, 2, (batch_size, num_classes))
.astype("float32")
.astype("float64")
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
sigmoid_X = expit(self.inputs['X'])
Expand Down Expand Up @@ -160,9 +160,9 @@ def setUp(self):
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype("float32")),
.astype("float64")),
'Label': np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype("float32")
.astype("float64")
}

# Fw Pass is implemented as elementwise sigmoid followed by
Expand All @@ -189,9 +189,9 @@ def setUp(self):
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype("float32")),
.astype("float64")),
'Label': np.random.randint(-1, 2, tuple(batch_size + [num_classes]))
.astype("float32")
.astype("float64")
}
self.attrs = {'ignore_index': ignore_index, 'normalize': True}
sigmoid_X = expit(self.inputs['X'])
Expand Down Expand Up @@ -222,9 +222,9 @@ def setUp(self):
self.inputs = {
'X': logit(
np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
.astype("float32")),
.astype("float64")),
'Label': np.random.randint(0, 2, tuple(batch_size + [num_classes]))
.astype("float32")
.astype("float64")
}

# Fw Pass is implemented as elementwise sigmoid followed by
Expand Down
Loading