🚚 Refactor: moved uint8 support for sign to PaddlePaddle#59514
PommesPeter committed Nov 29, 2023
1 parent e86aeee commit e11eeca
Showing 5 changed files with 11 additions and 31 deletions.
1 change: 0 additions & 1 deletion paddle/phi/kernels/cpu/sign_kernel.cc
@@ -25,7 +25,6 @@ PD_REGISTER_KERNEL(sign,
                    CPU,
                    ALL_LAYOUT,
                    phi::SignKernel,
-                   uint8_t,
                    int8_t,
                    int16_t,
                    int32_t,
1 change: 0 additions & 1 deletion paddle/phi/kernels/funcs/eigen/sign.cc
@@ -29,7 +29,6 @@ struct EigenSign<Eigen::DefaultDevice, T> {
   }
 };

-template struct EigenSign<Eigen::DefaultDevice, uint8_t>;
 template struct EigenSign<Eigen::DefaultDevice, int8_t>;
 template struct EigenSign<Eigen::DefaultDevice, int16_t>;
 template struct EigenSign<Eigen::DefaultDevice, int32_t>;
1 change: 0 additions & 1 deletion paddle/phi/kernels/funcs/eigen/sign.cu
@@ -29,7 +29,6 @@ struct EigenSign<Eigen::GpuDevice, T> {
   }
 };

-template struct EigenSign<Eigen::GpuDevice, uint8_t>;
 template struct EigenSign<Eigen::GpuDevice, int8_t>;
 template struct EigenSign<Eigen::GpuDevice, int16_t>;
 template struct EigenSign<Eigen::GpuDevice, int32_t>;
1 change: 0 additions & 1 deletion paddle/phi/kernels/gpu/sign_kernel.cu.cc
@@ -25,7 +25,6 @@ PD_REGISTER_KERNEL(sign,
                    GPU,
                    ALL_LAYOUT,
                    phi::SignKernel,
-                   uint8_t,
                    int8_t,
                    int16_t,
                    int32_t,
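For context only, not part of this diff: all four C++ hunks above drop the uint8 variants of phi::SignKernel, and the commit message notes that uint8 support moves to PaddlePaddle#59514. A small NumPy sketch of why unsigned 8-bit inputs behave differently under sign: uint8 cannot represent negative values, so the result can only be 0 or 1.

import numpy as np

# sign over signed integers covers the full {-1, 0, 1} range; over uint8 it
# can never be -1, because uint8 values live in [0, 255].
x_signed = np.array([-3, 0, 7], dtype=np.int8)
x_unsigned = np.array([0, 7, 255], dtype=np.uint8)

print(np.sign(x_signed))    # [-1  0  1]
print(np.sign(x_unsigned))  # [0 1 1]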
38 changes: 11 additions & 27 deletions test/legacy_test/test_sign_op.py
@@ -93,64 +93,48 @@ def test_dygraph(self):
             self.assertEqual((np_z == z_expected).all(), True)

     def test_static(self):
-        np_input1 = np.random.uniform(-10, 10, (12, 10)).astype("int8")
-        np_input2 = np.random.uniform(-10, 10, (12, 10)).astype("uint8")
-        np_input3 = np.random.uniform(-10, 10, (12, 10)).astype("int16")
-        np_input4 = np.random.uniform(-10, 10, (12, 10)).astype("int32")
-        np_input5 = np.random.uniform(-10, 10, (12, 10)).astype("int64")
-        np_out1 = np.sign(np_input1)
+        np_input2 = np.random.uniform(-10, 10, (12, 10)).astype("int16")
+        np_input3 = np.random.uniform(-10, 10, (12, 10)).astype("int32")
+        np_input4 = np.random.uniform(-10, 10, (12, 10)).astype("int64")
         np_out2 = np.sign(np_input2)
         np_out3 = np.sign(np_input3)
         np_out4 = np.sign(np_input4)
-        np_out5 = np.sign(np_input5)

         def run(place):
             with program_guard(Program(), Program()):
                 # The input type of sign_op must be Variable or numpy.ndarray.
                 input1 = 12
                 self.assertRaises(TypeError, paddle.tensor.math.sign, input1)
                 # The result of sign_op must correct.
-                input1 = paddle.static.data(
-                    name='input1', shape=[12, 10], dtype="int8"
-                )
                 input2 = paddle.static.data(
-                    name='input2', shape=[12, 10], dtype="uint8"
+                    name='input2', shape=[12, 10], dtype="int16"
                 )
                 input3 = paddle.static.data(
-                    name='input3', shape=[12, 10], dtype="int16"
+                    name='input3', shape=[12, 10], dtype="int32"
                 )
                 input4 = paddle.static.data(
-                    name='input4', shape=[12, 10], dtype="int32"
-                )
-                input5 = paddle.static.data(
-                    name='input5', shape=[12, 10], dtype="int64"
+                    name='input4', shape=[12, 10], dtype="int64"
                 )
-                out1 = paddle.sign(input1)
                 out2 = paddle.sign(input2)
                 out3 = paddle.sign(input3)
                 out4 = paddle.sign(input4)
-                out5 = paddle.sign(input5)
                 exe = paddle.static.Executor(place)
-                res1, res2, res3, res4, res5 = exe.run(
+                res2, res3, res4 = exe.run(
                     paddle.static.default_main_program(),
                     feed={
-                        "input1": np_input1,
                         "input2": np_input2,
                         "input3": np_input3,
                         "input4": np_input4,
-                        "input5": np_input5,
                     },
-                    fetch_list=[out1, out2, out3, out4, out5],
+                    fetch_list=[out2, out3, out4],
                 )
-                self.assertEqual((res1 == np_out1).all(), True)
                 self.assertEqual((res2 == np_out2).all(), True)
                 self.assertEqual((res3 == np_out3).all(), True)
                 self.assertEqual((res4 == np_out4).all(), True)
-                self.assertEqual((res5 == np_out5).all(), True)
-                input6 = paddle.static.data(
-                    name='input6', shape=[-1, 4], dtype="float16"
+                input5 = paddle.static.data(
+                    name='input5', shape=[-1, 4], dtype="float16"
                 )
-                paddle.sign(input6)
+                paddle.sign(input5)

         for place in self.place:
             run(place)
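For reference, a minimal dygraph sketch, not part of the test file, that mirrors the trimmed test_static data; it assumes the integer dtypes the test still feeds, int16 through int64, remain registered for paddle.sign on the current place.

import numpy as np
import paddle

# Hypothetical quick check mirroring the updated test data: paddle.sign should
# agree with np.sign for the integer dtypes the static test still covers.
for dtype in ("int16", "int32", "int64"):
    np_x = np.random.uniform(-10, 10, (12, 10)).astype(dtype)
    x = paddle.to_tensor(np_x)
    assert (paddle.sign(x).numpy() == np.sign(np_x)).all()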
