【Fix PIR Unittest No.498,505,275】Fix some test cases in PIR (#65617)
* pir fix max_pool

* refine
wanghuancoder committed Jul 2, 2024
1 parent 99bbfe6 commit ea399ed
Showing 3 changed files with 39 additions and 32 deletions.
6 changes: 3 additions & 3 deletions python/paddle/nn/functional/pooling.py
@@ -1844,7 +1844,7 @@ def adaptive_max_pool1d(
     pool_size = [1] + convert_to_list(output_size, 1, 'pool_size')
 
     x = unsqueeze(x, [2])
-    if in_dygraph_mode():
+    if in_dynamic_or_pir_mode():
         pool_out = _C_ops.max_pool2d_with_index(
             x, pool_size, [1, 1], [0, 0], False, True, False
         )
@@ -1944,7 +1944,7 @@ def adaptive_max_pool2d(
         output_size[0] = in_h
     if output_size[1] is None:
         output_size[1] = in_w
-    if in_dygraph_mode():
+    if in_dynamic_or_pir_mode():
         pool_out = _C_ops.max_pool2d_with_index(
             x, output_size, [1, 1], [0, 0], False, True, False
         )
@@ -2041,7 +2041,7 @@ def adaptive_max_pool3d(
     if output_size[2] is None:
         output_size[2] = in_w
 
-    if in_dygraph_mode():
+    if in_dynamic_or_pir_mode():
         # By default, strides is [1,1,1] and paddings is [0, 0, 0]
         pool_out = _C_ops.max_pool3d_with_index(
             x, output_size, [1, 1, 1], [0, 0, 0], False, True, False
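
All three hunks make the same functional change: the C++-op fast path, previously taken only in eager (dygraph) mode, now also covers the new PIR static graph. Below is a minimal sketch of that dispatch pattern. in_dynamic_or_pir_mode() and _C_ops are real Paddle internals (the import path for the former is assumed to be paddle.base.framework); the wrapper function itself is hypothetical, not Paddle source.

    import paddle
    from paddle import _C_ops
    from paddle.base.framework import in_dynamic_or_pir_mode


    def adaptive_max_pool1d_sketch(x, output_size):
        # NCL -> NCHW: reuse the 2-D kernel with a unit height, as pooling.py does.
        x = paddle.unsqueeze(x, [2])
        pool_size = [1, output_size]
        if in_dynamic_or_pir_mode():
            # True both in eager mode and while building a PIR static graph,
            # so PIR programs no longer fall through to the legacy branch.
            pool_out = _C_ops.max_pool2d_with_index(
                x, pool_size, [1, 1], [0, 0], False, True, False
            )
            return paddle.squeeze(pool_out[0], [2])
        # Legacy (pre-PIR) static graph branch omitted in this sketch.
        raise NotImplementedError

Because the same branch now serves both execution modes, PIR programs exercise the same kernels as dygraph instead of the old append_op path.
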
@@ -279,46 +279,53 @@ def test_static_graph(self):
         for use_cuda in (
             [False, True] if core.is_compiled_with_cuda() else [False]
         ):
-            place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
-            paddle.enable_static()
-            x = paddle.static.data(
-                name="x", shape=[2, 3, 7, 7], dtype="float32"
-            )
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
+                paddle.enable_static()
+                x = paddle.static.data(
+                    name="x", shape=[2, 3, 7, 7], dtype="float32"
+                )
 
-            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[3, 3])
-            out_1 = adaptive_max_pool(x=x)
+                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
+                    output_size=[3, 3]
+                )
+                out_1 = adaptive_max_pool(x=x)
 
-            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
-            out_2 = adaptive_max_pool(x=x)
+                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=5)
+                out_2 = adaptive_max_pool(x=x)
 
-            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(output_size=[2, 5])
-            out_3 = adaptive_max_pool(x=x)
+                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
+                    output_size=[2, 5]
+                )
+                out_3 = adaptive_max_pool(x=x)
 
-            # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
-            #     output_size=[3, 3], data_format="NHWC")
-            # out_4 = adaptive_max_pool(x=x)
+                # adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
+                #     output_size=[3, 3], data_format="NHWC")
+                # out_4 = adaptive_max_pool(x=x)
 
-            adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
-                output_size=[None, 3]
-            )
-            out_5 = adaptive_max_pool(x=x)
+                adaptive_max_pool = paddle.nn.AdaptiveMaxPool2D(
+                    output_size=[None, 3]
+                )
+                out_5 = adaptive_max_pool(x=x)
 
-            exe = paddle.static.Executor(place=place)
-            [res_1, res_2, res_3, res_5] = exe.run(
-                base.default_main_program(),
-                feed={"x": self.x_np},
-                fetch_list=[out_1, out_2, out_3, out_5],
-            )
+                exe = paddle.static.Executor(place=place)
+                [res_1, res_2, res_3, res_5] = exe.run(
+                    base.default_main_program(),
+                    feed={"x": self.x_np},
+                    fetch_list=[out_1, out_2, out_3, out_5],
+                )
 
-            np.testing.assert_allclose(res_1, self.res_1_np)
+                np.testing.assert_allclose(res_1, self.res_1_np)
 
-            np.testing.assert_allclose(res_2, self.res_2_np)
+                np.testing.assert_allclose(res_2, self.res_2_np)
 
-            np.testing.assert_allclose(res_3, self.res_3_np)
+                np.testing.assert_allclose(res_3, self.res_3_np)
 
-            # np.testing.assert_allclose(res_4, self.res_4_np)
+                # np.testing.assert_allclose(res_4, self.res_4_np)
 
-            np.testing.assert_allclose(res_5, self.res_5_np)
+                np.testing.assert_allclose(res_5, self.res_5_np)
 
     def test_dynamic_graph(self):
         for use_cuda in (
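
The test-side change is purely structural: each loop iteration now builds its graph inside paddle.static.program_guard with fresh Program objects instead of appending to the shared default main program. A standalone sketch of the isolation this buys, illustrative rather than the test file itself (shapes follow the diff above):

    import numpy as np
    import paddle

    paddle.enable_static()
    x_np = np.random.rand(2, 3, 7, 7).astype("float32")

    for output_size in ([3, 3], 5, [2, 5]):
        # Fresh main/startup programs per iteration: without them, every
        # iteration's ops accumulate in the default main program, and later
        # exe.run calls re-execute earlier iterations' graphs as well.
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            x = paddle.static.data(
                name="x", shape=[2, 3, 7, 7], dtype="float32"
            )
            out = paddle.nn.AdaptiveMaxPool2D(output_size=output_size)(x)
            exe = paddle.static.Executor(paddle.CPUPlace())
            (res,) = exe.run(
                paddle.static.default_main_program(),
                feed={"x": x_np},
                fetch_list=[out],
            )
            print(res.shape)  # e.g. (2, 3, 3, 3) for output_size=[3, 3]

Giving each configuration its own program is a common pattern when a static-graph test loops over cases, and it keeps each graph independently runnable under both the legacy and PIR executors.
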
