
Commit

Revert "[cherry-pick] adapt c_embedding to phi namespace for custom devices (PaddlePaddle#60774) (PaddlePaddle#61045)"

This reverts commit 0ccb9cb.
hanhaowen-mt committed May 13, 2024
1 parent caf0458 commit d978bad
Showing 4 changed files with 5 additions and 201 deletions.
4 changes: 0 additions & 4 deletions paddle/phi/kernels/CMakeLists.txt
@@ -199,10 +199,6 @@ if(WITH_MKLDNN)
      "fusion/onednn/*.cc")
 endif()
 
-if(WITH_CUSTOM_DEVICE)
-  set(cc_search_pattern ${cc_search_pattern} "custom/*.cc")
-endif()
-
 file(
   GLOB kernel_cc
   RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
93 changes: 0 additions & 93 deletions paddle/phi/kernels/custom/c_embedding_grad_kernel.cc

This file was deleted.

84 changes: 0 additions & 84 deletions paddle/phi/kernels/custom/c_embedding_kernel.cc

This file was deleted.

25 changes: 5 additions & 20 deletions test/legacy_test/c_embedding_op_base.py
@@ -34,8 +34,10 @@ def get_c_embedding(start, end, table, ids):
     return output
 
 
-def c_embedding_wrapper(table, index, start_index=0, vocab_size=-1):
-    return paddle._C_ops.c_embedding(table, index, start_index, vocab_size)
+def c_embedding_wrapper(table, index, start_index=0):
+    return paddle._legacy_C_ops.c_embedding(
+        table, index, "start_index", start_index
+    )
 
 
 class TestCEmbeddingCPU(OpTest):
@@ -56,15 +58,11 @@ def initcase(self):
         )
         self.start_index = 10
         self.end_index = self.start_index + 17
-        self.vocab_size = 34
 
         self.inputs = {'W': table, 'Ids': ids}
         np_out = get_c_embedding(self.start_index, self.end_index, table, ids)
         self.outputs = {'Out': np_out.reshape((2, 4, 64))}
-        self.attrs = {
-            'start_index': self.start_index,
-            'vocab_size': self.vocab_size,
-        }
+        self.attrs = {'start_index': self.start_index}
         if core.is_compiled_with_xpu():
             self.__class__.use_xpu = True
 
@@ -89,20 +87,12 @@ def test_check_output(self):
             self.check_output_with_place(core.CUDAPlace(0))
         elif core.is_compiled_with_xpu():
             self.check_output_with_place(core.XPUPlace(0))
-        else:
-            current_place = paddle.framework._current_expected_place()
-            if isinstance(current_place, paddle.CustomPlace):
-                self.check_output_with_place(current_place)
 
     def test_check_grad(self):
         if core.is_compiled_with_cuda():
             self.check_grad_with_place(core.CUDAPlace(0), ['W'], 'Out')
         elif core.is_compiled_with_xpu():
             self.check_grad_with_place(core.XPUPlace(0), ['W'], 'Out')
-        else:
-            current_place = paddle.framework._current_expected_place()
-            if isinstance(current_place, paddle.CustomPlace):
-                self.check_grad_with_place(current_place, ['W'], 'Out')
 
     def init_dtype(self):
         if core.is_compiled_with_cuda():
@@ -111,11 +101,6 @@ def init_dtype(self):
         elif core.is_compiled_with_xpu():
             self.dtype = "float32"
             self.ids_dtype = "int64"
-        else:
-            current_place = paddle.framework._current_expected_place()
-            if isinstance(current_place, paddle.CustomPlace):
-                self.dtype = "float32"
-                self.ids_dtype = "int64"
 
 
 class TestCEmbeddingOpFP32(TestCEmbeddingOpBase):
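
Note on the reference used by this test: the body of get_c_embedding is not part of this diff, so the snippet below is only a hedged NumPy sketch of the behavior the checks above are assumed to compare against — ids that fall inside the local vocabulary shard [start, end) are looked up in table, while ids owned by other shards produce zero rows. The helper name get_c_embedding_reference and the concrete shapes are illustrative, mirroring the start_index = 10, shard size 17, and (2, 4, 64) output reshape visible in the hunks above.

import numpy as np

def get_c_embedding_reference(start, end, table, ids):
    # Sketch of the sharded embedding lookup the test is assumed to verify:
    # this rank owns the table rows for vocabulary ids in [start, end).
    index = ids.flatten()
    out_of_range = (index < start) | (index >= end)
    local_index = index - start
    local_index[out_of_range] = 0   # any valid row; it is zeroed out below
    output = table[local_index]     # gather rows from the local shard
    output[out_of_range] = 0.0      # ids owned by other shards -> zero rows
    return output                   # shape: (ids.size, hidden_dim)

# Illustrative usage mirroring initcase(): a (17, 64) shard covering ids 10..26,
# looked up with ids of shape (2, 4) and reshaped like the expected test output.
table = np.random.random((17, 64)).astype("float32")
ids = np.random.randint(low=0, high=17 * 2, size=(2, 4)).astype("int64")
np_out = get_c_embedding_reference(10, 10 + 17, table, ids).reshape((2, 4, 64))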
