Re-organize TBE tests, pt 4 (#2274)

Summary:
Pull Request resolved: #2274

- Re-organize TBE tests, pt 4

Reviewed By: spcyppt

Differential Revision: D52893922

fbshipit-source-id: 6d916b9ecf0c4b1f212552ca62706bbc1a2d56c5
q10 authored and facebook-github-bot committed Jan 20, 2024
1 parent 2cea7db commit f78a12b
Showing 9 changed files with 614 additions and 597 deletions.
2 changes: 0 additions & 2 deletions .github/scripts/fbgemm_gpu_test.bash
@@ -84,14 +84,12 @@ run_fbgemm_gpu_tests () {

# These are either non-tests or currently-broken tests in both FBGEMM_GPU and FBGEMM_GPU-CPU
local files_to_skip=(
-./tbe/split_table_batched_embeddings_test.py
./ssd_split_table_batched_embeddings_test.py
)

if [ "$fbgemm_variant" == "cpu" ]; then
# These are tests that are currently broken in FBGEMM_GPU-CPU
local ignored_tests=(
-./tbe/forward_test.py
./uvm_test.py
)
elif [ "$fbgemm_variant" == "rocm" ]; then
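For context, these bash arrays only list test files to exclude; the runner walks the test directory and drops anything matching a skip or ignore entry. A rough Python sketch of that filtering idea (hypothetical helper names, not the actual logic in fbgemm_gpu_test.bash):

from pathlib import Path

# Hypothetical illustration of how skip/ignore lists like files_to_skip and
# ignored_tests might be applied when collecting test files; the real logic
# lives in .github/scripts/fbgemm_gpu_test.bash, and these names are made up.
FILES_TO_SKIP = {"./ssd_split_table_batched_embeddings_test.py"}
IGNORED_TESTS_CPU = {"./uvm_test.py"}

def collect_test_files(root: str, variant: str) -> list[str]:
    ignored = IGNORED_TESTS_CPU if variant == "cpu" else set()
    excluded = FILES_TO_SKIP | ignored
    return [
        f"./{path.relative_to(root)}"
        for path in sorted(Path(root).rglob("*_test.py"))
        if f"./{path.relative_to(root)}" not in excluded
    ]

print(collect_test_files("fbgemm_gpu/test", "cpu"))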
5 changes: 4 additions & 1 deletion fbgemm_gpu/test/tbe/backward_adagrad_test.py
@@ -70,7 +70,6 @@


@optests.generate_opcheck_tests(fast=True)
-@unittest.skipIf(*gpu_unavailable)
class BackwardAdagradTest(unittest.TestCase):
def execute_backward_adagrad_( # noqa C901
self,
@@ -460,6 +459,7 @@ def execute_backward_adagrad_( # noqa C901
torch.zeros_like(table_indice_weight_grad_mask),
)

+@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
@@ -530,6 +530,7 @@ def test_backward_adagrad_fp16_pmSUM( # noqa C901
output_dtype,
)

+@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
@@ -600,6 +601,7 @@ def test_backward_adagrad_fp16_pmMEAN( # noqa C901
output_dtype,
)

+@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
@@ -805,6 +807,7 @@ def test_backward_adagrad_fp32_pmMEAN( # noqa C901
output_dtype,
)

+@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
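The recurring edit in this file moves @unittest.skipIf(*gpu_unavailable) from the class decorator onto individual test methods, so the whole class is no longer skipped wholesale on CPU-only machines. A minimal sketch of the pattern (toy test bodies; gpu_unavailable here is assumed to mirror the (condition, reason) tuple these tests import from the fbgemm_gpu test utilities):

import unittest

import torch

# Assumed to match the convention of the real gpu_unavailable tuple.
gpu_unavailable = (not torch.cuda.is_available(), "CUDA is not available")

class BackwardAdagradSketch(unittest.TestCase):
    # Before this change, @unittest.skipIf(*gpu_unavailable) decorated the
    # class, skipping every method below whenever no GPU was present.

    @unittest.skipIf(*gpu_unavailable)
    def test_needs_gpu(self) -> None:
        # Hypothetical stand-in for a CUDA-only test body.
        self.assertEqual(torch.zeros(4, device="cuda").sum().item(), 0.0)

    def test_runs_on_cpu(self) -> None:
        # With the guard moved onto methods, CPU-safe tests still execute.
        self.assertEqual(torch.zeros(4).sum().item(), 0.0)

if __name__ == "__main__":
    unittest.main()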
3 changes: 2 additions & 1 deletion fbgemm_gpu/test/tbe/backward_none_test.py
@@ -74,8 +74,8 @@


@optests.generate_opcheck_tests(fast=True, additional_decorators=additional_decorators)
-@unittest.skipIf(*gpu_unavailable)
class BackwardNoneTest(unittest.TestCase):
+@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
@@ -103,6 +103,7 @@ class BackwardNoneTest(unittest.TestCase):
def test_backward_none(self, **kwargs: Any) -> None:
self.execute_backward_none_(**kwargs)

+@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
20 changes: 6 additions & 14 deletions fbgemm_gpu/test/tbe/failures_dict_fast.json
@@ -41,17 +41,17 @@
"comment": "",
"status": "xsuccess"
},
"NBitSplitEmbeddingTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"NBitSplitEmbeddingsTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"comment": "",
"status": "xsuccess"
}
},
"fbgemm::int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"NBitSplitEmbeddingTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"NBitSplitEmbeddingsTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"comment": "",
"status": "xfail"
},
"NBitSplitEmbeddingTest.test_schema__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"NBitSplitEmbeddingsTest.test_schema__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"comment": "",
"status": "xfail"
}
@@ -125,7 +125,7 @@
"comment": "",
"status": "xfail"
},
"NBitSplitEmbeddingTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"NBitSplitEmbeddingsTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"comment": "",
"status": "xfail"
},
@@ -238,7 +238,7 @@
"comment": "",
"status": "xfail"
},
"NBitSplitEmbeddingTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"NBitSplitEmbeddingsTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"comment": "",
"status": "xfail"
},
@@ -269,7 +269,7 @@
"comment": "",
"status": "xfail"
},
"NBitSplitEmbeddingTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"NBitSplitEmbeddingsTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
"comment": "",
"status": "xfail"
}
@@ -278,21 +278,13 @@
"NBitForwardTest.test_faketensor__test_nbit_forward_uvm_cache": {
"comment": "",
"status": "xfail"
-},
-"SplitTableBatchedEmbeddingsTest.test_faketensor__test_pruning": {
-"comment": "",
-"status": "xfail"
}
},
"fbgemm::pruned_hashmap_insert": {},
"fbgemm::pruned_hashmap_lookup": {
"NBitForwardTest.test_faketensor__test_nbit_forward_uvm_cache": {
"comment": "",
"status": "xfail"
-},
-"SplitTableBatchedEmbeddingsTest.test_faketensor__test_pruning": {
-"comment": "",
-"status": "xfail"
}
},
"fbgemm::reset_weight_momentum": {
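The keys in this file pair a test class name with a generated opcheck test name, which is why renaming NBitSplitEmbeddingTest to NBitSplitEmbeddingsTest has to be mirrored here; stale keys would silently stop matching. A rough sketch of how such a failures dict might be consulted (hypothetical lookup code; the actual consumer is PyTorch's optests.generate_opcheck_tests machinery):

import json

# Hypothetical miniature of failures_dict_fast.json: operators map to
# "ClassName.test_variant__test_name" entries with an expected status.
FAILURES = json.loads("""
{
  "fbgemm::int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
    "NBitSplitEmbeddingsTest.test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function": {
      "comment": "",
      "status": "xfail"
    }
  }
}
""")

def expected_status(op_name: str, class_name: str, test_name: str) -> str:
    """Return 'xfail', 'xsuccess', etc., defaulting to a normal run."""
    key = f"{class_name}.{test_name}"
    return FAILURES.get(op_name, {}).get(key, {}).get("status", "run")

print(expected_status(
    "fbgemm::int_nbit_split_embedding_uvm_caching_codegen_lookup_function",
    "NBitSplitEmbeddingsTest",
    "test_faketensor__test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function",
))  # -> xfail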
6 changes: 4 additions & 2 deletions fbgemm_gpu/test/tbe/forward_test.py
@@ -268,8 +268,10 @@ def execute_forward_( # noqa C901
output_dtype=output_dtype,
use_experimental_tbe=use_experimental_tbe,
)
-# NOTE: test TorchScript-compatible!
-cc = torch.jit.script(cc)
+
+if not use_cpu and torch.cuda.is_available():
+    # NOTE: test TorchScript-compatible!
+    cc = torch.jit.script(cc)

for t in range(T):
cc.split_embedding_weights()[t].data.copy_(
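This guard keeps the CPU variant from TorchScript-compiling a module whose GPU code paths cannot script without CUDA, which is presumably why forward_test.py could be dropped from the broken-on-CPU list above. A self-contained sketch of the same conditional-scripting pattern (toy module, not the actual TBE class):

import torch

class ToyLookup(torch.nn.Module):
    """Stand-in for the TBE module; scripting it checks TorchScript compatibility."""

    def __init__(self, rows: int = 8, dim: int = 4) -> None:
        super().__init__()
        self.weights = torch.nn.Parameter(torch.randn(rows, dim))

    def forward(self, indices: torch.Tensor) -> torch.Tensor:
        return self.weights[indices]

use_cpu = not torch.cuda.is_available()
cc = ToyLookup()

# Mirrors the fix: only exercise torch.jit.script when a GPU run is possible.
if not use_cpu and torch.cuda.is_available():
    cc = torch.jit.script(cc)

print(cc(torch.tensor([0, 3, 5])).shape)  # torch.Size([3, 4])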
@@ -44,7 +44,7 @@


@optests.generate_opcheck_tests(fast=True)
-class NBitSplitEmbeddingTest(unittest.TestCase):
+class NBitSplitEmbeddingsTest(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),