move TypedStorage handling to assertEqual #89557

Closed
wants to merge 17 commits into from
2 changes: 2 additions & 0 deletions test/test_serialization.py
@@ -675,6 +675,8 @@ def test_load_error_msg(self):
         with self.assertRaisesRegex(AttributeError, expected_err_msg):
             torch.load(resource)
 
+    # FIXME: See https://github.com/pytorch/pytorch/issues/90497
+    @unittest.expectedFailure
     def test_save_different_dtype_unallocated(self):
         devices = ['cpu']
         if torch.cuda.is_available():
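Note: unittest.expectedFailure makes the runner report a failing test as an expected failure rather than a regression, and flags an unexpected success once the bug tracked in #90497 is fixed. A minimal, self-contained sketch of the marking pattern — the test body below is a toy stand-in, not the actual serialization test:

import unittest


class ExpectedFailureSketch(unittest.TestCase):
    # Stand-in for a test gated on a known open issue: the runner prints
    # "x" (expected failure) instead of "F" while the assertion fails.
    @unittest.expectedFailure
    def test_known_bug(self):
        self.assertEqual(1, 2)  # placeholder for the known-broken behavior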
2 changes: 1 addition & 1 deletion third_party/fbgemm
Submodule fbgemm updated 75 files
+8 −26 .github/workflows/fbgemm_nightly_build.yml
+3 −7 .github/workflows/fbgemm_nightly_build_cpu.yml
+41 −47 .github/workflows/fbgemm_release_build.yml
+4 −8 .github/workflows/fbgemm_release_build_cpu.yml
+3 −4 .github/workflows/fbgemmci.yml
+0 −45 .github/workflows/pylint.yaml
+6 −16 fbgemm_gpu/CMakeLists.txt
+4 −10 fbgemm_gpu/README.md
+214 −90 fbgemm_gpu/bench/bench_utils.py
+1 −147 fbgemm_gpu/bench/jagged_tensor_benchmark.py
+20 −23 fbgemm_gpu/bench/merge_embeddings_benchmark.py
+0 −88 fbgemm_gpu/bench/sparse_ops_benchmark.py
+4 −0 fbgemm_gpu/bench/split_embeddings_cache_benchmark.py
+51 −489 fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py
+0 −403 fbgemm_gpu/bench/ssd_table_batched_embeddings_benchmark.py
+2 −2 fbgemm_gpu/codegen/embedding_backward_split_indice_weights_template.cu
+10 −8 fbgemm_gpu/codegen/embedding_backward_split_template.cu
+1 −1 fbgemm_gpu/codegen/embedding_forward_quantized_cpu_template.cpp
+2 −137 fbgemm_gpu/codegen/embedding_forward_quantized_host.cpp
+1 −1 fbgemm_gpu/codegen/embedding_forward_quantized_host_cpu.cpp
+9 −1 fbgemm_gpu/codegen/embedding_forward_quantized_split_template.cu
+5 −5 fbgemm_gpu/codegen/embedding_forward_split_template.cu
+2 −8 fbgemm_gpu/codegen/split_embedding_codegen_lookup_invoker.template
+2 −1 fbgemm_gpu/fbgemm_gpu/__init__.py
+9 −9 fbgemm_gpu/fbgemm_gpu/_fbgemm_gpu_docs.py
+0 −6 fbgemm_gpu/fbgemm_gpu/docs/__init__.py
+0 −1 fbgemm_gpu/fbgemm_gpu/enums.py
+8 −50 fbgemm_gpu/fbgemm_gpu/quantize_comm.py
+1 −1 fbgemm_gpu/fbgemm_gpu/split_embedding_configs.py
+2 −4 fbgemm_gpu/fbgemm_gpu/split_embedding_inference_converter.py
+0 −483 fbgemm_gpu/fbgemm_gpu/split_embedding_utils.py
+91 −147 fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py
+0 −1,001 fbgemm_gpu/fbgemm_gpu/ssd_split_table_batched_embeddings_ops.py
+0 −1 fbgemm_gpu/fbgemm_gpu/uvm.py
+0 −92 fbgemm_gpu/include/fbgemm_gpu/embedding_inplace_update.h
+0 −77 fbgemm_gpu/include/fbgemm_gpu/fbgemm_cuda_utils.cuh
+44 −73 fbgemm_gpu/include/fbgemm_gpu/sparse_ops.h
+3 −40 fbgemm_gpu/include/fbgemm_gpu/split_embeddings_cache_cuda.cuh
+0 −19 fbgemm_gpu/include/fbgemm_gpu/topology_utils.h
+2 −63 fbgemm_gpu/src/cumem_utils.cu
+0 −14 fbgemm_gpu/src/cumem_utils.h
+27 −5 fbgemm_gpu/src/cumem_utils_host.cpp
+0 −254 fbgemm_gpu/src/embedding_inplace_update.cu
+0 −118 fbgemm_gpu/src/embedding_inplace_update_cpu.cpp
+0 −16 fbgemm_gpu/src/embedding_inplace_update_gpu.cpp
+431 −712 fbgemm_gpu/src/jagged_tensor_ops.cu
+0 −331 fbgemm_gpu/src/jagged_tensor_ops_autograd.cpp
+273 −295 fbgemm_gpu/src/jagged_tensor_ops_cpu.cpp
+0 −137 fbgemm_gpu/src/jagged_tensor_ops_meta.cpp
+181 −3 fbgemm_gpu/src/merge_pooled_embeddings_gpu.cpp
+66 −2 fbgemm_gpu/src/metric_ops.cu
+0 −372 fbgemm_gpu/src/quantize_ops.cu
+0 −18 fbgemm_gpu/src/quantize_ops_cpu.cpp
+0 −4 fbgemm_gpu/src/quantize_ops_gpu.cpp
+16 −231 fbgemm_gpu/src/sparse_ops.cu
+55 −110 fbgemm_gpu/src/sparse_ops_cpu.cpp
+0 −222 fbgemm_gpu/src/sparse_ops_gpu.cpp
+65 −584 fbgemm_gpu/src/split_embeddings_cache_cuda.cu
+32 −10 fbgemm_gpu/src/split_table_batched_embeddings.cpp
+0 −287 fbgemm_gpu/src/ssd_split_embeddings_cache_cuda.cu
+0 −135 fbgemm_gpu/src/ssd_split_table_batched_embeddings.cpp
+0 −573 fbgemm_gpu/src/ssd_table_batched_embeddings.h
+0 −183 fbgemm_gpu/src/topology_utils.cpp
+0 −112 fbgemm_gpu/test/embedding_inplace_update_test.cpp
+63 −397 fbgemm_gpu/test/jagged_tensor_ops_test.py
+0 −111 fbgemm_gpu/test/lint/check_meta_header.py
+0 −17 fbgemm_gpu/test/lint/flake8_problem_matcher.json
+8 −24 fbgemm_gpu/test/quantize_comm_test.py
+3 −3 fbgemm_gpu/test/quantize_ops_test.py
+4 −125 fbgemm_gpu/test/sparse_ops_test.py
+314 −343 fbgemm_gpu/test/split_table_batched_embeddings_test.py
+0 −759 fbgemm_gpu/test/ssd_split_table_batched_embeddings_test.py
+0 −1 fbgemm_gpu/test/test_utils.py
+66 −154 fbgemm_gpu/test/uvm_test.py
+1 −1 fbgemm_gpu/version.py
2 changes: 1 addition & 1 deletion third_party/ideep
Submodule ideep updated 1 file
+1 −1 mkl-dnn
28 changes: 1 addition & 27 deletions torch/testing/_comparison.py
@@ -1076,35 +1076,9 @@ def originate_pairs(
     Returns:
         (List[Pair]): Originated pairs.
     """
-    if isinstance(actual, torch.TypedStorage) and isinstance(
-        expected, torch.TypedStorage
-    ):
-        actual_len = actual._size()
-        expected_len = expected._size()
-        if actual_len != expected_len:
-            raise ErrorMeta(
-                AssertionError,
-                f"The length of the sequences mismatch: {actual_len} != {expected_len}",
-                id=id,
-            )
-
-        pairs = []
-        for idx in range(actual_len):
-            pairs.extend(
-                originate_pairs(
-                    actual._getitem(idx),
-                    expected._getitem(idx),
-                    pair_types=pair_types,
-                    sequence_types=sequence_types,
-                    mapping_types=mapping_types,
-                    id=(*id, idx),
-                    **options,
-                )
-            )
-        return pairs
     # We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop:
     # "a" == "a"[0][0]...
-    elif (
+    if (
         isinstance(actual, sequence_types)
         and not isinstance(actual, str)
         and isinstance(expected, sequence_types)
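The str carve-out the surviving branch keeps is load-bearing: strings are self-referential under indexing, so element-wise recursion on them would never reach a scalar base case. A two-line illustration:

s = "a"
assert s[0] == s        # "a"[0] is "a" again
assert s[0][0][0] == s  # ...and so on; the recursion would never terminate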
24 changes: 23 additions & 1 deletion torch/testing/_internal/common_utils.py
@@ -1879,6 +1879,28 @@ def _process_inputs(self, actual, expected, *, id, allow_subclasses):
         return actual, expected
 
 
+class TypedStoragePair(TensorLikePair):
+    """Pair for :class:`torch.storage.TypedStorage` inputs."""
+    def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters):
+        self._check_inputs_isinstance(actual, expected, cls=torch.storage.TypedStorage)
+        super().__init__(actual, expected, **other_parameters)
+        self.rtol = max(self.rtol, rtol_override)
+        self.atol = max(self.atol, atol_override)
+
+    def _to_tensor(self, typed_storage):
+        return torch.tensor(
+            typed_storage._untyped_storage,
+            dtype={
+                torch.quint8: torch.uint8,
+                torch.quint4x2: torch.uint8,
+                torch.quint2x4: torch.uint8,
+                torch.qint32: torch.int32,
+                torch.qint8: torch.int8
+            }.get(typed_storage.dtype, typed_storage.dtype),
+            device=typed_storage.device,
+        )
+
+
 class UnittestPair(Pair):
     """Fallback ABC pair that handles non-numeric inputs.

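The dict lookup in _to_tensor handles quantized storages, whose dtypes torch.tensor cannot construct directly; they are routed through their integer backing dtypes instead. A standalone sketch of that fallback (storage_to_tensor is a hypothetical name used here for illustration):

import torch

# Same mapping as TypedStoragePair._to_tensor above.
_QUANT_TO_INT = {
    torch.quint8: torch.uint8,
    torch.quint4x2: torch.uint8,
    torch.quint2x4: torch.uint8,
    torch.qint32: torch.int32,
    torch.qint8: torch.int8,
}

def storage_to_tensor(typed_storage):
    # Build a comparable tensor from the raw bytes, substituting the integer
    # backing dtype when the storage dtype is quantized.
    return torch.tensor(
        typed_storage._untyped_storage,
        dtype=_QUANT_TO_INT.get(typed_storage.dtype, typed_storage.dtype),
        device=typed_storage.device,
    )

s = torch.FloatStorage([1.0, 2.0, 3.0])
assert storage_to_tensor(s).dtype is torch.float32  # non-quantized dtypes pass through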
@@ -2864,14 +2886,14 @@ def to_list(input):
                 RelaxedBooleanPair,
                 RelaxedNumberPair,
                 TensorOrArrayPair,
+                TypedStoragePair,
                 StringPair,
                 SetPair,
                 TypePair,
                 ObjectPair,
             ),
             sequence_types=(
                 Sequence,
-                torch.storage.TypedStorage,
                 Sequential,
                 ModuleList,
                 ParameterList,
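With TypedStoragePair registered in pair_types and torch.storage.TypedStorage dropped from sequence_types, assertEqual now compares a storage as a single tensor-like pair instead of one scalar pair per element. A hedged usage sketch, assuming a PyTorch build that includes this patch:

import torch
from torch.testing._internal.common_utils import TestCase, run_tests


class TypedStorageComparisonTest(TestCase):
    def test_equal_storages(self):
        a = torch.FloatStorage([1.0, 2.0, 3.0])
        b = torch.FloatStorage([1.0, 2.0, 3.0])
        # Resolves to one TypedStoragePair, not per-index recursion.
        self.assertEqual(a, b)

    def test_mismatched_storages(self):
        a = torch.FloatStorage([1.0, 2.0, 3.0])
        b = torch.FloatStorage([1.0, 2.0, 4.0])
        with self.assertRaises(AssertionError):
            self.assertEqual(a, b)


if __name__ == "__main__":
    run_tests()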