diff --git a/docs/source/storage.rst b/docs/source/storage.rst
index 28cf4444fbc97..84fed2f659a7b 100644
--- a/docs/source/storage.rst
+++ b/docs/source/storage.rst
@@ -22,6 +22,10 @@ holds the data as an untyped array of bytes.
 Every strided :class:`torch.Tensor` contains a :class:`torch.TypedStorage`,
 which stores all of the data that the :class:`torch.Tensor` views.

+.. warning::
+  All storage classes except for :class:`torch.UntypedStorage` will be removed
+  in the future, and :class:`torch.UntypedStorage` will be used in all cases.
+
 .. autoclass:: torch.TypedStorage
    :members:
    :undoc-members:
diff --git a/test/test_torch.py b/test/test_torch.py
index ae302c1a20d29..eb91423e73b42 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -6430,6 +6430,56 @@ def test_storage_casts(self):
         self.assertEqual(complexdouble_storage.type(), 'torch.ComplexDoubleStorage')
         self.assertIs(complexdouble_storage.dtype, torch.complex128)

+    def test_typed_storage_deprecation_warning(self):
+        s0 = torch.FloatStorage(10)
+
+        funcs = [
+            lambda: torch.FloatStorage(),
+            lambda: torch.FloatStorage.dtype,
+            lambda: s0.fill_(0),
+            lambda: s0.is_cuda,
+            lambda: s0.untyped(),
+            lambda: len(s0),
+            lambda: s0[0],
+        ]
+
+        if torch.cuda.is_available():
+            s1 = torch.cuda.FloatStorage(10)
+
+            funcs += [
+                lambda: torch.cuda.FloatStorage(),
+                lambda: torch.cuda.FloatStorage.dtype,
+                lambda: s1.fill_(0),
+                lambda: s1.is_cuda,
+                lambda: s1.untyped(),
+                lambda: len(s1),
+                lambda: s1[0],
+            ]
+
+        # Check that each of the TypedStorage function calls produce a warning
+        # if warnings are reset between each
+        for f in funcs:
+            with warnings.catch_warnings(record=True) as w:
+                f()
+                self.assertEqual(len(w), 1)
+                warning = w[0].message
+                self.assertTrue(warning, DeprecationWarning)
+                self.assertTrue(re.search(
+                    '^TypedStorage is deprecated',
+                    str(warning)))
+
+        # Check that only one warning is raised from calling multiple
+        # TypedStorage functions if warnings are not reset between each
+        with warnings.catch_warnings(record=True) as w:
+            for f in funcs:
+                f()
+            self.assertEqual(len(w), 1)
+            warning = w[0].message
+            self.assertTrue(warning, DeprecationWarning)
+            self.assertTrue(re.search(
+                '^TypedStorage is deprecated',
+                str(warning)))
+
     def test_from_file(self):
         def assert_with_filename(filename):
             size = 10000
diff --git a/torch/__init__.py b/torch/__init__.py
index c5c3e69ddcb8f..029aad05c1402 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -647,7 +647,7 @@ def is_warn_always_enabled():
 ################################################################################

 from ._tensor import Tensor
-from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage
+from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage, _warn_typed_storage_removal

 # NOTE: New Storage classes should never be added. When adding a new
 # dtype, use torch.storage.TypedStorage directly.
@@ -655,86 +655,103 @@ def is_warn_always_enabled():
 class ByteStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.uint8

 class DoubleStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.double

 class FloatStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.float

 class HalfStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.half

 class LongStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.long

 class IntStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.int

 class ShortStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.short

 class CharStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.int8

 class BoolStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.bool

 class BFloat16Storage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.bfloat16

 class ComplexDoubleStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.cdouble

 class ComplexFloatStorage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.cfloat

 class QUInt8Storage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.quint8

 class QInt8Storage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.qint8

 class QInt32Storage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.qint32

 class QUInt4x2Storage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.quint4x2

 class QUInt2x4Storage(_LegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.quint2x4

 _storage_classes = {
diff --git a/torch/cuda/__init__.py b/torch/cuda/__init__.py
index 115e1c1036cb2..d7c83ebfc739b 100644
--- a/torch/cuda/__init__.py
+++ b/torch/cuda/__init__.py
@@ -724,11 +724,12 @@ def type(self, *args, **kwargs):
     __new__ = _lazy_new


-from torch.storage import _LegacyStorage
+from torch.storage import _LegacyStorage, _warn_typed_storage_removal

 class _CudaLegacyStorage(_LegacyStorage):
     @classmethod
     def from_buffer(cls, *args, **kwargs):
+        _warn_typed_storage_removal()
         raise RuntimeError('from_buffer: Not available for CUDA storage')

     @classmethod
@@ -742,61 +743,73 @@ def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None):
 class ByteStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.uint8

 class DoubleStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.double

 class FloatStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.float

 class HalfStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.half

 class LongStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.long

 class IntStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.int

 class ShortStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.short

 class CharStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.int8

 class BoolStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.bool

 class BFloat16Storage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.bfloat16

 class ComplexDoubleStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.cdouble

 class ComplexFloatStorage(_CudaLegacyStorage):
     @classproperty
     def dtype(self):
+        _warn_typed_storage_removal()
         return torch.cfloat

 del _LegacyStorage
diff --git a/torch/storage.py b/torch/storage.py
index 8e35973405b1b..b2d5aed569270 100644
--- a/torch/storage.py
+++ b/torch/storage.py
@@ -7,6 +7,7 @@
 import copy
 import collections
 from functools import lru_cache
+import warnings
 try:
     import numpy as np
     HAS_NUMPY = True
@@ -305,12 +306,20 @@ def _isint(x):
     else:
         return isinstance(x, int)

+def _warn_typed_storage_removal():
+    warnings.warn(
+        "TypedStorage is deprecated. It will be removed in the future and "
+        "UntypedStorage will be the only storage class. This should only matter "
+        "to you if you are using storages directly.",
+        DeprecationWarning)
+
 class TypedStorage:
     is_sparse = False

     dtype: torch.dtype

     def fill_(self, value):
+        _warn_typed_storage_removal()
         self[0:len(self)] = value
         return self

@@ -382,6 +391,7 @@ def __new__(cls, *args, wrap_storage=None, dtype=None, device=None):
             dtype=cls.dtype)

     def __init__(self, *args, device=None, dtype=None, wrap_storage=None):
+        _warn_typed_storage_removal()
         arg_error_msg = (
             'TypedStorage.__init__ received an invalid combination '
             'of arguments. Expected one of:\n'
@@ -450,10 +460,12 @@ def __init__(self, *args, device=None, dtype=None, wrap_storage=None):

     @property
     def is_cuda(self):
+        _warn_typed_storage_removal()
         return self.device.type == 'cuda'

     def untyped(self):
         """Returns the internal :class:`torch.UntypedStorage`"""
+        _warn_typed_storage_removal()
         return self._storage

     def _new_wrapped_storage(self, untyped_storage):
@@ -465,6 +477,7 @@ def _new_wrapped_storage(self, untyped_storage):
         return type(self)(wrap_storage=untyped_storage)

     def __len__(self):
+        _warn_typed_storage_removal()
         return self._storage.nbytes() // self.element_size()

     def _maybe_wrap_index(self, idx, is_stop=False):
@@ -493,6 +506,7 @@ def _maybe_wrap_index(self, idx, is_stop=False):
             return idx % self.size()

     def __setitem__(self, idx, value):
+        _warn_typed_storage_removal()
         if not isinstance(idx, (int, slice)):
             raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
         if torch.is_storage(value):
@@ -515,6 +529,7 @@ def __setitem__(self, idx, value):
         tmp_tensor[idx] = value

     def __getitem__(self, idx):
+        _warn_typed_storage_removal()
         if self.device.type == 'meta':
             raise NotImplementedError("Not available for 'meta' device type")

@@ -544,13 +559,16 @@ def __getitem__(self, idx):
         return tmp_tensor[idx_wrapped].item()

     def copy_(self, source: T, non_blocking: bool = None):
+        _warn_typed_storage_removal()
         self._storage.copy_(source.untyped(), non_blocking)
         return self

     def nbytes(self):
+        _warn_typed_storage_removal()
         return self._storage.nbytes()

     def type(self, dtype: str = None, non_blocking: bool = False) -> Union[T, str]:
+        _warn_typed_storage_removal()
         if dtype is None:
             legacy_class = self._get_legacy_storage_class()

@@ -563,18 +581,22 @@ def type(self, dtype: str = None, non_blocking: bool = False) -> Union[T, str]:
         return self._storage.type(dtype, non_blocking)

     def cuda(self, device=None, non_blocking=False, **kwargs) -> T:
+        _warn_typed_storage_removal()
         if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
             raise RuntimeError("Cannot create CUDA storage with quantized dtype")
         cuda_storage: torch.UntypedStorage = self._storage.cuda(device, non_blocking, **kwargs)
         return self._new_wrapped_storage(cuda_storage)

     def element_size(self):
+        _warn_typed_storage_removal()
         return torch._utils._element_size(self.dtype)

     def get_device(self) -> int:
+        _warn_typed_storage_removal()
         return self._storage.get_device()

     def __str__(self):
+        _warn_typed_storage_removal()
         info_str = (
             f'[{torch.typename(self)}(dtype={self.dtype}, '
             f'device={self.device}) of size {len(self)}]')
@@ -585,34 +607,43 @@ def __str__(self):
         return data_str + '\n' + info_str

     def __repr__(self):
+        _warn_typed_storage_removal()
         return str(self)

     def __iter__(self):
+        _warn_typed_storage_removal()
         return iter(map(lambda i: self[i], range(self.size())))

     def __copy__(self):
+        _warn_typed_storage_removal()
         return self._new_wrapped_storage(copy.copy(self._storage))

     def __deepcopy__(self, memo):
+        _warn_typed_storage_removal()
         return self._new_wrapped_storage(copy.deepcopy(self._storage, memo))

     def __sizeof__(self):
+        _warn_typed_storage_removal()
         return super(TypedStorage, self).__sizeof__() + self.nbytes()

     def clone(self):
         """Returns a copy of this storage"""
+        _warn_typed_storage_removal()
         return self._new_wrapped_storage(self._storage.clone())

     def tolist(self):
         """Returns a list containing the elements of this storage"""
+        _warn_typed_storage_removal()
         return list(self)

     def cpu(self):
         """Returns a CPU copy of this storage if it's not already on the CPU"""
+        _warn_typed_storage_removal()
         return self._new_wrapped_storage(self._storage.cpu())

     def pin_memory(self):
         """Coppies the storage to pinned memory, if it's not already pinned."""
+        _warn_typed_storage_removal()
         return self._new_wrapped_storage(self._storage.pin_memory())

     def share_memory_(self):
@@ -624,6 +655,7 @@ def share_memory_(self):

         Returns: self
         """
+        _warn_typed_storage_removal()
         self._storage.share_memory_()
         return self

@@ -643,26 +675,32 @@ def _cdata(self):

     @property
     def device(self):
+        _warn_typed_storage_removal()
         return self._storage.device

     def size(self):
+        _warn_typed_storage_removal()
         return len(self)

     def pickle_storage_type(self):
+        _warn_typed_storage_removal()
         try:
             return _dtype_to_storage_type_map()[self.dtype]
         except KeyError:
             raise KeyError(f'dtype {self.dtype} is not recognized')

     def __reduce__(self):
+        _warn_typed_storage_removal()
         b = io.BytesIO()
         torch.save(self, b, _use_new_zipfile_serialization=False)
         return (_load_from_bytes, (b.getvalue(),))

     def data_ptr(self):
+        _warn_typed_storage_removal()
         return self._storage.data_ptr()

     def resize_(self, size):
+        _warn_typed_storage_removal()
         self._storage.resize_(size * self.element_size())

     @classmethod
@@ -674,6 +712,7 @@ def _weak_ref(self, *args, **kwargs):

     @classmethod
     def from_buffer(cls, *args, dtype=None, device=None, **kwargs):
+        _warn_typed_storage_removal()
         if cls == TypedStorage:
             dtype = torch.get_default_dtype() if dtype is None else dtype
             device = torch.device('cpu' if device is None else device)
@@ -706,50 +745,62 @@ def _to(self, dtype):

     def double(self):
         """Casts this storage to double type"""
+        _warn_typed_storage_removal()
         return self._to(torch.double)

     def float(self):
         """Casts this storage to float type"""
+        _warn_typed_storage_removal()
         return self._to(torch.float)

     def half(self):
         """Casts this storage to half type"""
+        _warn_typed_storage_removal()
         return self._to(torch.half)

     def long(self):
         """Casts this storage to long type"""
+        _warn_typed_storage_removal()
         return self._to(torch.long)

     def int(self):
         """Casts this storage to int type"""
+        _warn_typed_storage_removal()
         return self._to(torch.int)

     def short(self):
         """Casts this storage to short type"""
+        _warn_typed_storage_removal()
         return self._to(torch.short)

     def char(self):
         """Casts this storage to char type"""
+        _warn_typed_storage_removal()
         return self._to(torch.int8)

     def byte(self):
         """Casts this storage to byte type"""
+        _warn_typed_storage_removal()
         return self._to(torch.uint8)

     def bool(self):
         """Casts this storage to bool type"""
+        _warn_typed_storage_removal()
         return self._to(torch.bool)

     def bfloat16(self):
         """Casts this storage to bfloat16 type"""
+        _warn_typed_storage_removal()
         return self._to(torch.bfloat16)

     def complex_double(self):
         """Casts this storage to complex double type"""
+        _warn_typed_storage_removal()
         return self._to(torch.cdouble)

     def complex_float(self):
         """Casts this storage to complex float type"""
+        _warn_typed_storage_removal()
         return self._to(torch.cfloat)

     @classmethod
@@ -771,6 +822,7 @@ def from_file(cls, filename, shared, size):
             shared (bool): whether to share memory
             size (int): number of elements in the storage
         """
+        _warn_typed_storage_removal()
         if cls == TypedStorage:
             raise RuntimeError('from_file can only be called on derived classes')
         untyped_storage: UntypedStorage = UntypedStorage.from_file(
@@ -785,6 +837,7 @@ def _expired(cls, *args, **kwargs):
         return UntypedStorage._expired(*args, **kwargs)

     def is_pinned(self):
+        _warn_typed_storage_removal()
         return self._storage.is_pinned()

     def _write_file(self, *args, **kwargs):
@@ -800,6 +853,7 @@ def _share_cuda_(self, *args, **kwargs):
         return self._storage._share_cuda_(*args, **kwargs)

     def is_shared(self):
+        _warn_typed_storage_removal()
         return self._storage.is_shared()

     @classmethod