Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions tensordict/_td.py
Original file line number Diff line number Diff line change
Expand Up @@ -2708,8 +2708,8 @@ def _memmap_(
if inplace:
self._is_memmap = True
self._is_shared = False # since they are mutually exclusive
if hasattr(self, "_validate_value_cached"):
delattr(self, "_validate_value_cached")
# if hasattr(self, "_validate_value_cached"):
# self._validate_value_cached = None
self._device = torch.device("cpu")
else:
dest._is_memmap = True
Expand Down
66 changes: 40 additions & 26 deletions tensordict/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -2926,8 +2926,8 @@ def dtype(self):
return self._dtype()

def _batch_size_setter(self, new_batch_size: torch.Size) -> None:
if hasattr(self, "_validate_value_cached"):
delattr(self, "_validate_value_cached")
# if hasattr(self, "_validate_value_cached"):
# self._validate_value_cached = None
if new_batch_size == self.batch_size:
return
if self._lazy:
Expand Down Expand Up @@ -4553,17 +4553,17 @@ def clear_device_(self) -> T:

"""
self._device = None
if hasattr(self, "_validate_value_cached"):
delattr(self, "_validate_value_cached")
# if hasattr(self, "_validate_value_cached"):
# self._validate_value_cached = None
for value in self.values():
if _is_tensor_collection(type(value)):
value.clear_device_()
return self

def _set_device(self, device: torch.device) -> T:
self._device = device
if hasattr(self, "_validate_value_cached"):
delattr(self, "_validate_value_cached")
# if hasattr(self, "_validate_value_cached"):
# self._validate_value_cached = None
for value in self.values():
if _is_tensor_collection(type(value)):
value._set_device(device=device)
Expand Down Expand Up @@ -11550,26 +11550,38 @@ def _validate_key(self, key: NestedKey) -> NestedKey:
raise KeyError(_GENERIC_NESTED_ERR.format(key))
return key

@property
def _validate_value(self):
if is_compiling():
return self._validate_value_generic
try:
return self._validate_value_cached
except AttributeError:
if self.device:
if self.batch_size:
self._validate_value_cached = self._validate_value_generic
else:
self._validate_value_cached = self._validate_value_batchfree
else:
if self.batch_size:
self._validate_value_cached = self._validate_value_devicefree
else:
self._validate_value_cached = (
self._validate_value_batchfree_devicefree
)
return self._validate_value_cached
# TODO: figure out how to make this work, as it brings some potential speed-up.
# We don't want to run any check (batch size or device)
# as these incur overhead. We want to cache, per TD, which validation routine should be used.
# However, caching a bound method on the instance creates a self-reference that keeps the TD
# (and hence its tensors) alive — a pseudo memory leak the gc fails to break.
# See issue #1309 for ref.
# See #1310 for the revert.
# _validate_value_cached: Callable[[Any], Any] | None = None
#
# def _validate_value(self):
# if is_compiling():
# return self._validate_value_generic
# _validate_value_cached = self._validate_value_cached
# if _validate_value_cached is None:
# if self.device:
# if self.batch_size:
# _validate_value_cached = self._validate_value_cached = (
# self._validate_value_generic
# )
# else:
# _validate_value_cached = self._validate_value_cached = (
# self._validate_value_batchfree
# )
# else:
# if self.batch_size:
# _validate_value_cached = self._validate_value_cached = (
# self._validate_value_devicefree
# )
# else:
# _validate_value_cached = self._validate_value_cached = (
# self._validate_value_batchfree_devicefree
# )
# return _validate_value_cached

def _validate_value_generic(
self,
Expand Down Expand Up @@ -11631,6 +11643,8 @@ def _validate_value_generic(
self.names = value.names[: self.batch_dims]
return value

_validate_value = _validate_value_generic

def _validate_value_batchfree(
self,
value: CompatibleType | dict[str, CompatibleType],
Expand Down
Loading