# NOTE(review): the lines above this module previously contained GitHub web-page
# chrome ("Skip to content", branch/contributor info) left over from scraping the
# file off github.com; it was not Python and would not parse. Source appears to be
# mmcv's DataContainer module (85 lines, 2 contributors per the scraped header).
import functools
import torch
def assert_tensor_type(func):
    """Decorator: require that the owner's ``data`` is a ``torch.Tensor``.

    Intended for methods of objects exposing a ``data`` attribute (e.g.
    ``DataContainer``). If ``args[0].data`` (i.e. ``self.data``) is not a
    tensor, raise ``AttributeError`` naming the class, the method, and the
    offending ``datatype``; otherwise delegate to the wrapped method.

    Raises:
        AttributeError: when ``args[0].data`` is not a ``torch.Tensor``.
    """

    # functools.wraps preserves func.__name__/__doc__ on the wrapper; without
    # it the `functools` import is unused and decorated methods lose identity.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not isinstance(args[0].data, torch.Tensor):
            raise AttributeError('{} has no attribute {} for type {}'.format(
                args[0].__class__.__name__, func.__name__, args[0].datatype))
        return func(*args, **kwargs)

    return wrapper
class DataContainer(object):
    """A container for any type of objects.

    Typically tensors will be stacked in the collate function and sliced along
    some dimension in the scatter function. This behavior has some limitations.

    1. All tensors have to be the same size.
    2. Types are limited (numpy array or Tensor).

    We design `DataContainer` and `MMDataParallel` to overcome these
    limitations. The behavior can be either of the following.

    - copy to GPU, pad all tensors to the same size and stack them
    - copy to GPU without stacking
    - leave the objects as is and pass it to the model
    - pad_dims specifies the number of last few dimensions to do padding

    Args:
        data: the wrapped object (typically a Tensor, but any type is allowed).
        stack (bool): whether the collate function should stack the data.
        padding_value (int): fill value used when padding tensors.
        cpu_only (bool): if True, the data is never moved to GPU.
        pad_dims (int | None): number of trailing dimensions to pad
            (must be None, 1, 2 or 3).
    """

    def __init__(self,
                 data,
                 stack=False,
                 padding_value=0,
                 cpu_only=False,
                 pad_dims=2):
        self._data = data
        self._cpu_only = cpu_only
        self._stack = stack
        self._padding_value = padding_value
        # Only up to the last 3 dimensions may be padded.
        assert pad_dims in [None, 1, 2, 3]
        self._pad_dims = pad_dims

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, repr(self.data))

    @property
    def data(self):
        """The wrapped object, unchanged."""
        return self._data

    @property
    def datatype(self):
        """Tensor type string for tensors, otherwise the Python type."""
        if isinstance(self.data, torch.Tensor):
            return self.data.type()
        else:
            return type(self.data)

    @property
    def cpu_only(self):
        """Whether this data must stay on CPU."""
        return self._cpu_only

    @property
    def stack(self):
        """Whether the collate function should stack this data."""
        return self._stack

    @property
    def padding_value(self):
        """Fill value used when padding tensors."""
        return self._padding_value

    @property
    def pad_dims(self):
        """Number of trailing dimensions to pad (None, 1, 2 or 3)."""
        return self._pad_dims

    @assert_tensor_type
    def size(self, *args, **kwargs):
        """Delegate to ``Tensor.size``; raises AttributeError for non-tensors."""
        return self.data.size(*args, **kwargs)

    @assert_tensor_type
    def dim(self):
        """Delegate to ``Tensor.dim``; raises AttributeError for non-tensors."""
        return self.data.dim()
# NOTE(review): a trailing GitHub footer line ("You can’t perform that action at
# this time.") was scrape residue, not code, and has been commented out here.