Replace AT_CHECK with TORCH_CHECK [shard 1/10] #20426

Closed
wants to merge 11 commits
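Background, not part of the diff below: TORCH_CHECK is the drop-in replacement for AT_CHECK, and both macros take a boolean condition followed by any number of message pieces that are concatenated into the error text when the condition fails. That is why every call site in this shard changes only the macro name and leaves the condition and message arguments untouched. A minimal sketch of the call pattern (checked_size below is a hypothetical helper for illustration, not code from this PR):

// Sketch only, assuming the usual ATen headers; checked_size() is hypothetical and not part of this PR.
#include <ATen/ATen.h>

// Returns the size of `t` along `dim`, rejecting out-of-range dims with the
// same condition-plus-message-pieces pattern used at the migrated call sites.
static int64_t checked_size(const at::Tensor& t, int64_t dim) {
  TORCH_CHECK(
      dim >= 0 && dim < t.dim(),
      "expected dim to be in [0, ", t.dim(), "), but got ", dim);
  return t.size(dim);
}

On failure the macro is expected to throw a c10::Error carrying the concatenated message, so the rename should not change observable error behavior at these call sites.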
2 changes: 1 addition & 1 deletion aten/src/ATen/CPUApplyUtils.h
@@ -30,7 +30,7 @@ inline std::pair<int64_t, int64_t> collapse_dims(
T* strides,
int64_t dims,
const int excludeDim = -1) {
-AT_CHECK(
+TORCH_CHECK(
excludeDim >= -1 && excludeDim < dims,
"expected excluded dim between -1 and dims - 1");

6 changes: 3 additions & 3 deletions aten/src/ATen/ExpandUtils.cpp
@@ -16,7 +16,7 @@ std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b) {
int64_t sizeA = (dimA >= 0) ? a[dimA] : 1;
int64_t sizeB = (dimB >= 0) ? b[dimB] : 1;

-AT_CHECK(
+TORCH_CHECK(
sizeA == sizeB || sizeA == 1 || sizeB == 1,
"The size of tensor a (", sizeA,
") must match the size of tensor b (", sizeB,
@@ -53,7 +53,7 @@ std::tuple<std::vector<int64_t>, std::vector<int64_t>> inferExpandGeometry(
: expandedSizes[i + 1] * expandedStrides[i + 1];
int64_t targetSize = sizes[i];
if (targetSize == -1) {
-AT_CHECK(
+TORCH_CHECK(
dim >= 0,
"The expanded size of the tensor (",
targetSize,
@@ -62,7 +62,7 @@ std::tuple<std::vector<int64_t>, std::vector<int64_t>> inferExpandGeometry(
targetSize = size;
}
if (size != targetSize) {
-AT_CHECK(
+TORCH_CHECK(
size == 1,
"The expanded size of the tensor (",
targetSize,

2 changes: 1 addition & 1 deletion aten/src/ATen/InferSize.h
@@ -36,7 +36,7 @@ static std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {
// works yet
// empty_tensor.view(-1, 0)
// doesn't.
-AT_CHECK(newsize != 0, "cannot reshape tensor of 0 elements into shape ",
+TORCH_CHECK(newsize != 0, "cannot reshape tensor of 0 elements into shape ",
shape, " because the unspecified dimension size -1 can be any "
"value and is ambiguous");
res[*infer_dim] = numel / newsize;

4 changes: 2 additions & 2 deletions aten/src/ATen/MatrixRef.h
@@ -40,7 +40,7 @@ namespace at {
/// Construct an MatrixRef from an ArrayRef and outer stride.
/*implicit*/ MatrixRef(ArrayRef<T> arr, size_type stride0)
: arr(arr), stride0(stride0) {
-AT_CHECK(arr.size() % stride0 == 0, "MatrixRef: ArrayRef size ", arr.size(), " not divisible by stride ", stride0)
+TORCH_CHECK(arr.size() % stride0 == 0, "MatrixRef: ArrayRef size ", arr.size(), " not divisible by stride ", stride0)
}

/// @}
@@ -59,7 +59,7 @@ namespace at {
} else if (dim == 1) {
return stride0;
} else {
-AT_CHECK(0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1");
+TORCH_CHECK(0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1");
}
}

28 changes: 14 additions & 14 deletions aten/src/ATen/SparseTensorImpl.cpp
@@ -74,7 +74,7 @@ int64_t SparseTensorImpl::dim() const {
return sparse_dim_ + dense_dim_;
}
TensorImpl* SparseTensorImpl::maybe_zero_dim(bool condition_when_zero_dim) {
-AT_CHECK(condition_when_zero_dim == (dim() == 0),
+TORCH_CHECK(condition_when_zero_dim == (dim() == 0),
"Attempted to maybe_zero_dim on a SparseTensorImpl to ", condition_when_zero_dim,
" but the SparseTensor's dim() is ", dim(), " and SparseTensors do not support"
" changing dimensionality via maybe_zero_dim");
@@ -90,29 +90,29 @@ int64_t SparseTensorImpl::storage_offset() const {
AT_ERROR("sparse tensors do not have storage");
}
void SparseTensorImpl::set_indices_and_values_unsafe(const Tensor& indices, const Tensor& values) {
AT_CHECK(allow_tensor_metadata_change(), "set_indices_and_values_unsafe is not allowed on Tensor created from .data or .detach()");
TORCH_CHECK(allow_tensor_metadata_change(), "set_indices_and_values_unsafe is not allowed on Tensor created from .data or .detach()");
AT_ASSERT(!indices.is_variable() && !values.is_variable()); // They should be plain tensors! // TODO: change this to check `.requires_grad()` and `GradMode::is_enabled()` when Variable and Tensor are merged

-AT_CHECK(!indices.is_sparse(), "expected indices to be a dense tensor, but got indices of layout ", indices.layout());
-AT_CHECK(!values.is_sparse(), "expected values to be a dense tensor, but got values of layout ", values.layout());
+TORCH_CHECK(!indices.is_sparse(), "expected indices to be a dense tensor, but got indices of layout ", indices.layout());
+TORCH_CHECK(!values.is_sparse(), "expected values to be a dense tensor, but got values of layout ", values.layout());

-AT_CHECK(values.device().type() == device().type(), "device type of values (", values.device().type(), ") must match device type of device().type()", device().type(), ")");
-AT_CHECK(values.scalar_type() == typeMetaToScalarType(dtype()), "dtype of values (", values.scalar_type(), ") must match dtype of sparse tensor (", typeMetaToScalarType(dtype()), ")");
-AT_CHECK(indices.scalar_type() == kLong, "indices must be an int64 tensor");
-AT_CHECK(indices.type().backend() == values.type().backend(), "backend of indices (", indices.type().backend(), ") must match backend of values (", values.type().backend(), ")");
-AT_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device(), "device of indices (", indices.get_device(), ") must match device of values (", values.get_device(), ")");
+TORCH_CHECK(values.device().type() == device().type(), "device type of values (", values.device().type(), ") must match device type of device().type()", device().type(), ")");
+TORCH_CHECK(values.scalar_type() == typeMetaToScalarType(dtype()), "dtype of values (", values.scalar_type(), ") must match dtype of sparse tensor (", typeMetaToScalarType(dtype()), ")");
+TORCH_CHECK(indices.scalar_type() == kLong, "indices must be an int64 tensor");
+TORCH_CHECK(indices.type().backend() == values.type().backend(), "backend of indices (", indices.type().backend(), ") must match backend of values (", values.type().backend(), ")");
+TORCH_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device(), "device of indices (", indices.get_device(), ") must match device of values (", values.get_device(), ")");

-AT_CHECK(indices.dim() == 2, "indices must be sparse_dim x nnz, but got: ", indices.sizes());
-AT_CHECK(indices.size(1) == values.size(0), "indices and values must have same nnz, but got nnz from indices: ", indices.size(1), ", nnz from values: ", values.size(0));
-AT_CHECK(indices.size(0) == sparse_dim_, "indices has incorrect first dimension, expected ", sparse_dim_, ", got ", indices.size(0));
-AT_CHECK(values.dim() == dense_dim_ + 1, "values has incorrect number of dimensions, expected ", dense_dim_ + 1, ", got ", values.dim());
+TORCH_CHECK(indices.dim() == 2, "indices must be sparse_dim x nnz, but got: ", indices.sizes());
+TORCH_CHECK(indices.size(1) == values.size(0), "indices and values must have same nnz, but got nnz from indices: ", indices.size(1), ", nnz from values: ", values.size(0));
+TORCH_CHECK(indices.size(0) == sparse_dim_, "indices has incorrect first dimension, expected ", sparse_dim_, ", got ", indices.size(0));
+TORCH_CHECK(values.dim() == dense_dim_ + 1, "values has incorrect number of dimensions, expected ", dense_dim_ + 1, ", got ", values.dim());

auto dense_size_original = sizes().slice(sparse_dim_);
std::vector<int64_t> expected_values_size_vec = {values.size(0)};
expected_values_size_vec.insert(expected_values_size_vec.end(), dense_size_original.begin(), dense_size_original.end());
IntArrayRef expected_values_size(expected_values_size_vec);
auto new_values_size = values.sizes();
-AT_CHECK(
+TORCH_CHECK(
std::equal(expected_values_size.begin(), expected_values_size.end(), new_values_size.begin()),
"values has incorrect size, expected ", expected_values_size, ", got ", new_values_size
);

22 changes: 11 additions & 11 deletions aten/src/ATen/SparseTensorImpl.h
@@ -57,7 +57,7 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
// WARNING: This function does NOT preserve invariants of sparse_dim/dense_dim with
// respect to indices and values
void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
AT_CHECK(allow_tensor_metadata_change(), "raw_resize_ is not allowed on Tensor created from .data or .detach()");
TORCH_CHECK(allow_tensor_metadata_change(), "raw_resize_ is not allowed on Tensor created from .data or .detach()");
sizes_ = size.vec();
sparse_dim_ = sparse_dim;
dense_dim_ = dense_dim;
@@ -87,19 +87,19 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
// 4. When we attempt to shrink the size of any of the sparse dimensions on a non-empty sparse tensor
// (this could make some of the stored indices out-of-bound and thus unsafe).
void resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
AT_CHECK(allow_tensor_metadata_change(), "resize_ is not allowed on Tensor created from .data or .detach()");
AT_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size());
TORCH_CHECK(allow_tensor_metadata_change(), "resize_ is not allowed on Tensor created from .data or .detach()");
TORCH_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size());
if (nnz() > 0) {
auto alt_options_msg = "You could try the following options:\n\
1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\
2. If you need to resize this tensor, you have the following options:\n\
1. For both sparse and dense dimensions, keep the number of them constant and the size of them non-shrinking, and then try the same call again.\n\
2. Or, create a new sparse tensor with the correct indices and values from this sparse tensor.";

-AT_CHECK(sparse_dim == sparse_dim_,
+TORCH_CHECK(sparse_dim == sparse_dim_,
"changing the number of sparse dimensions (from ", sparse_dim_, " to ", sparse_dim, ") on a non-empty sparse tensor is not supported.\n", alt_options_msg);

-AT_CHECK(dense_dim == dense_dim_,
+TORCH_CHECK(dense_dim == dense_dim_,
"changing the number of dense dimensions (from ", dense_dim_, " to ", dense_dim, ") on a non-empty sparse tensor is not supported.\n", alt_options_msg);

bool shrinking_sparse_dims = false;
@@ -121,10 +121,10 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
}
}

-AT_CHECK(!shrinking_sparse_dims,
+TORCH_CHECK(!shrinking_sparse_dims,
"shrinking the size of sparse dimensions (from ", sparse_size_original, " to ", sparse_size_new, ") on a non-empty sparse tensor is not supported.\n", alt_options_msg);

-AT_CHECK(!shrinking_dense_dim,
+TORCH_CHECK(!shrinking_dense_dim,
"shrinking the size of dense dimensions (from ", dense_size_original, " to ", dense_size_new, ") on a non-empty sparse tensor is not supported.\n", alt_options_msg);
}

@@ -145,8 +145,8 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {

// NOTE: this function will resize the sparse tensor and also set `indices` and `values` to empty.
void resize_and_clear_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
AT_CHECK(allow_tensor_metadata_change(), "resize_and_clear_ is not allowed on Tensor created from .data or .detach()");
AT_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size());
TORCH_CHECK(allow_tensor_metadata_change(), "resize_and_clear_ is not allowed on Tensor created from .data or .detach()");
TORCH_CHECK(sparse_dim + dense_dim == static_cast<int64_t>(size.size()), "number of dimensions must be sparse_dim (", sparse_dim, ") + dense_dim (", dense_dim, "), but got ", size.size());

sizes_ = size.vec();
sparse_dim_ = sparse_dim;
Expand All @@ -162,13 +162,13 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
}

void set_coalesced(bool coalesced) {
AT_CHECK(allow_tensor_metadata_change(), "set_coalesced is not allowed on Tensor created from .data or .detach()");
TORCH_CHECK(allow_tensor_metadata_change(), "set_coalesced is not allowed on Tensor created from .data or .detach()");
coalesced_ = coalesced;
}

// NOTE: this function is only used internally and not exposed to Python frontend
void set_nnz_and_narrow(int64_t new_nnz) {
AT_CHECK(allow_tensor_metadata_change(), "set_nnz_and_narrow is not allowed on Tensor created from .data or .detach()");
TORCH_CHECK(allow_tensor_metadata_change(), "set_nnz_and_narrow is not allowed on Tensor created from .data or .detach()");
AT_ASSERT(new_nnz <= nnz());
indices_ = indices_.narrow(1, 0, new_nnz);
values_ = values_.narrow(0, 0, new_nnz);

4 changes: 2 additions & 2 deletions aten/src/ATen/TensorGeometry.h
@@ -46,8 +46,8 @@ struct CAFFE2_API TensorGeometry {

TensorGeometry transpose(int64_t dim0, int64_t dim1) {
TensorGeometry r = *this; // copy
-AT_CHECK(dim0 < dim(), "transpose: dim0=", dim0, " out of range (dim=", dim(), ")")
-AT_CHECK(dim1 < dim(), "transpose: dim1=", dim1, " out of range (dim=", dim(), ")")
+TORCH_CHECK(dim0 < dim(), "transpose: dim0=", dim0, " out of range (dim=", dim(), ")")
+TORCH_CHECK(dim1 < dim(), "transpose: dim1=", dim1, " out of range (dim=", dim(), ")")
std::swap(r.sizes_[dim0], r.sizes_[dim1]);
std::swap(r.strides_[dim0], r.strides_[dim1]);
return r;

32 changes: 16 additions & 16 deletions aten/src/ATen/TensorUtils.cpp
@@ -20,21 +20,21 @@ std::ostream& operator<<(std::ostream & out, TensorGeometryArg t) {
}

void checkDim(CheckedFrom c, const TensorGeometryArg& t, int64_t dim) {
-AT_CHECK(t->dim() == dim,
+TORCH_CHECK(t->dim() == dim,
"Expected ", dim, "-dimensional tensor, but got ", t->dim(),
"-dimensional tensor for ", t," (while checking arguments for ", c, ")");
}

void checkDimRange(CheckedFrom c, const TensorGeometryArg& t, int64_t dim_start, int64_t dim_end) {
-AT_CHECK(
+TORCH_CHECK(
t->dim() >= dim_start && t->dim() < dim_end,
"Expected ", dim_start, " to ", (dim_end - 1), " dimensions, but got ",
t->dim(), "-dimensional tensor for ", t, " (while checking arguments for ",
c, ")");
}

void checkContiguous(CheckedFrom c, const TensorGeometryArg& t) {
-AT_CHECK(
+TORCH_CHECK(
t->is_contiguous(),
"Expected contiguous tensor, but got non-contiguous tensor for ", t,
" (while checking arguments for ", c, ")");
@@ -49,14 +49,14 @@ void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts) {

void checkSize(CheckedFrom c, const TensorGeometryArg& t, IntArrayRef sizes) {
checkDim(c, t, sizes.size());
-AT_CHECK(
+TORCH_CHECK(
t->sizes().equals(sizes),
"Expected tensor of size ", sizes, ", but got tensor of size ", t->sizes(),
" for ", t, " (while checking arguments for ", c, ")");
}

void checkSize(CheckedFrom c, const TensorGeometryArg& t, int64_t dim, int64_t size) {
-AT_CHECK(
+TORCH_CHECK(
t->size(dim) == size,
"Expected tensor to have size ", size, " at dimension ", dim,
", but got size ", t->size(dim), " for ", t,
@@ -76,7 +76,7 @@ void checkAllSame(CheckedFrom c, ArrayRef<TensorArg> tensors, void(*fn)(CheckedF
}

void checkSameSize(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
-AT_CHECK(
+TORCH_CHECK(
t1->sizes().equals(t2->sizes()),
"Expected tensor for ", t1, " to have same size as tensor for ", t2,
"; but ", t1->sizes(), " does not equal ", t2->sizes(),
@@ -88,15 +88,15 @@ void checkAllSameSize(CheckedFrom c, ArrayRef<TensorArg> tensors) {
}

void checkNumel(CheckedFrom c, const TensorGeometryArg& t, int64_t numel) {
-AT_CHECK(
+TORCH_CHECK(
t->numel() == numel,
"Expected tensor for ", t, " to have ", numel,
" elements; but it actually has ", t->numel(), " elements",
" (while checking arguments for ", c, ")");
}

void checkSameNumel(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
-AT_CHECK(
+TORCH_CHECK(
t1->numel() == t2->numel(),
"Expected tensor for ", t1,
" to have same number of elements as tensor for ", t2, "; but ",
@@ -121,7 +121,7 @@ void checkSameGPU(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
<< " to be on GPU (while checking arguments for " << c << ")";
AT_ERROR(oss.str());
}
-AT_CHECK(
+TORCH_CHECK(
t1->get_device() == t2->get_device(),
"Expected tensor for ", t1, " to have the same device as tensor for ", t2,
"; but device ", t1->get_device(), " does not equal ", t2->get_device(),
@@ -133,15 +133,15 @@ void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors) {
}

void checkSameType(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
-AT_CHECK(
+TORCH_CHECK(
t1->type() == t2->type(),
"Expected tensor for ", t1, " to have the same type as tensor for ", t2,
"; but type ", t1->toString(), " does not equal ", t2->toString(),
" (while checking arguments for ", c, ")");
}

void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType ty) {
-AT_CHECK(
+TORCH_CHECK(
t->scalar_type() == ty,
"Expected tensor for ", t, " to have scalar type ", toString(ty),
"; but got ", t->toString(), " instead (while checking arguments for ", c,
@@ -173,15 +173,15 @@ void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors) {
}

void checkSameDim(CheckedFrom c, const TensorGeometryArg& t1, const TensorGeometryArg& t2) {
-AT_CHECK(
+TORCH_CHECK(
t1->dim() == t2->dim(),
"Expected tensor for ", t1, " to have the same dimension as tensor for ",
t2, "; but ", t1->dim(), " does not equal ", t2->dim(),
" (while checking arguments for ", c, ")");
}

void checkDefined(CheckedFrom c, const TensorArg& t) {
-AT_CHECK(
+TORCH_CHECK(
t->defined(),
"Expected tensor for ", t, " to be non-null, but it was undefined ",
" (while checking arguments for ", c, ")");
@@ -195,7 +195,7 @@ void checkAllDefined(CheckedFrom c, ArrayRef<TensorArg> ts) {
}

void checkBackend(CheckedFrom c, const Tensor& t, Backend backend) {
-AT_CHECK(
+TORCH_CHECK(
!t.defined() || t.type().backend() == backend,
"Expected tensor to have ", toString(backend),
" Backend, but got tensor with ", toString(t.type().backend()), " Backend ",
@@ -209,7 +209,7 @@ void checkBackend(CheckedFrom c, at::ArrayRef<Tensor> tensors, at::Backend backe
}

void checkDeviceType(CheckedFrom c, const Tensor& t, DeviceType device_type) {
-AT_CHECK(
+TORCH_CHECK(
!t.defined() || t.type().device_type() == device_type,
"Expected tensor to have ", device_type,
" DeviceType, but got tensor with ", t.type().device_type(), " DeviceType ",
@@ -223,7 +223,7 @@ void checkDeviceType(CheckedFrom c, at::ArrayRef<Tensor> tensors, at::DeviceType
}

void checkLayout(CheckedFrom c, const Tensor& t, Layout layout) {
-AT_CHECK(
+TORCH_CHECK(
!t.defined() || t.layout() == layout,
"Expected tensor to have ", layout,
" Layout, but got tensor with ", t.layout(), " Layout ",

4 changes: 2 additions & 2 deletions aten/src/ATen/WrapDimUtilsMulti.h
@@ -13,11 +13,11 @@ namespace at {
constexpr size_t dim_bitset_size = 64;

static inline std::bitset<dim_bitset_size> dim_list_to_bitset(IntArrayRef dims, int64_t ndims) {
-AT_CHECK(ndims <= (int64_t) dim_bitset_size, "only tensors with up to ", dim_bitset_size, " dims are supported");
+TORCH_CHECK(ndims <= (int64_t) dim_bitset_size, "only tensors with up to ", dim_bitset_size, " dims are supported");
std::bitset<dim_bitset_size> seen;
for (size_t i = 0; i < dims.size(); i++) {
size_t dim = maybe_wrap_dim(dims[i], ndims);
-AT_CHECK(!seen[dim], "dim ", dim, " appears multiple times in the list of dims");
+TORCH_CHECK(!seen[dim], "dim ", dim, " appears multiple times in the list of dims");
seen[dim] = true;
}
return seen;