Replace AT_CHECK with TORCH_CHECK [shard 6/10]
Summary: Pull Request resolved: #20430

Reviewed By: jerryzh168

Differential Revision: D15318250

fbshipit-source-id: eaee93447d757124a0c9fb5dcde503ae6a065912
ezyang authored and facebook-github-bot committed May 14, 2019
1 parent: 5b45355 · commit: 358fb51
Showing 16 changed files with 103 additions and 103 deletions.
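Context for the mechanical change below: TORCH_CHECK is a drop-in replacement for the deprecated AT_CHECK macro, with the same variadic call pattern, so every hunk in this shard changes only the macro name. A minimal sketch of the shared usage pattern, assuming the c10 headers of this era (the check_same_shape helper is hypothetical, for illustration only):

  // Sketch, not part of this commit: TORCH_CHECK, like the old AT_CHECK,
  // evaluates a condition and, on failure, throws c10::Error with the
  // remaining arguments stringified and concatenated into the message.
  #include <ATen/ATen.h>
  #include <c10/util/Exception.h>

  void check_same_shape(const at::Tensor& a, const at::Tensor& b) {
    TORCH_CHECK(a.sizes() == b.sizes(),
        "expected tensors of equal shape, but got ", a.sizes(),
        " and ", b.sizes());
  }

Because both macros throw c10::Error (surfaced as RuntimeError on the Python side), the rename should not change any user-visible behavior.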
4 changes: 2 additions & 2 deletions aten/src/ATen/native/cuda/Lerp.cu
@@ -76,7 +76,7 @@ Tensor& lerp_cuda_tensor_(Tensor& self, const Tensor& end, const Tensor& weight)
 Tensor& lerp_cuda_scalar_(Tensor& self, const Tensor& end, Scalar weight) {
   Tensor b_self, b_end;
   std::tie(b_self, b_end) = expand_outplace(self, end, "lerp__cuda");
-  AT_CHECK(b_self.sizes() == self.sizes(),
+  TORCH_CHECK(b_self.sizes() == self.sizes(),
            "output with shape ", self.sizes(),
            " doesn't match the broadcast shape ", b_self.sizes());
   AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lerp__cuda", [&]{
@@ -87,7 +87,7 @@ Tensor& lerp_cuda_scalar_(Tensor& self, const Tensor& end, Scalar weight) {

 Tensor lerp_cuda_tensor(const Tensor& self, const Tensor& end, const Tensor& weight) {
   Tensor b_self, b_end, b_weight;
-  AT_CHECK(weight.dim() <= std::max(self.dim(), end.dim()),
+  TORCH_CHECK(weight.dim() <= std::max(self.dim(), end.dim()),
            "weight should be of dimension max(self.dim(), end.dim()) or lesser");
   std::tie(b_self, b_end, b_weight) = expand_outplace(self, end, weight, "lerp_cuda");
   Tensor result = at::empty_like(b_self);
10 changes: 5 additions & 5 deletions aten/src/ATen/native/cuda/LossCTC.cu
@@ -179,9 +179,9 @@ std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const

   int64_t batch_size = log_probs.size(1);
   int64_t num_labels = log_probs.size(2);
-  AT_CHECK((0 <= BLANK) && (BLANK < num_labels), "blank must be in label range");
-  AT_CHECK(input_lengths.size() == batch_size, "input_lengths must be of size batch_size");
-  AT_CHECK(target_lengths.size() == batch_size, "target_lengths must be of size batch_size");
+  TORCH_CHECK((0 <= BLANK) && (BLANK < num_labels), "blank must be in label range");
+  TORCH_CHECK(input_lengths.size() == batch_size, "input_lengths must be of size batch_size");
+  TORCH_CHECK(target_lengths.size() == batch_size, "target_lengths must be of size batch_size");

   int64_t lp_input_stride = log_probs.stride(0);
   int64_t lp_char_stride = log_probs.stride(2);
@@ -211,13 +211,13 @@ std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const
     }
     tg_target_stride = targets.stride(1);
     checkSize(c, targets_arg, 0, batch_size);
-    AT_CHECK(targets.size(1) >= max_target_length,
+    TORCH_CHECK(targets.size(1) >= max_target_length,
              "Expected tensor to have size at least ", max_target_length, " at dimension 1, but got size ", targets.size(1), " for ", targets_arg,
              " (while checking arguments for ", c, ")");
   }
   int64_t max_input_length = log_probs.size(0);
   for (int64_t b = 0; b < batch_size; b++) {
-    AT_CHECK(input_lengths[b] <= max_input_length,
+    TORCH_CHECK(input_lengths[b] <= max_input_length,
              "Expected tensor to have size at least ", max_input_length, " at dimension 1, but got size ", targets.size(0), " for ", targets_arg,
              " (while checking arguments for ", c, ")");
   }
2 changes: 1 addition & 1 deletion aten/src/ATen/native/cuda/Normalization.cu
@@ -86,7 +86,7 @@ std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda(
   return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "batch_norm_backward", [&] {
     auto mean_st = running_mean.dtype();
     auto var_st = running_var.dtype();
-    AT_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types");
+    TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types");
     // <sigh> Some workloads depend on passing in half input and float stats, which is
     // usually handled by cuDNN. However, the JIT sometimes replaces cuDNN calls with this
     // one so it needs to support the same case, or people start to complain.
18 changes: 9 additions & 9 deletions aten/src/ATen/native/cuda/RangeFactories.cu
@@ -38,7 +38,7 @@ struct LogspaceOp {
 };

 Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
-  AT_CHECK(steps >= 0, "number of steps must be non-negative");
+  TORCH_CHECK(steps >= 0, "number of steps must be non-negative");

   if (result.numel() != steps) {
     result.resize_({steps});
@@ -68,7 +68,7 @@ Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t step
 }

 Tensor& logspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps, double base) {
-  AT_CHECK(steps >= 0, "number of steps must be non-negative");
+  TORCH_CHECK(steps >= 0, "number of steps must be non-negative");

   if (result.numel() != steps) {
     result.resize_({steps});
@@ -105,11 +105,11 @@ Tensor& range_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
   auto xend = end.to<accscalar_t>();
   auto xstep = step.to<accscalar_t>();

-  AT_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
-  AT_CHECK(std::isfinite(static_cast<double>(xstart)) &&
+  TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
+  TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
            std::isfinite(static_cast<double>(xend)),
            "unsupported range: ", xstart, " -> ", xend);
-  AT_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
+  TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
            "upper bound and larger bound inconsistent with step sign");
   int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
   if (result.numel() != size) {
@@ -152,14 +152,14 @@ Tensor& arange_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
                      / step.to<double>());
   }

-  AT_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
-  AT_CHECK(std::isfinite(static_cast<double>(xstart)) &&
+  TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
+  TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
            std::isfinite(static_cast<double>(xend)),
            "unsupported range: ", xstart, " -> ", xend);
-  AT_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
+  TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
            "upper bound and larger bound inconsistent with step sign");

-  AT_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
+  TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
            "invalid size, possible overflow?");
   int64_t size = static_cast<int64_t>(size_d);
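As an illustration (not part of the commit), the steps >= 0 check above is reachable from the public factory functions; a sketch, assuming an ATen build of roughly this vintage:

  // Sketch only: exercising the "number of steps must be non-negative"
  // check from RangeFactories. On a CPU-only build the equivalent
  // CPU-side check fires instead, with the same message.
  #include <ATen/ATen.h>
  #include <c10/util/Exception.h>
  #include <iostream>

  int main() {
    try {
      at::Tensor t = at::linspace(0.0, 1.0, /*steps=*/-1);
    } catch (const c10::Error& e) {
      std::cout << e.what() << std::endl;  // includes "number of steps must be non-negative"
    }
    return 0;
  }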
32 changes: 16 additions & 16 deletions aten/src/ATen/native/cuda/ReflectionPad.cu
@@ -148,14 +148,14 @@ __global__ void reflection_pad2d_backward_out_kernel(

 void reflection_pad1d_out_template(
     Tensor &output, const Tensor &input_, IntArrayRef padding) {
-  AT_CHECK(canUse32BitIndexMath(input_),
+  TORCH_CHECK(canUse32BitIndexMath(input_),
     "input tensor must fit into 32-bit index math");

   int64_t dim_plane = 0;
   int64_t dim_w = 1;
   int64_t nbatch = 1;

-  AT_CHECK(input_.numel() > 0 &&
+  TORCH_CHECK(input_.numel() > 0 &&
     (input_.ndimension() == 2 || input_.ndimension() == 3), "non-empty 2D "
     "or 3D (batch mode) tensor expected for input, but got: ", input_);

@@ -172,11 +172,11 @@ void reflection_pad1d_out_template(
   int64_t input_w = input_.size(dim_w);
   int64_t output_w = input_w + pad_l + pad_r;

-  AT_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less "
+  TORCH_CHECK(pad_l < input_w && pad_r < input_w, "Padding size should be less "
     "than the corresponding input dimension, but got: padding (", pad_l, ", ",
     pad_r, ") at dimension ", dim_w, " of input ", input_);

-  AT_CHECK(output_w >= 1,
+  TORCH_CHECK(output_w >= 1,
     "input (W: ", input_w, ")is too small. Calculated output W: ", output_w);

   if (input_.ndimension() == 2) {
@@ -206,10 +206,10 @@ void reflection_pad1d_backward_out_template(
     Tensor & grad_input, const Tensor & grad_output_,
     const Tensor & input, IntArrayRef padding) {

-  AT_CHECK(canUse32BitIndexMath(input),
+  TORCH_CHECK(canUse32BitIndexMath(input),
     "input tensor must fit into 32-bit index math");

-  AT_CHECK(canUse32BitIndexMath(grad_output_),
+  TORCH_CHECK(canUse32BitIndexMath(grad_output_),
     "input tensor must fit into 32-bit index math");

   int64_t dim_plane = 0;
@@ -231,7 +231,7 @@ void reflection_pad1d_backward_out_template(

   Tensor grad_output = grad_output_.contiguous();

-  AT_CHECK(output_w == grad_output.size(dim_w),
+  TORCH_CHECK(output_w == grad_output.size(dim_w),
     "gradOutput width unexpected. Expected: ", output_w, ", Got: ",
     grad_output.size(dim_w));

@@ -252,15 +252,15 @@ void reflection_pad1d_backward_out_template(

 void reflection_pad2d_out_template(
     Tensor &output, const Tensor &input_, IntArrayRef padding) {
-  AT_CHECK(canUse32BitIndexMath(input_),
+  TORCH_CHECK(canUse32BitIndexMath(input_),
     "input tensor must fit into 32-bit index math");

   int plane_dim = 0;
   int dim_h = 1;
   int dim_w = 2;
   int nbatch = 1;

-  AT_CHECK(input_.numel() > 0 &&
+  TORCH_CHECK(input_.numel() > 0 &&
     (input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or "
     "4D (batch mode) tensor expected for input, but got: ", input_);

@@ -280,20 +280,20 @@ void reflection_pad2d_out_template(
   int input_h = input_.size(dim_h);
   int input_w = input_.size(dim_w);

-  AT_CHECK(pad_l < input_w && pad_r < input_w,
+  TORCH_CHECK(pad_l < input_w && pad_r < input_w,
     "Padding size should be less than the corresponding input dimension, but "
     "got: padding (", pad_l, ", ", pad_r, ") at dimension ", dim_w,
     " of input ", input_.sizes());

-  AT_CHECK(pad_t < input_h && pad_b < input_h,
+  TORCH_CHECK(pad_t < input_h && pad_b < input_h,
     "Padding size should be less than the corresponding input dimension, but "
     "got: padding (", pad_t, ", ", pad_b, ") at dimension ", dim_h,
     " of input ", input_.sizes());

   int output_h = input_h + pad_t + pad_b;
   int output_w = input_w + pad_l + pad_r;

-  AT_CHECK(output_w >= 1 || output_h >= 1,
+  TORCH_CHECK(output_w >= 1 || output_h >= 1,
     "input (H: ", input_h, ", W: ", input_w, ")is too small. Calculated "
     "output H: ", output_h, " W: ", output_w);

@@ -326,9 +326,9 @@ void reflection_pad2d_out_template(
 void reflection_pad2d_backward_out_template(
     Tensor &grad_input, const Tensor &grad_output_,
     const Tensor &input, IntArrayRef padding) {
-  AT_CHECK(canUse32BitIndexMath(input),
+  TORCH_CHECK(canUse32BitIndexMath(input),
     "input tensor must fit into 32-bit index math");
-  AT_CHECK(canUse32BitIndexMath(grad_output_),
+  TORCH_CHECK(canUse32BitIndexMath(grad_output_),
     "output gradient tensor must fit into 32-bit index math");

   int plane_dim = 0;
@@ -355,9 +355,9 @@ void reflection_pad2d_backward_out_template(
   int output_h = input_h + pad_t + pad_b;
   int output_w = input_w + pad_l + pad_r;

-  AT_CHECK(output_w == grad_output_.size(dim_w), "grad_output width "
+  TORCH_CHECK(output_w == grad_output_.size(dim_w), "grad_output width "
     "unexpected. Expected: ", output_w, ", Got: ", grad_output_.size(dim_w));
-  AT_CHECK(output_h == grad_output_.size(dim_h), "grad_output height "
+  TORCH_CHECK(output_h == grad_output_.size(dim_h), "grad_output height "
     "unexpected. Expected: ", output_h, ", Got: ", grad_output_.size(dim_h));

   Tensor grad_output = grad_output_.contiguous();
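A final hedged illustration (again not from the commit): the padding-size checks above are reachable from the public op. A sketch, assuming a build of this era; on CPU the analogous CPU-side check fires with similar wording:

  // Sketch only: padding of 10 on a width-4 input violates pad < input_w,
  // so the reflection-pad check throws c10::Error.
  #include <ATen/ATen.h>
  #include <c10/util/Exception.h>
  #include <iostream>

  int main() {
    at::Tensor x = at::ones({1, 1, 4});  // N, C, W
    try {
      at::Tensor y = at::reflection_pad1d(x, {10, 10});
    } catch (const c10::Error& e) {
      std::cout << e.what() << std::endl;  // "Padding size should be less than ..."
    }
    return 0;
  }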
