Replace all AT_ASSERTM in RNN_miopen.cpp (#51072)
Summary:
Pull Request resolved: #51072

AT_ASSERTM is deprecated and should be replaced by either TORCH_CHECK (for conditions that
user input can violate) or TORCH_INTERNAL_ASSERT (for internal invariants whose failure
indicates a bug), depending on the situation.
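
For illustration only (not part of the commit message or the patch), here is a minimal sketch of how the two replacement macros are typically used, assuming the standard c10 macros from <c10/util/Exception.h>; the wrapper functions are hypothetical, while the conditions and messages are taken from the diff below.

#include <c10/util/Exception.h>

// Hypothetical helper: validates caller-supplied data, so a failure is a
// user-facing error. TORCH_CHECK throws a c10::Error with the given message.
void check_layer_count(int64_t from_layers, int64_t to_layers) {
  TORCH_CHECK(from_layers == to_layers, "number of layers mismatch");
}

// Hypothetical helper: guards an internal invariant, so a failure indicates
// a bug in PyTorch itself. TORCH_INTERNAL_ASSERT reports an internal assert
// failure and asks the user to file a bug report.
void check_offset(int64_t cur_offset, int64_t offset) {
  TORCH_INTERNAL_ASSERT(cur_offset == offset,
      "cur_offset = ", cur_offset, " ; offset = ", offset);
}

Both macros take a boolean condition followed by a variadic message; the practical difference is only in how the failure is reported to the user.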

Test Plan: Imported from OSS

Reviewed By: ailzhang

Differential Revision: D26074364

Pulled By: ezyang

fbshipit-source-id: 742e28afe49e0a546c252a0fad487f93410d0cb5
xuhdev authored and facebook-github-bot committed Jan 29, 2021
1 parent dfca1e4 commit ebd2a82
Showing 1 changed file with 14 additions and 11 deletions.
25 changes: 14 additions & 11 deletions aten/src/ATen/native/miopen/RNN_miopen.cpp
@@ -246,7 +246,7 @@ Tensor permute_wei_for_miopen(Tensor wei, int64_t mode)
}

void _viewOrCopyParams(MatrixRef<Tensor> params_from, MatrixRef<Tensor> params_to, bool copy) {
-AT_ASSERTM(params_from.size(0) == params_to.size(0), "number of layers mismatch");
+TORCH_CHECK(params_from.size(0) == params_to.size(0), "number of layers mismatch");
for (size_t i = 0; i < params_from.size(0); i++) {
auto layer_params_from = params_from[i];
auto layer_params_to = params_to[i];
@@ -257,7 +257,7 @@ void _viewOrCopyParams(MatrixRef<Tensor> params_from, MatrixRef<Tensor> params_t
a != layer_params_from.end() && b != layer_params_to.end();
++a, ++b) {
auto param_from = *a, param_to = *b;
-AT_ASSERTM(param_from.type() == param_to.type(), "parameter types mismatch");
+TORCH_CHECK(param_from.type() == param_to.type(), "parameter types mismatch");
if (copy) {
param_to.copy_(param_from.view_as(param_to));
} else {
@@ -268,15 +268,15 @@ void _viewOrCopyParams(MatrixRef<Tensor> params_from, MatrixRef<Tensor> params_t
}

void _copyParams_and_permute(MatrixRef<Tensor> params_from, MatrixRef<Tensor> params_to, int64_t mode) {
-AT_ASSERTM(params_from.size(0) == params_to.size(0), "number of layers mismatch");
+TORCH_CHECK(params_from.size(0) == params_to.size(0), "number of layers mismatch");
for (size_t i = 0; i < params_from.size(0); i++) {
auto layer_params_from = params_from[i];
auto layer_params_to = params_to[i];
for (auto a = layer_params_from.begin(), b = layer_params_to.begin();
a != layer_params_from.end() && b != layer_params_to.end();
++a, ++b) {
auto param_from = *a, param_to = *b;
-AT_ASSERTM(param_from.type() == param_to.type(), "parameter types mismatch");
+TORCH_CHECK(param_from.type() == param_to.type(), "parameter types mismatch");
auto tmp = permute_wei_for_miopen(param_from, mode);
param_to.copy_(tmp.view_as(param_to));
}
@@ -297,7 +297,7 @@ int64_t get_num_weights(miopenHandle_t handle, const RNNDescriptor& rnn_desc,
size_t weight_size;
MIOPEN_CHECK(miopenGetRNNParamsSize(handle, rnn_desc.desc(), x_desc.desc(), &weight_size, datatype));
auto element_size = dataSize(datatype);
-AT_ASSERTM(weight_size % element_size == 0, "miopenGetRNNParamsSize returned nonsensical weight_size.");
+TORCH_CHECK(weight_size % element_size == 0, "miopenGetRNNParamsSize returned nonsensical weight_size.");
return weight_size / element_size;
}

@@ -359,7 +359,8 @@ std::pair<std::vector<Tensor>, size_t> get_parameters(miopenHandle_t handle, con
params.emplace_back(std::move(param));
layer_params_count++;
} else {
-AT_ASSERTM(cur_offset == offset, "cur_offset = ", cur_offset, " ; offset = ", offset);
+TORCH_INTERNAL_ASSERT(cur_offset == offset,
+    "cur_offset = ", cur_offset, " ; offset = ", offset);
}
cur_offset = offset + param_size;
}
@@ -392,7 +393,8 @@ std::pair<std::vector<Tensor>, size_t> get_parameters(miopenHandle_t handle, con
params.emplace_back(std::move(param));
layer_params_count++;
} else {
-AT_ASSERTM(cur_offset == offset, "cur_offset = ", cur_offset, " ; offset = ", offset);
+TORCH_INTERNAL_ASSERT(cur_offset == offset,
+    "cur_offset = ", cur_offset, " ; offset = ", offset);
}
cur_offset = offset + bias_size;
}
@@ -401,9 +403,9 @@ std::pair<std::vector<Tensor>, size_t> get_parameters(miopenHandle_t handle, con
if (layer == 0) {
global_layer_params_count = layer_params_count;
} else {
-AT_ASSERTM(global_layer_params_count == layer_params_count,
-    "global_layer_params_count = ", global_layer_params_count,
-    "; layer_params_count = ", layer_params_count);
+TORCH_INTERNAL_ASSERT(global_layer_params_count == layer_params_count,
+    "global_layer_params_count = ", global_layer_params_count,
+    "; layer_params_count = ", layer_params_count);
}
} // layer
return std::make_pair(params, global_layer_params_count);
@@ -586,7 +588,8 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> miopen_rnn_backward_input(
auto dhy = grad_hy.contiguous().view(hidden_size);
auto dcy = grad_cy.defined() ? grad_cy.contiguous().view(hidden_size) : Tensor();
auto dhx = at::empty(hidden_size, hx.options());
-AT_ASSERTM(cx.defined() || !output_mask[2], "illegally required grad of cx for non-LSTM RNN");
+TORCH_INTERNAL_ASSERT(cx.defined() || !output_mask[2],
+    "illegally required grad of cx for non-LSTM RNN");
auto dcx = cx.defined() ? at::empty(hidden_size, cx.options()) : Tensor();

TORCH_CHECK(fn_train, "miopen RNN backward can only be called in training mode");