feat: update truncate long/double warning message
Signed-off-by: inocsin <vcheungyi@163.com>
inocsin committed Mar 23, 2021
1 parent 69e49e8 commit 60dba12
Showing 1 changed file with 3 additions and 3 deletions.
core/conversion/var/Var.cpp (3 additions, 3 deletions)
@@ -98,13 +98,13 @@ nvinfer1::ITensor* Var::ITensorOrFreeze(ConversionCtx* ctx) {
   if (isIValue()) {
     auto tensor = ptr_.ivalue->toTensor();
     if ((tensor.scalar_type() == at::kLong || tensor.scalar_type() == at::kDouble) && !ctx->settings.truncate_long_and_double) {
-      TRTORCH_CHECK(0, "Unable to freeze tensor of type kLong/kDouble into constant layer, try to compile model with truncate_long_and_double ON");
+      TRTORCH_THROW_ERROR("Unable to freeze tensor of type Int64/Float64 into constant layer, try to compile model with truncate_long_and_double ON");
     } else if (tensor.scalar_type() == at::kLong && ctx->settings.truncate_long_and_double) {
       weights = converters::Weights(ctx, tensor.toType(at::kInt));
-      LOG_WARNING("Warning: Truncating weight (constant in the graph) from kLong to kInt to indicate that only constants are affected.");
+      LOG_WARNING("Warning: Truncating weight (constant in the graph) from Int64 to Int32.");
     } else if (tensor.scalar_type() == at::kDouble && ctx->settings.truncate_long_and_double) {
       weights = converters::Weights(ctx, tensor.toType(at::kFloat));
-      LOG_WARNING("Warning: Truncating weight (constant in the graph) from kDouble to kFloat to indicate that only constants are affected.");
+      LOG_WARNING("Warning: Truncating weight (constant in the graph) from Float64 to Float32.");
     } else {
       weights = converters::Weights(ctx, tensor);
     }
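For reference, the updated error message tells users to recompile with truncate_long_and_double ON. Below is a minimal C++ sketch of what that looks like from the caller's side, assuming the TRTorch C++ API of this era (header trtorch/trtorch.h, trtorch::CompileSpec with a truncate_long_and_double field, and trtorch::CompileGraph); the model path and input shape are hypothetical, so check the docs for the version you are using.

#include <torch/script.h>
#include "trtorch/trtorch.h"

int main() {
  // Load a TorchScript module that carries Int64/Float64 constants
  // (hypothetical path, for illustration only).
  auto mod = torch::jit::load("model_with_fp64_constants.ts");

  // Describe one input of shape [1, 3, 224, 224] (hypothetical shape).
  trtorch::CompileSpec spec({{1, 3, 224, 224}});

  // Opt in to truncation: Int64/Float64 constants are narrowed to
  // Int32/Float32 (emitting the LOG_WARNING shown in the diff) instead
  // of compilation aborting with the "Unable to freeze tensor" error.
  spec.truncate_long_and_double = true;

  auto trt_mod = trtorch::CompileGraph(mod, spec);
  return 0;
}

With the flag left at its default of false, a module holding Int64/Float64 constants hits the TRTORCH_THROW_ERROR path in the diff; with it set to true, the constants are truncated and only the warnings are logged.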
