From 638653371e67adc0210765b907f6a8862b6dbd93 Mon Sep 17 00:00:00 2001 From: Tanmay Das Date: Wed, 15 Mar 2023 20:48:27 +0000 Subject: [PATCH 1/4] Add check that output is quantized. --- tensorflow/lite/micro/kernels/add_common.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/lite/micro/kernels/add_common.cc b/tensorflow/lite/micro/kernels/add_common.cc index b285b800c4e..179bc48798a 100644 --- a/tensorflow/lite/micro/kernels/add_common.cc +++ b/tensorflow/lite/micro/kernels/add_common.cc @@ -90,6 +90,7 @@ TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, kAddOutputTensor); TF_LITE_ENSURE(context, output != nullptr); + TFLITE_CHECK_NE(output->quantization.type, kTfLiteNoQuantization); OpDataAdd* data = static_cast<OpDataAdd*>(node->user_data); auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data); From e98aa5e30f7953ec0dcd9c2f54b4c4edd8391365 Mon Sep 17 00:00:00 2001 From: Tanmay Das Date: Tue, 21 Mar 2023 22:55:36 +0000 Subject: [PATCH 2/4] Add the check for int8/int16 output type. --- tensorflow/lite/micro/kernels/add_common.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tensorflow/lite/micro/kernels/add_common.cc b/tensorflow/lite/micro/kernels/add_common.cc index 179bc48798a..96b96867c69 100644 --- a/tensorflow/lite/micro/kernels/add_common.cc +++ b/tensorflow/lite/micro/kernels/add_common.cc @@ -90,7 +90,11 @@ TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, kAddOutputTensor); TF_LITE_ENSURE(context, output != nullptr); - TFLITE_CHECK_NE(output->quantization.type, kTfLiteNoQuantization); + // This is to ensure that for int8/int16 output type,output does not have + // a "no-quantization" quantization type. 
+ if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TFLITE_CHECK_NE(output->quantization.type, kTfLiteNoQuantization); + } OpDataAdd* data = static_cast<OpDataAdd*>(node->user_data); auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data); From 61d30cb5de2538657fc2aae45ddbdd03a83b34b5 Mon Sep 17 00:00:00 2001 From: Tanmay Das Date: Fri, 24 Mar 2023 17:36:15 +0000 Subject: [PATCH 3/4] Thank you for suggestion! Moved the check closer to where it is used. --- tensorflow/lite/micro/kernels/add_common.cc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tensorflow/lite/micro/kernels/add_common.cc b/tensorflow/lite/micro/kernels/add_common.cc index 96b96867c69..52d0c120a38 100644 --- a/tensorflow/lite/micro/kernels/add_common.cc +++ b/tensorflow/lite/micro/kernels/add_common.cc @@ -39,6 +39,10 @@ TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params, data->requires_broadcast = !HaveSameShapes(input1, input2); if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + // This is to ensure that for int8/int16 output type,output does not have + // a "no-quantization" quantization type. + TFLITE_CHECK_NE(output->quantization.type, kTfLiteNoQuantization); + // 8bit -> 8bit general quantized path, with general rescalings data->input1_offset = -input1->params.zero_point; data->input2_offset = -input2->params.zero_point; @@ -90,11 +94,6 @@ TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, kAddOutputTensor); TF_LITE_ENSURE(context, output != nullptr); - // This is to ensure that for int8/int16 output type,output does not have - // a "no-quantization" quantization type. 
- if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { - TFLITE_CHECK_NE(output->quantization.type, kTfLiteNoQuantization); - } OpDataAdd* data = static_cast<OpDataAdd*>(node->user_data); auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data); From 858431aa80e00c36345e9cd6c3c54f2f45f2cd54 Mon Sep 17 00:00:00 2001 From: Tanmay Das Date: Sun, 26 Mar 2023 13:49:59 +0000 Subject: [PATCH 4/4] Remove comment. --- tensorflow/lite/micro/kernels/add_common.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/tensorflow/lite/micro/kernels/add_common.cc b/tensorflow/lite/micro/kernels/add_common.cc index 52d0c120a38..ffbf4c06af9 100644 --- a/tensorflow/lite/micro/kernels/add_common.cc +++ b/tensorflow/lite/micro/kernels/add_common.cc @@ -39,8 +39,6 @@ TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params, data->requires_broadcast = !HaveSameShapes(input1, input2); if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { - // This is to ensure that for int8/int16 output type,output does not have - // a "no-quantization" quantization type. TFLITE_CHECK_NE(output->quantization.type, kTfLiteNoQuantization); // 8bit -> 8bit general quantized path, with general rescalings