Fix a null pointer dereference caused by branching on uninitialized data.
The crash comes from not checking that the quantization params exist: if there is no quantization, the `.params` field must not be accessed.

PiperOrigin-RevId: 385173491
Change-Id: I8fc476c4b274fdb21ba741caa0fbc6d1b8840663
mihaimaruseac authored and tensorflower-gardener committed Jul 16, 2021
1 parent e35be97 commit 8933b8a
Showing 1 changed file with 3 additions and 0 deletions.
tensorflow/lite/kernels/depthwise_conv.cc (3 additions, 0 deletions)
@@ -176,6 +176,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   if (data_type != kTfLiteFloat32) {
     TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                       kTfLiteAffineQuantization);
+    TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization);
     const auto* affine_quantization =
         reinterpret_cast<TfLiteAffineQuantization*>(
             filter->quantization.params);
@@ -195,6 +196,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   }
 
   if (is_hybrid) {
+    TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization);
     const auto* affine_quantization =
         reinterpret_cast<TfLiteAffineQuantization*>(
             filter->quantization.params);
@@ -495,6 +497,7 @@ TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node,
   op_params.weights_offset = 0;
   op_params.float_activation_min = output_activation_min;
   op_params.float_activation_max = output_activation_max;
+  TF_LITE_ENSURE(context, filter->quantization.type != kTfLiteNoQuantization);
   const auto* affine_quantization =
       reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
   if (kernel_type == kReference) {
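For illustration, below is a minimal standalone sketch of the crash and of the guard pattern the diff above adds. The types are simplified, hypothetical stand-ins (the real `TfLiteQuantization` / `TfLiteAffineQuantization` live in `tensorflow/lite/c/common.h`), and the boolean return stands in for `TF_LITE_ENSURE`, which on failure logs through the context and makes the enclosing function return `kTfLiteError`:

    // Minimal standalone sketch of the bug and the guard (hypothetical,
    // simplified stand-ins for the TFLite quantization types).
    #include <cstdio>
    #include <vector>

    enum QuantizationType { kNoQuantization, kAffineQuantization };

    struct AffineQuantizationParams {
      std::vector<float> scale;
      std::vector<int> zero_point;
    };

    struct Quantization {
      QuantizationType type = kNoQuantization;
      // Left null when type == kNoQuantization; dereferencing it in that
      // state is the crash this commit prevents.
      AffineQuantizationParams* params = nullptr;
    };

    // Mirrors the added TF_LITE_ENSURE check: bail out instead of reading
    // `params` when the filter carries no quantization.
    bool PrepareFilter(const Quantization& quantization) {
      if (quantization.type == kNoQuantization) {
        std::fprintf(stderr, "filter has no quantization; not reading params\n");
        return false;
      }
      const auto* affine = quantization.params;  // Safe: guarded above.
      std::printf("num scales: %zu\n", affine->scale.size());
      return true;
    }

    int main() {
      Quantization unquantized;    // type == kNoQuantization, params == nullptr.
      PrepareFilter(unquantized);  // Rejected cleanly instead of crashing.

      AffineQuantizationParams affine_params{{0.5f}, {0}};
      Quantization quantized;
      quantized.type = kAffineQuantization;
      quantized.params = &affine_params;
      PrepareFilter(quantized);    // Prints "num scales: 1".
      return 0;
    }

Note that in `Prepare` the first hunk already ensures the type equals `kTfLiteAffineQuantization`; the added check makes the "no quantization" rejection explicit before `.params` is cast and dereferenced.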
