[TFL] Fix unbounded activation clip range. #46991

Closed
13 changes: 13 additions & 0 deletions tensorflow/lite/kernels/internal/BUILD
@@ -952,6 +952,19 @@ cc_test(
],
)

cc_test(
name = "common_test",
srcs = [
"common_test.cc",
],
shard_count = 2,
deps = [
":common",
"//tensorflow/lite/kernels:kernel_util",
"@com_google_googletest//:gtest_main",
],
)

cc_test(
name = "conv_per_channel_quantized_16x8_test",
srcs = [
15 changes: 12 additions & 3 deletions tensorflow/lite/kernels/internal/common.h
@@ -37,12 +37,21 @@ inline void GetActivationMinMax(FusedActivationFunctionType ac,
float* output_activation_max) {
switch (ac) {
case FusedActivationFunctionType::kNone:
*output_activation_min = std::numeric_limits<float>::lowest();
*output_activation_max = std::numeric_limits<float>::max();
// For the most common floating-point representations, such as
// IEC 559/IEEE 754 and bfloat16, -infinity is the representation of
// negative infinity; other representations make no such guarantee,
// hence the has_infinity check.
*output_activation_min = std::numeric_limits<float>::has_infinity
? -std::numeric_limits<float>::infinity()
: std::numeric_limits<float>::lowest();
*output_activation_max = std::numeric_limits<float>::has_infinity
? std::numeric_limits<float>::infinity()
: std::numeric_limits<float>::max();
break;
case FusedActivationFunctionType::kRelu:
*output_activation_min = 0.f;
*output_activation_max = std::numeric_limits<float>::max();
*output_activation_max = std::numeric_limits<float>::has_infinity
? std::numeric_limits<float>::infinity()
: std::numeric_limits<float>::max();
break;
case FusedActivationFunctionType::kRelu1:
*output_activation_min = -1.f;
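The change above matters because clamping against finite sentinels silently alters infinite inputs. A minimal standalone sketch of the before/after behavior — the Clamp helper is a hypothetical stand-in for TFLite's min/max clamp, not library code:

#include <algorithm>
#include <cassert>
#include <limits>

// Hypothetical stand-in for the two-sided clamp TFLite applies.
float Clamp(float x, float lo, float hi) {
  return std::min(std::max(x, lo), hi);
}

int main() {
  const float neg_inf = -std::numeric_limits<float>::infinity();
  // Old bounds: lowest()/max(). -infinity is clipped to lowest(), so a
  // "no activation" op changes the value it was supposed to pass through.
  assert(Clamp(neg_inf, std::numeric_limits<float>::lowest(),
               std::numeric_limits<float>::max()) ==
         std::numeric_limits<float>::lowest());
  // New bounds: -infinity/+infinity. The clamp is now the identity.
  assert(Clamp(neg_inf, -std::numeric_limits<float>::infinity(),
               std::numeric_limits<float>::infinity()) == neg_inf);
  return 0;
}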
79 changes: 79 additions & 0 deletions tensorflow/lite/kernels/internal/common_test.cc
@@ -0,0 +1,79 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/common.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <limits>
#include <vector>

#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace {

using ::testing::ElementsAreArray;

constexpr float GetMinValue() {
return std::numeric_limits<float>::has_infinity
? -std::numeric_limits<float>::infinity()
: std::numeric_limits<float>::lowest();
}

constexpr float GetMaxValue() {
return std::numeric_limits<float>::has_infinity
? std::numeric_limits<float>::infinity()
: std::numeric_limits<float>::max();
}

TEST(ActivationFunctionWithMinMaxTest, FloatNoneActivation) {
// For float with kTfLiteActNone, both sides are unbounded, so the output must
// be the same as the input.
std::vector<float> input{
GetMinValue(), std::numeric_limits<float>::lowest(), -1.0, 0.0,
1.0, std::numeric_limits<float>::max(), GetMaxValue()};
std::vector<float> output(input.size());

float activation_min, activation_max;
CalculateActivationRange(TfLiteFusedActivation::kTfLiteActNone,
&activation_min, &activation_max);

std::transform(input.begin(), input.end(), output.begin(), [&](float x) {
return ActivationFunctionWithMinMax(x, activation_min, activation_max);
});
EXPECT_THAT(output, ElementsAreArray(input));
}

TEST(ActivationFunctionWithMinMaxTest, FloatReluActivation) {
// For float with kTfLiteActRelu, the positive side is unbounded, so the output
// must be the same as the input for non-negative values.
std::vector<float> input{0.0, 1.0, std::numeric_limits<float>::max(),
GetMaxValue()};
std::vector<float> output(input.size());

float activation_min, activation_max;
CalculateActivationRange(TfLiteFusedActivation::kTfLiteActRelu,
&activation_min, &activation_max);

std::transform(input.begin(), input.end(), output.begin(), [&](float x) {
return ActivationFunctionWithMinMax(x, activation_min, activation_max);
});
EXPECT_THAT(output, ElementsAreArray(input));
}

} // namespace
} // namespace tflite
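For context, the helper under test is essentially a two-sided clamp, so with -infinity/+infinity as bounds it degenerates to the identity on every float. A sketch of that shape, assuming the usual min/max definition in common.h (the name here is illustrative, not the real template):

// Sketch of the clamp these tests exercise. With activation_min == -inf
// and activation_max == +inf, std::max and std::min never replace x,
// so the function reduces to the identity.
template <typename T>
T ActivationWithMinMaxSketch(T x, T activation_min, T activation_max) {
  return std::min(std::max(x, activation_min), activation_max);
}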
18 changes: 14 additions & 4 deletions tensorflow/lite/kernels/kernel_util.h
@@ -248,22 +248,32 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
int32_t* act_max);

// Calculates the useful range of an activation layer given its activation
// tensor.a
// tensor. For floating-point types with unbounded activations such as relu and
// none, sets max/min to positive/negative infinity so that infinity is not clipped.
template <typename T>
void CalculateActivationRange(TfLiteFusedActivation activation,
T* activation_min, T* activation_max) {
if (activation == kTfLiteActRelu) {
*activation_min = 0;
*activation_max = std::numeric_limits<T>::max();
*activation_max = std::numeric_limits<T>::has_infinity
? std::numeric_limits<T>::infinity()
: std::numeric_limits<T>::max();
} else if (activation == kTfLiteActRelu6) {
*activation_min = 0;
*activation_max = 6;
} else if (activation == kTfLiteActReluN1To1) {
*activation_min = -1;
*activation_max = 1;
} else {
*activation_min = std::numeric_limits<T>::lowest();
*activation_max = std::numeric_limits<T>::max();
// For the most common floating-point representations, such as
// IEC 559/IEEE 754 and bfloat16, -infinity is the representation of
// negative infinity; other representations make no such guarantee,
// hence the has_infinity check.
*activation_min = std::numeric_limits<T>::has_infinity
? -std::numeric_limits<T>::infinity()
: std::numeric_limits<T>::lowest();
*activation_max = std::numeric_limits<T>::has_infinity
? std::numeric_limits<T>::infinity()
: std::numeric_limits<T>::max();
}
}
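A brief caller-side sketch of the templated helper after this change — Example is a hypothetical function, and the integer branch illustrates that non-float types keep the old lowest()/max() fallback since has_infinity is false for them:

#include <cstdint>
#include "tensorflow/lite/kernels/kernel_util.h"

void Example() {
  // float has_infinity, so kTfLiteActNone now yields an unbounded range:
  // act_min == -inf, act_max == +inf, and the subsequent clamp alters
  // nothing, not even infinite values.
  float act_min, act_max;
  tflite::CalculateActivationRange(kTfLiteActNone, &act_min, &act_max);

  // Integer types take the lowest()/max() branch exactly as before:
  // imin == INT32_MIN, imax == INT32_MAX.
  int32_t imin, imax;
  tflite::CalculateActivationRange(kTfLiteActNone, &imin, &imax);
}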
