Skip to content
Permalink
Browse files — browse the repository at this point in the history
Prevent a division by zero in the average pooling (AveragePool) ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
  • Loading branch information
mihaimaruseac authored and tensorflower-gardener committed Jul 16, 2021
1 parent 7a1346b commit dfa22b3
Show file tree
Hide file tree
Showing 8 changed files with 165 additions and 132 deletions.
14 changes: 8 additions & 6 deletions tensorflow/lite/kernels/internal/averagepool_quantized_test.cc
Expand Up @@ -40,12 +40,14 @@ void RunOneAveragePoolTest(const PoolParams& params,
std::vector<int8> optimized_averagePool_output(buffer_size);
std::vector<int8> reference_averagePool_output(buffer_size);

reference_integer_ops::AveragePool(params, input_shape, input_data,
output_shape,
reference_averagePool_output.data());
optimized_integer_ops::AveragePool(params, input_shape, input_data,
output_shape,
optimized_averagePool_output.data());
bool reference_success = reference_integer_ops::AveragePool(
params, input_shape, input_data, output_shape,
reference_averagePool_output.data());
bool optimized_success = optimized_integer_ops::AveragePool(
params, input_shape, input_data, output_shape,
optimized_averagePool_output.data());
EXPECT_TRUE(reference_success);
EXPECT_TRUE(optimized_success);

for (int i = 0; i < buffer_size; i++) {
EXPECT_TRUE(reference_averagePool_output[i] ==
Expand Down
Expand Up @@ -144,7 +144,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
}
}

inline void AveragePool(const PoolParams& params,
inline bool AveragePool(const PoolParams& params,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& output_shape, int8* output_data) {
ruy::profiler::ScopeLabel label("AveragePool/8bitWith32bitAccumulator");
Expand Down Expand Up @@ -192,6 +192,7 @@ inline void AveragePool(const PoolParams& params,
std::min(params.filter_height, input_height - in_y_origin);
const int filter_count =
(filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
if (filter_count == 0) return false;
memset(acc, 0, tranche_depth * sizeof(acc[0]));
const int8* input_ptr =
input_data + depth_base +
Expand Down Expand Up @@ -267,6 +268,7 @@ inline void AveragePool(const PoolParams& params,
}
}
}
return true;
}

} // namespace optimized_integer_ops
Expand Down
46 changes: 25 additions & 21 deletions tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h
Expand Up @@ -3761,7 +3761,7 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,
output_data, output_dims);
}

inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
inline bool AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
float output_activation_min,
Expand All @@ -3776,35 +3776,37 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
params.padding_values.width = pad_width;
params.float_activation_min = output_activation_min;
params.float_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
return AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}

// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const float* input_data, const Dims<4>& input_dims,
bool AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight, float* output_data,
const Dims<4>& output_dims) {
float output_activation_min, output_activation_max;
GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);

AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
pad_height, kwidth, kheight, output_activation_min,
output_activation_max, output_data, output_dims);
return AveragePool(input_data, input_dims, stride_width, stride_height,
pad_width, pad_height, kwidth, kheight,
output_activation_min, output_activation_max, output_data,
output_dims);
}

// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
int pad_width, int pad_height, int filter_width,
int filter_height, float* output_data,
const Dims<4>& output_dims) {
AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
filter_width, filter_height, output_data, output_dims);
return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
pad_height, filter_width, filter_height, output_data,
output_dims);
}

inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
inline bool AveragePool(const uint8* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min,
Expand All @@ -3819,13 +3821,13 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
params.padding_values.width = pad_width;
params.quantized_activation_min = output_activation_min;
params.quantized_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
return AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}

// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
bool AveragePool(const uint8* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min, int32 output_activation_max,
Expand All @@ -3839,21 +3841,23 @@ void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
TFLITE_DCHECK_EQ(output_activation_min, 0);
TFLITE_DCHECK_EQ(output_activation_max, 255);
}
AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
pad_height, filter_width, filter_height, output_activation_min,
output_activation_max, output_data, output_dims);
return AveragePool(input_data, input_dims, stride_width, stride_height,
pad_width, pad_height, filter_width, filter_height,
output_activation_min, output_activation_max, output_data,
output_dims);
}

// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
int pad_width, int pad_height, int filter_width,
int filter_height, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
filter_width, filter_height, output_activation_min,
output_activation_max, output_data, output_dims);
return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
pad_height, filter_width, filter_height,
output_activation_min, output_activation_max,
output_data, output_dims);
}

inline void MaxPool(const float* input_data, const Dims<4>& input_dims,
Expand Down
11 changes: 9 additions & 2 deletions tensorflow/lite/kernels/internal/optimized/optimized_ops.h
Expand Up @@ -3172,7 +3172,7 @@ inline int NodeOffset(int b, int h, int w, int height, int width) {
return (b * height + h) * width + w;
}

inline void AveragePool(const PoolParams& params,
inline bool AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
Expand All @@ -3187,6 +3187,9 @@ inline void AveragePool(const PoolParams& params,
const int stride_height = params.stride_height;
const int stride_width = params.stride_width;

if (stride_height == 0) return false;
if (stride_width == 0) return false;

// TODO(benoitjacob) make this a proper reference impl without Eigen!
const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
Expand Down Expand Up @@ -3232,9 +3235,11 @@ inline void AveragePool(const PoolParams& params,
params.float_activation_min,
params.float_activation_max);
}

return true;
}

inline void AveragePool(const PoolParams& params,
inline bool AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const uint8* input_data,
const RuntimeShape& output_shape, uint8* output_data) {
Expand Down Expand Up @@ -3283,6 +3288,7 @@ inline void AveragePool(const PoolParams& params,
std::min(params.filter_height, input_height - in_y_origin);
const int filter_count =
(filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
if (filter_count == 0) return false;
memset(acc, 0, tranche_depth * sizeof(acc[0]));
const uint8* input_ptr =
input_data + depth_base +
Expand Down Expand Up @@ -3369,6 +3375,7 @@ inline void AveragePool(const PoolParams& params,
}
}
}
return true;
}

inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
Expand Down
Expand Up @@ -21,7 +21,7 @@ limitations under the License.
namespace tflite {
namespace reference_integer_ops {

inline void AveragePool(const PoolParams& params,
inline bool AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const int8_t* input_data,
const RuntimeShape& output_shape, int8_t* output_data) {
Expand Down Expand Up @@ -66,6 +66,7 @@ inline void AveragePool(const PoolParams& params,
filter_count++;
}
}
if (filter_count == 0) return false;
// Round to the closest integer value.
acc = acc > 0 ? (acc + filter_count / 2) / filter_count
: (acc - filter_count / 2) / filter_count;
Expand All @@ -77,6 +78,7 @@ inline void AveragePool(const PoolParams& params,
}
}
}
return true;
}

inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
Expand Down Expand Up @@ -136,7 +138,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
}
}

inline void AveragePool(const PoolParams& params,
inline bool AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const int16_t* input_data,
const RuntimeShape& output_shape,
Expand Down Expand Up @@ -182,6 +184,7 @@ inline void AveragePool(const PoolParams& params,
filter_count++;
}
}
if (filter_count == 0) return false;
// Round to the closest integer value.
acc = acc > 0 ? (acc + filter_count / 2) / filter_count
: (acc - filter_count / 2) / filter_count;
Expand All @@ -193,6 +196,7 @@ inline void AveragePool(const PoolParams& params,
}
}
}
return true;
}

inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
Expand Down
46 changes: 25 additions & 21 deletions tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h
Expand Up @@ -1487,7 +1487,7 @@ void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data,
output_data);
}

inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
inline bool AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
float output_activation_min,
Expand All @@ -1502,8 +1502,8 @@ inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
params.padding_values.width = pad_width;
params.float_activation_min = output_activation_min;
params.float_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
return AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}

// Transitional version that will be moved shortly to legacy_reference_ops, as
Expand Down Expand Up @@ -1562,29 +1562,31 @@ inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,

// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const float* input_data, const Dims<4>& input_dims,
bool AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight, float* output_data,
const Dims<4>& output_dims) {
float output_activation_min, output_activation_max;
GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);

AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
pad_height, kwidth, kheight, output_activation_min,
output_activation_max, output_data, output_dims);
return AveragePool(input_data, input_dims, stride_width, stride_height,
pad_width, pad_height, kwidth, kheight,
output_activation_min, output_activation_max, output_data,
output_dims);
}

// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
bool AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
int pad_width, int pad_height, int filter_width,
int filter_height, float* output_data,
const Dims<4>& output_dims) {
AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
filter_width, filter_height, output_data, output_dims);
return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
pad_height, filter_width, filter_height, output_data,
output_dims);
}

inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
inline bool AveragePool(const uint8* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min,
Expand All @@ -1599,13 +1601,13 @@ inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
params.padding_values.width = pad_width;
params.quantized_activation_min = output_activation_min;
params.quantized_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
return AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}

// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
bool AveragePool(const uint8* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min, int32 output_activation_max,
Expand All @@ -1619,21 +1621,23 @@ void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
TFLITE_DCHECK_EQ(output_activation_min, 0);
TFLITE_DCHECK_EQ(output_activation_max, 255);
}
AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
pad_height, filter_width, filter_height, output_activation_min,
output_activation_max, output_data, output_dims);
return AveragePool(input_data, input_dims, stride_width, stride_height,
pad_width, pad_height, filter_width, filter_height,
output_activation_min, output_activation_max, output_data,
output_dims);
}

// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
bool AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
int pad_width, int pad_height, int filter_width,
int filter_height, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
filter_width, filter_height, output_activation_min,
output_activation_max, output_data, output_dims);
return AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width,
pad_height, filter_width, filter_height,
output_activation_min, output_activation_max,
output_data, output_dims);
}

inline void MaxPool(const float* input_data, const Dims<4>& input_dims,
Expand Down

0 comments on commit dfa22b3

Please sign in to comment.