From 88fba07f3bc735edc20a2ea82e3334279db985f2 Mon Sep 17 00:00:00 2001
From: Advait Jain
Date: Tue, 30 Mar 2021 11:10:27 -0700
Subject: [PATCH] Remove the div kernel.

Workaround for http://b/184059002
---
 tensorflow/lite/micro/all_ops_resolver.cc  |   1 -
 tensorflow/lite/micro/kernels/div.cc       | 206 ----------
 tensorflow/lite/micro/kernels/div_test.cc  | 375 ------------------
 .../lite/micro/micro_mutable_op_resolver.h |   4 -
 tensorflow/lite/micro/tools/make/Makefile  |   3 -
 5 files changed, 589 deletions(-)
 delete mode 100644 tensorflow/lite/micro/kernels/div.cc
 delete mode 100644 tensorflow/lite/micro/kernels/div_test.cc

diff --git a/tensorflow/lite/micro/all_ops_resolver.cc b/tensorflow/lite/micro/all_ops_resolver.cc
index 90824e9775d0a8..a0e55455fac255 100644
--- a/tensorflow/lite/micro/all_ops_resolver.cc
+++ b/tensorflow/lite/micro/all_ops_resolver.cc
@@ -35,7 +35,6 @@ AllOpsResolver::AllOpsResolver() {
   AddDepthwiseConv2D();
   AddDequantize();
   AddDetectionPostprocess();
-  AddDiv();
   AddElu();
   AddEqual();
   AddEthosU();
diff --git a/tensorflow/lite/micro/kernels/div.cc b/tensorflow/lite/micro/kernels/div.cc
deleted file mode 100644
index 7d7783bf01e4e8..00000000000000
--- a/tensorflow/lite/micro/kernels/div.cc
+++ /dev/null
@@ -1,206 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/div.h"
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
-#include "tensorflow/lite/kernels/internal/types.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-constexpr int kInputTensor1 = 0;
-constexpr int kInputTensor2 = 1;
-constexpr int kOutputTensor = 0;
-
-struct OpData {
-  // Parameters used in the quantized paths where the output is 8bit
-  int32_t input1_zero_point;
-  int32_t input2_zero_point;
-  int32_t output_zero_point;
-  int32_t output_activation_min;
-  int32_t output_activation_max;
-
-  // Parameters used in all quantized paths
-  int32_t output_multiplier;
-  int output_shift;
-};
-
-TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
-                             TfLiteDivParams* params, OpData* data) {
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  const TfLiteTensor* input1;
-  TF_LITE_ENSURE_OK(context,
-                    GetInputSafe(context, node, kInputTensor1, &input1));
-  const TfLiteTensor* input2;
-  TF_LITE_ENSURE_OK(context,
-                    GetInputSafe(context, node, kInputTensor2, &input2));
-  TfLiteTensor* output;
-  TF_LITE_ENSURE_OK(context,
-                    GetOutputSafe(context, node, kOutputTensor, &output));
-
-  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
-  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type);
-
-  if (output->type == kTfLiteInt8) {
-    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
-        context, params->activation, output, &data->output_activation_min,
-        &data->output_activation_max));
-    const double real_multiplier = static_cast<double>(
-        input1->params.scale / (input2->params.scale * output->params.scale));
-    QuantizeMultiplier(real_multiplier, &data->output_multiplier,
-                       &data->output_shift);
-    data->input1_zero_point = input1->params.zero_point;
-    data->input2_zero_point = input2->params.zero_point;
-    data->output_zero_point = output->params.zero_point;
-  }
-
-  return kTfLiteOk;
-}
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpData));
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  auto* params = static_cast<TfLiteDivParams*>(node->builtin_data);
-  auto* data = static_cast<OpData*>(node->user_data);
-  return CalculateOpData(context, node, params, data);
-}
-
-void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params,
-             const OpData* data, const TfLiteEvalTensor* input1,
-             const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) {
-  tflite::ArithmeticParams op_params = {};
-
-#define TF_LITE_DIV(type, opname, data_type)                           \
-  data_type output_activation_min, output_activation_max;              \
-  CalculateActivationRange(params->activation, &output_activation_min, \
-                           &output_activation_max);                    \
-  SetActivationParams(output_activation_min, output_activation_max,    \
-                      &op_params);                                     \
-  type::opname(op_params, tflite::micro::GetTensorShape(input1),       \
-               tflite::micro::GetTensorData<data_type>(input1),        \
-               tflite::micro::GetTensorShape(input2),                  \
-               tflite::micro::GetTensorData<data_type>(input2),        \
-               tflite::micro::GetTensorShape(output),                  \
-               tflite::micro::GetTensorData<data_type>(output))
-
-  bool requires_broadcast = reference_ops::ProcessBroadcastShapes(
-      tflite::micro::GetTensorShape(input1),
-      tflite::micro::GetTensorShape(input2), &op_params);
-
-  if (requires_broadcast) {
-    TF_LITE_DIV(reference_ops, BroadcastDivSlow, float);
-  } else {
-    TF_LITE_DIV(reference_ops, Div, float);
-  }
-#undef TF_LITE_DIV
-}
-
-TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
-                           TfLiteDivParams* params, const OpData* data,
-                           const TfLiteEvalTensor* input1,
-                           const TfLiteEvalTensor* input2,
-                           TfLiteEvalTensor* output) {
-  tflite::ArithmeticParams op_params = {};
-
-#define TF_LITE_DIV(type, opname, dtype)                         \
-  type::opname(op_params, tflite::micro::GetTensorShape(input1), \
-               tflite::micro::GetTensorData<dtype>(input1),      \
-               tflite::micro::GetTensorShape(input2),            \
-               tflite::micro::GetTensorData<dtype>(input2),      \
-               tflite::micro::GetTensorShape(output),            \
-               tflite::micro::GetTensorData<dtype>(output))
-
-  if (input1->type == kTfLiteInt8 && input2->type == kTfLiteInt8 &&
-      output->type == kTfLiteInt8) {
-    SetActivationParams(data->output_activation_min,
-                        data->output_activation_max, &op_params);
-    op_params.input1_offset = -data->input1_zero_point;
-    op_params.input2_offset = -data->input2_zero_point;
-    op_params.output_offset = data->output_zero_point;
-    op_params.output_multiplier = data->output_multiplier;
-    op_params.output_shift = data->output_shift;
-
-    bool requires_broadcast = reference_ops::ProcessBroadcastShapes(
-        tflite::micro::GetTensorShape(input1),
-        tflite::micro::GetTensorShape(input2), &op_params);
-
-    if (requires_broadcast) {
-      TF_LITE_DIV(reference_ops, BroadcastDivSlow, int8_t);
-    } else {
-      TF_LITE_DIV(reference_ops, Div, int8_t);
-    }
-#undef TF_LITE_DIV
-  } else {
-    TF_LITE_KERNEL_LOG(
-        context, "Unsupported combination of input and output types in DIV.");
-    return kTfLiteError;
-  }
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->builtin_data != nullptr);
-  auto* params = static_cast<TfLiteDivParams*>(node->builtin_data);
-  TFLITE_DCHECK(node->user_data != nullptr);
-  auto* data = static_cast<const OpData*>(node->user_data);
-
-  const TfLiteEvalTensor* input1 =
-      tflite::micro::GetEvalInput(context, node, kInputTensor1);
-  const TfLiteEvalTensor* input2 =
-      tflite::micro::GetEvalInput(context, node, kInputTensor2);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
-  if (output->type == kTfLiteFloat32) {
-    EvalDiv(context, node, params, data, input1, input2, output);
-  } else if (output->type == kTfLiteInt8) {
-    TF_LITE_ENSURE_OK(context, EvalQuantized(context, node, params, data,
-                                             input1, input2, output));
-  } else {
-    TF_LITE_KERNEL_LOG(context,
-                       "DIV only supports FLOAT32, quantized INT8 "
-                       "now, got type %s (%d).",
-                       TfLiteTypeGetName(output->type), output->type);
-    return kTfLiteError;
-  }
-
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_DIV() {
-  return {/*init=*/Init,
-          /*free=*/nullptr,
-          /*prepare=*/Prepare,
-          /*invoke=*/Eval,
-          /*profiling_string=*/nullptr,
-          /*builtin_code=*/0,
-          /*custom_name=*/nullptr,
-          /*version=*/0};
-}
-
-}  // namespace tflite
diff --git a/tensorflow/lite/micro/kernels/div_test.cc b/tensorflow/lite/micro/kernels/div_test.cc
deleted file mode 100644
index c8685a1204e2f1..00000000000000
--- a/tensorflow/lite/micro/kernels/div_test.cc
+++ /dev/null
@@ -1,375 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include <type_traits>
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/kernels/kernel_runner.h"
-#include "tensorflow/lite/micro/test_helpers.h"
-#include "tensorflow/lite/micro/testing/micro_test.h"
-
-namespace tflite {
-namespace testing {
-namespace {
-
-void ExecuteDivTest(TfLiteTensor* tensors, int tensors_count,
-                    TfLiteFusedActivation activation) {
-  TfLiteDivParams builtin_data = {};
-  builtin_data.activation = activation;
-
-  constexpr int kInputArrayData[] = {2, 0, 1};
-  TfLiteIntArray* inputs_array = IntArrayFromInts(kInputArrayData);
-  constexpr int kOutputArrayData[] = {1, 2};
-  TfLiteIntArray* outputs_array = IntArrayFromInts(kOutputArrayData);
-
-  const TfLiteRegistration registration = tflite::Register_DIV();
-  micro::KernelRunner runner(registration, tensors, tensors_count,
-                             inputs_array, outputs_array,
-                             static_cast<void*>(&builtin_data));
-
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
-  TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());
-}
-
-template <typename T>
-void TestDiv(TfLiteFusedActivation activation, const int* input1_dims_data,
-             const T* input1_data, const int* input2_dims_data,
-             const T* input2_data, const int* expected_dims,
-             const T* expected_data, T* output_data) {
-  TfLiteIntArray* input1_dims = IntArrayFromInts(input1_dims_data);
-  TfLiteIntArray* input2_dims = IntArrayFromInts(input2_dims_data);
-  TfLiteIntArray* output_dims = IntArrayFromInts(expected_dims);
-  const int output_count = ElementCount(*output_dims);
-
-  TfLiteTensor tensors[] = {
-      CreateTensor(input1_data, input1_dims),
-      CreateTensor(input2_data, input2_dims),
-      CreateTensor(output_data, output_dims),
-  };
-  constexpr int tensors_count = std::extent<decltype(tensors)>::value;
-
-  ExecuteDivTest(tensors, tensors_count, activation);
-
-  constexpr float kTolerance = 1e-5;
-  for (int i = 0; i < output_count; i++) {
-    TF_LITE_MICRO_EXPECT_NEAR(expected_data[i], output_data[i], kTolerance);
-  }
-}
-
-// For quantized Div, the error shouldn't exceed (2*step + step^2).
-inline float GetTolerance(int min, int max) {
-  const float kQuantizedStep = (max - min) / 255.0f;
-  const float kQuantizedTolerance =
-      2.0f * kQuantizedStep + kQuantizedStep * kQuantizedStep;
-  return kQuantizedTolerance;
-}
-
-// min/max are used to compute scale, zero-point, compare tolerance
-template <typename T>
-struct TestQuantParams {
-  float data_min;  // input and output data minimum value
-  float data_max;  // input and output data maximum value
-  T* input1_data;  // quantized input1 storage
-  T* input2_data;  // quantized input2 storage
-  T* output_data;  // quantized output storage
-};
-
-template <typename T>
-void TestDivQuantized(const TestQuantParams<T>& params,
-                      TfLiteFusedActivation activation,
-                      const int* input1_dims_data, const float* input1_data,
-                      const int* input2_dims_data, const float* input2_data,
-                      const int* expected_dims, const float* expected_data,
-                      float* output_data) {
-  TfLiteIntArray* input1_dims = IntArrayFromInts(input1_dims_data);
-  TfLiteIntArray* input2_dims = IntArrayFromInts(input2_dims_data);
-  TfLiteIntArray* output_dims = IntArrayFromInts(expected_dims);
-  const int output_count = ElementCount(*output_dims);
-
-  const float scale = ScaleFromMinMax<T>(params.data_min, params.data_max);
-  const int zero_point =
-      ZeroPointFromMinMax<T>(params.data_min, params.data_max);
-
-  TfLiteTensor tensors[] = {
-      CreateQuantizedTensor(input1_data, params.input1_data, input1_dims, scale,
-                            zero_point),
-      CreateQuantizedTensor(input2_data, params.input2_data, input2_dims, scale,
-                            zero_point),
-      CreateQuantizedTensor(params.output_data, output_dims, scale, zero_point),
-  };
-  constexpr int kTensorsCount = std::extent<decltype(tensors)>::value;
-
-  ExecuteDivTest(tensors, kTensorsCount, activation);
-
-  Dequantize(params.output_data, output_count, scale, zero_point, output_data);
-  const float kTolerance = GetTolerance(params.data_min, params.data_max);
-  for (int i = 0; i < output_count; i++) {
-    TF_LITE_MICRO_EXPECT_NEAR(expected_data[i], output_data[i], kTolerance);
-  }
-}
-
-template <typename T>
-void TestDivMultiShape(TfLiteFusedActivation activation, const int** shapes,
-                       const int shapes_count, const T* input1_data,
-                       const T* input2_data, const T* expected_data,
-                       T* output_data) {
-  for (int i = 0; i < shapes_count; i++) {
-    TestDiv(activation, shapes[i], input1_data, shapes[i], input2_data,
-            shapes[i], expected_data, output_data);
-  }
-}
-
-template <typename T>
-void TestDivMultiShapeQuant(const TestQuantParams<T>& params,
-                            TfLiteFusedActivation activation,
-                            const int** shapes, const int shapes_count,
-                            const float* input1_data, const float* input2_data,
-                            const float* expected_data, float* output_data) {
-  for (int i = 0; i < shapes_count; i++) {
-    TestDivQuantized(params, activation, shapes[i], input1_data, shapes[i],
-                     input2_data, shapes[i], expected_data, output_data);
-  }
-}
-
-// when broadcasting, input2 is a scalar
-template <typename T>
-void TestDivMultiBroadcast(TfLiteFusedActivation activation,
-                           const int** shapes, const int shapes_count,
-                           const T* input1_data, const T* input2_data,
-                           const T* expected_data, T* output_data) {
-  constexpr int kDimScaler[] = {1, 1};
-  for (int i = 0; i < shapes_count; i++) {
-    TestDiv(activation, shapes[i], input1_data, kDimScaler, input2_data,
-            shapes[i], expected_data, output_data);
-  }
-}
-
-// when broadcasting, input2 is a scalar
-template <typename T>
-void TestDivMultiBroadcastQuant(
-    const TestQuantParams<T>& params, TfLiteFusedActivation activation,
-    const int** shapes, const int shapes_count, const float* input1_data,
-    const float* input2_data, const float* expected_data, float* output_data) {
-  constexpr int kDimScaler[] = {1, 1};
-  for (int i = 0; i < shapes_count; i++) {
-    TestDivQuantized(params, activation, shapes[i], input1_data, kDimScaler,
-                     input2_data, shapes[i], expected_data, output_data);
-  }
-}
-
-}  // namespace
-}  // namespace testing
-}  // namespace tflite
-
-TF_LITE_MICRO_TESTS_BEGIN
-
-TF_LITE_MICRO_TEST(FloatDivOpTestActNone) {
-  constexpr int kDims[] = {4, 1, 2, 2, 1};
-  constexpr float kInput1[] = {-0.2, 0.2, -1.2, 0.8};
-  constexpr float kInput2[] = {0.5, 0.2, -1.5, 0.5};
-  constexpr float kExpect[] = {-0.4, 1.0, 0.8, 1.6};
-  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
-  float output_data[kOutputCount];
-
-  tflite::testing::TestDiv(kTfLiteActNone, kDims, kInput1, kDims, kInput2,
-                           kDims, kExpect, output_data);
-}
-
-TF_LITE_MICRO_TEST(FloatDivOpTestActReluN1To1) {
-  constexpr int kDims[] = {4, 1, 2, 2, 1};
-  constexpr float kInput1[] = {-0.2, 0.2, -1.2, 0.8};
-  constexpr float kInput2[] = {0.1, 0.2, -1.5, 0.5};
-  constexpr float kExpect[] = {-1.0, 1.0, 0.8, 1.0};
-  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
-  float output_data[kOutputCount];
-
-  tflite::testing::TestDiv(kTfLiteActReluN1To1, kDims, kInput1, kDims, kInput2,
-                           kDims, kExpect, output_data);
-}
-
-TF_LITE_MICRO_TEST(FloatDivOpTestMultiShape) {
-  constexpr int kShape1[] = {1, 6};
-  constexpr int kShape2[] = {2, 2, 3};
-  constexpr int kShape3[] = {3, 2, 1, 3};
-  constexpr int kShape4[] = {4, 1, 3, 1, 2};
-  const int* kDims[] = {kShape1, kShape2, kShape3, kShape4};
-  constexpr int kDimsCount = std::extent<decltype(kDims)>::value;
-
-  constexpr float kInput1[] = {-2.0, 0.2, 0.3, 0.8, 1.1, -2.0};
-  constexpr float kInput2[] = {0.1, 0.2, 0.6, 0.5, -1.1, -0.1};
-  constexpr float kExpect[] = {-20.0, 1.0, 0.5, 1.6, -1.0, 20.0};
-  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
-  float output_data[kOutputCount];
-
-  tflite::testing::TestDivMultiShape(kTfLiteActNone, kDims, kDimsCount, kInput1,
-                                     kInput2, kExpect, output_data);
-}
-
-TF_LITE_MICRO_TEST(FloatDivOpTestBroadcast) {
-  constexpr int kShape1[] = {1, 8};
-  constexpr int kShape2[] = {2, 2, 4};
-  constexpr int kShape3[] = {3, 2, 1, 4};
-  constexpr int kShape4[] = {4, 1, 2, 2, 2};
-  const int* kDims[] = {kShape1, kShape2, kShape3, kShape4};
-  constexpr int kDimsCount = std::extent<decltype(kDims)>::value;
-
-  constexpr float kInput1[] = {-0.2, 0.2,    0.07,  0.08,
-                               0.11, -0.123, -0.32, 0.54};
-  constexpr float kInput2[] = {0.1};
-  constexpr float kExpect[] = {-2.0, 2.0, 0.7, 0.8, 1.1, -1.23, -3.2, 5.4};
-  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
-  float output_data[kOutputCount];
-
-  tflite::testing::TestDivMultiBroadcast(kTfLiteActNone, kDims, kDimsCount,
-                                         kInput1, kInput2, kExpect,
-                                         output_data);
-}
-
-TF_LITE_MICRO_TEST(FloatDivOpTestBroadcast5D) {
-  constexpr int kShape1[] = {5, 1, 2, 1, 2, 2};
-  const int* kDims[] = {kShape1};
-  constexpr int kDimsCount = std::extent<decltype(kDims)>::value;
-
-  constexpr float kInput1[] = {-0.2, 0.2,    0.07,  0.08,
-                               0.11, -0.123, -0.32, 0.54};
-  constexpr float kInput2[] = {0.1};
-  constexpr float kExpect[] = {-2.0, 2.0, 0.7, 0.8, 1.1, -1.23, -3.2, 5.4};
-  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
-  float output_data[kOutputCount];
-
-  tflite::testing::TestDivMultiBroadcast(kTfLiteActNone, kDims, kDimsCount,
-                                         kInput1, kInput2, kExpect,
-                                         output_data);
-}
-
-TF_LITE_MICRO_TEST(QuantizedDivOpTestActNone) {
-  constexpr int kDims[] = {4, 1, 2, 2, 1};
-  constexpr float kInput1[] = {-0.8, -0.2, 0.3, 0.7};
-  constexpr float kInput2[] = {-0.8, 0.4, 0.8, 1.0};
-  constexpr float kExpect[] = {1.0, -0.5, 0.375, 0.7};
-  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
-  float output_data[kOutputCount];
-
-  // setup quantization storage and parameters
-  int8_t q_output_data[kOutputCount];
-  int8_t q_input1_data[kOutputCount];
-  int8_t q_input2_data[kOutputCount];
-  tflite::testing::TestQuantParams<int8_t> params = {};
-  params.data_min = -1.0;
-  params.data_max = 1.0;
-  params.input1_data = q_input1_data;
-  params.input2_data = q_input2_data;
-  params.output_data = q_output_data;
-
-  tflite::testing::TestDivQuantized(params, kTfLiteActNone, kDims, kInput1,
-                                    kDims, kInput2, kDims, kExpect,
-                                    output_data);
-}
-
-TF_LITE_MICRO_TEST(QuantizedDivOpTestActReluN1To1) {
-  constexpr int kDims[] = {4, 1, 2, 2, 1};
-  constexpr float kInput1[] = {-0.8, 0.2, 0.9, 0.7};
-  constexpr float kInput2[] = {0.6, 0.4, 0.9, -0.8};
-  constexpr float kExpect1[] = {-1.0, 0.5, 1.0, -0.875};
-  constexpr int kOutputCount = std::extent<decltype(kExpect1)>::value;
-  float output_data[kOutputCount];
-
-  // setup quantization storage and parameters
-  int8_t q_output_data[kOutputCount];
-  int8_t q_input1_data[kOutputCount];
-  int8_t q_input2_data[kOutputCount];
-  tflite::testing::TestQuantParams<int8_t> params = {};
-  params.data_min = -1.0;
-  params.data_max = 1.0;
-  params.input1_data = q_input1_data;
-  params.input2_data = q_input2_data;
-  params.output_data = q_output_data;
-
-  tflite::testing::TestDivQuantized(params, kTfLiteActReluN1To1, kDims, kInput1,
-                                    kDims, kInput2, kDims, kExpect1,
-                                    output_data);
-
-  constexpr float kInput3[] = {-0.5, 0.2, 0.6, 0.3};
-  constexpr float kInput4[] = {0.6, 0.5, -0.8, 0.5};
-  constexpr float kExpect2[] = {-0.833, 0.4, -0.75, 0.6};
-
-  tflite::testing::TestDivQuantized(params, kTfLiteActReluN1To1, kDims, kInput3,
-                                    kDims, kInput4, kDims, kExpect2,
-                                    output_data);
-}
-
-TF_LITE_MICRO_TEST(QuantizedDivOpTestMultiShape) {
-  constexpr int kShape1[] = {1, 6};
-  constexpr int kShape2[] = {2, 2, 3};
-  constexpr int kShape3[] = {3, 2, 1, 3};
-  constexpr int kShape4[] = {4, 1, 3, 1, 2};
-  const int* kDims[] = {kShape1, kShape2, kShape3, kShape4};
-  constexpr int kDimsCount = std::extent<decltype(kDims)>::value;
-
-  constexpr float kInput1[] = {-2.0, 0.2, 1.7, 0.9, 0.4, 2.0};
-  constexpr float kInput2[] = {1.3, 0.3, 1.1, 0.4, -1.1, 1.9};
-  constexpr float kExpect[] = {-1.538, 0.667, 1.545, 2.25, -0.364, 1.053};
-  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
-  float output_data[kOutputCount];
-
-  // setup quantization storage and parameters
-  int8_t q_output_data[kOutputCount];
-  int8_t q_input1_data[kOutputCount];
-  int8_t q_input2_data[kOutputCount];
-  tflite::testing::TestQuantParams<int8_t> params = {};
-  params.data_min = -3.0;
-  params.data_max = 3.0;
-  params.input1_data = q_input1_data;
-  params.input2_data = q_input2_data;
-  params.output_data = q_output_data;
-
-  tflite::testing::TestDivMultiShapeQuant(params, kTfLiteActNone, kDims,
                                          kDimsCount, kInput1, kInput2, kExpect,
-                                          output_data);
-}
-
-TF_LITE_MICRO_TEST(QuantizedDivOpTestBroadcast) {
-  constexpr int kShape1[] = {1, 8};
-  constexpr int kShape2[] = {2, 2, 4};
-  constexpr int kShape3[] = {3, 2, 1, 4};
-  constexpr int kShape4[] = {4, 1, 4, 1, 2};
-  constexpr int kShape5[] = {5, 1, 2, 1, 2, 2};
-  const int* kDims[] = {kShape1, kShape2, kShape3, kShape4, kShape5};
-  constexpr int kDimsCount = std::extent<decltype(kDims)>::value;
-
-  constexpr float kInput1[] = {-2.0, 0.2, 0.7, 0.8, -0.5, 1.1, -1.3, 1.2};
-  constexpr float kInput2[] = {0.7};
-  constexpr float kExpect[] = {-2.857, 0.286, 1.0,    1.143,
-                               -0.714, 1.571, -1.857, 1.714};
-  constexpr int kOutputCount = std::extent<decltype(kExpect)>::value;
-  float output_data[kOutputCount];
-
-  // setup quantization storage and parameters
-  int8_t q_output_data[kOutputCount];
-  int8_t q_input1_data[kOutputCount];
-  int8_t q_input2_data[kOutputCount];
-  tflite::testing::TestQuantParams<int8_t> params = {};
-  params.data_min = -3.0;
-  params.data_max = 3.0;
-  params.input1_data = q_input1_data;
-  params.input2_data = q_input2_data;
-  params.output_data = q_output_data;
-
-  tflite::testing::TestDivMultiBroadcastQuant(params, kTfLiteActNone, kDims,
-                                              kDimsCount, kInput1, kInput2,
-                                              kExpect, output_data);
-}
-
-TF_LITE_MICRO_TESTS_END
diff --git a/tensorflow/lite/micro/micro_mutable_op_resolver.h b/tensorflow/lite/micro/micro_mutable_op_resolver.h
index 44d40342495556..08dd4fdffac8ff 100644
--- a/tensorflow/lite/micro/micro_mutable_op_resolver.h
+++ b/tensorflow/lite/micro/micro_mutable_op_resolver.h
@@ -193,10 +193,6 @@ class MicroMutableOpResolver : public MicroOpResolver {
                       tflite::Register_DETECTION_POSTPROCESS());
   }
 
-  TfLiteStatus AddDiv() {
-    return AddBuiltin(BuiltinOperator_DIV, tflite::Register_DIV(), ParseDiv);
-  }
-
   TfLiteStatus AddElu() {
     return AddBuiltin(BuiltinOperator_ELU, tflite::Register_ELU(), ParseElu);
   }
diff --git a/tensorflow/lite/micro/tools/make/Makefile b/tensorflow/lite/micro/tools/make/Makefile
index d6012458054ca3..98c604e3a79d0e 100644
--- a/tensorflow/lite/micro/tools/make/Makefile
+++ b/tensorflow/lite/micro/tools/make/Makefile
@@ -278,7 +278,6 @@ tensorflow/lite/micro/kernels/conv_test.cc \
 tensorflow/lite/micro/kernels/depthwise_conv_test.cc \
 tensorflow/lite/micro/kernels/dequantize_test.cc \
 tensorflow/lite/micro/kernels/detection_postprocess_test.cc \
-tensorflow/lite/micro/kernels/div_test.cc \
 tensorflow/lite/micro/kernels/elementwise_test.cc \
 tensorflow/lite/micro/kernels/elu_test.cc \
 tensorflow/lite/micro/kernels/exp_test.cc \
@@ -338,7 +337,6 @@ tensorflow/lite/micro/kernels/depthwise_conv.cc \
 tensorflow/lite/micro/kernels/depthwise_conv_common.cc \
 tensorflow/lite/micro/kernels/dequantize.cc \
 tensorflow/lite/micro/kernels/detection_postprocess.cc \
-tensorflow/lite/micro/kernels/div.cc \
 tensorflow/lite/micro/kernels/elementwise.cc \
 tensorflow/lite/micro/kernels/elu.cc \
 tensorflow/lite/micro/kernels/ethosu.cc \
@@ -432,7 +430,6 @@ tensorflow/lite/kernels/internal/reference/conv.h \
 tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h \
 tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h \
 tensorflow/lite/kernels/internal/reference/dequantize.h \
-tensorflow/lite/kernels/internal/reference/div.h \
 tensorflow/lite/kernels/internal/reference/elu.h \
 tensorflow/lite/kernels/internal/reference/exp.h \
 tensorflow/lite/kernels/internal/reference/fill.h \