[TFLite] Add int16x8 support for the MIRROR_PAD operator #52351

Merged
4 changes: 2 additions & 2 deletions tensorflow/compiler/mlir/lite/ir/tfl_ops.td
@@ -3923,13 +3923,13 @@ def TFL_MirrorPadOp: TFL_Op<"mirror_pad", [
}];

let arguments = (ins
TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8]>:$input,
TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8, QI16]>:$input,
TFL_TensorOf<[I32, I64]>:$pad,
TFL_MirrorPaddingAttr:$mode
);

let results = (outs
TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8]>:$output
TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8, QI16]>:$output
);

let hasOptions = 1;
2 changes: 1 addition & 1 deletion tensorflow/lite/core/kernels/register.cc
@@ -279,7 +279,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
/* max_version = */ 4);
AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD(),
/* min_version = */ 1,
/* max_version = */ 2);
/* max_version = */ 3);
AddBuiltin(BuiltinOperator_UNIQUE, Register_UNIQUE());
AddBuiltin(BuiltinOperator_REVERSE_V2, Register_REVERSE_V2(),
/* min_version = */ 1,
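Since the registration ceiling moves from 2 to 3, an application that builds its own selective resolver has to advertise MIRROR_PAD up to version 3 before int16x8 models will resolve the op. A minimal sketch, not part of this change, assuming the usual builtin-op headers:

#include "tensorflow/lite/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"

// Sketch only: register MIRROR_PAD versions 1..3 so models converted with
// 16x8 quantization can find the new kernel version.
tflite::MutableOpResolver MakeResolver() {
  tflite::MutableOpResolver resolver;
  resolver.AddBuiltin(tflite::BuiltinOperator_MIRROR_PAD,
                      tflite::ops::builtin::Register_MIRROR_PAD(),
                      /* min_version = */ 1,
                      /* max_version = */ 3);
  return resolver;
}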
17 changes: 17 additions & 0 deletions tensorflow/lite/kernels/mirror_pad.cc
@@ -246,6 +246,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_MIRROR_PAD(int64_t);
break;
}
case kTfLiteInt16: {
TF_LITE_MIRROR_PAD(int16_t);
break;
}
default:
status = kTfLiteError;
break;
@@ -272,6 +276,19 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0),
NumDimensions(input_tensor));

if (input_tensor->type == kTfLiteUInt8 || input_tensor->type == kTfLiteInt8 ||
input_tensor->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input_tensor->params.scale,
output_tensor->params.scale);
TF_LITE_ENSURE_EQ(context, input_tensor->params.zero_point,
output_tensor->params.zero_point);
}

if (input_tensor->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input_tensor->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output_tensor->params.zero_point, 0);
}

if (!IsConstantTensor(padding_matrix)) {
SetTensorToDynamic(output_tensor);
return kTfLiteOk;
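The new Prepare() checks spell out the quantization contract for this kernel: mirror padding only copies existing values, so input and output must share the same scale and zero point, and 16-bit activations additionally have to be symmetrically quantized (zero point 0), matching TFLite's int16x8 scheme. An illustration of parameters that would satisfy these checks (the variable name is hypothetical, not taken from the change):

// Illustration only: quantization parameters that pass the new checks for a
// kTfLiteInt16 MIRROR_PAD. The scale is arbitrary but must be identical on
// input and output; the zero point must be 0 for int16 tensors.
TfLiteQuantizationParams int16_params;
int16_params.scale = 6.0f / 32767.0f;
int16_params.zero_point = 0;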
77 changes: 71 additions & 6 deletions tensorflow/lite/kernels/mirror_pad_test.cc
@@ -44,12 +44,25 @@ class BaseMirrorPadOpModel : public SingleOpModel {

std::vector<T> GetOutput() { return ExtractVector<T>(output_id_); }

std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_id_), GetScale(output_id_),
GetZeroPoint(output_id_));
}

protected:
int input_id_;
int padding_matrix_id_;
int output_id_;
};

template <typename integer_type = int8_t>
float GetTolerance(float min, float max) {
float kQuantizedStep =
(max - min) / (std::numeric_limits<integer_type>::max() -
std::numeric_limits<integer_type>::min());
return kQuantizedStep;
}

TEST(MirrorPadTest, EmptyPad) {
BaseMirrorPadOpModel<int> model(
{TensorType_INT32, {2, 3}}, {TensorType_INT32, {2, 2}},
@@ -128,12 +141,15 @@ TEST(MirrorPadTest, PadBothSides_Reflect) {
5, 4, 5, 6, 5, 2, 1, 2, 3, 2}));
}

TEST(MirrorPadTest, PadBothSides_Symmetric_Whole) {
BaseMirrorPadOpModel<int> model(
{TensorType_INT32, {2, 3}}, {TensorType_INT32, {2, 2}},
{TensorType_INT32, {}}, tflite::MirrorPadMode_SYMMETRIC);
model.PopulateTensor<int>(model.input_tensor_id(), {1, 2, 3, 4, 5, 6});
model.PopulateTensor<int>(model.padding_matrix_tensor_id(), {2, 2, 3, 3});
template <typename dtype>
void PadBothSidesSymetricWhole() {
BaseMirrorPadOpModel<dtype> model(
{GetTensorType<dtype>(), {2, 3}}, {TensorType_INT32, {2, 2}},
{GetTensorType<dtype>(), {}}, tflite::MirrorPadMode_SYMMETRIC);
model.template PopulateTensor<dtype>(model.input_tensor_id(),
{1, 2, 3, 4, 5, 6});
model.template PopulateTensor<int>(model.padding_matrix_tensor_id(),
{2, 2, 3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(
model.GetOutput(),
@@ -142,6 +158,55 @@ TEST(MirrorPadTest, PadBothSides_Symmetric_Whole) {
6, 5, 4, 4, 5, 6, 6, 5, 4, 3, 2, 1, 1, 2, 3, 3, 2, 1}));
}

TEST(MirrorPadTest, PadBothSides_Symmetric_Whole_INT64) {
PadBothSidesSymetricWhole<int64_t>();
}

TEST(MirrorPadTest, PadBothSides_Symmetric_Whole_INT32) {
PadBothSidesSymetricWhole<int32_t>();
}

TEST(MirrorPadTest, PadBothSides_Symmetric_Whole_F32) {
PadBothSidesSymetricWhole<float>();
}

template <typename integer_dtype>
void PadBothSidesSymetricWholeQuant() {
const float kMin = -1;
const float kMax =
std::numeric_limits<integer_dtype>::max() /
static_cast<float>(std::numeric_limits<integer_dtype>::max() + 1);
float kQuantizedTolerance = GetTolerance(-6.0, 6.0);
BaseMirrorPadOpModel<integer_dtype> model(
{GetTensorType<integer_dtype>(), {2, 3}, 6.0 * kMin, 6.0 * kMax},
{TensorType_INT32, {2, 2}},
{GetTensorType<integer_dtype>(), {}, 6.0 * kMin, 6.0 * kMax},
tflite::MirrorPadMode_SYMMETRIC);
model.template QuantizeAndPopulate<integer_dtype>(model.input_tensor_id(),
{1, 2, 3, 4, 5, 6});
model.template PopulateTensor<int>(model.padding_matrix_tensor_id(),
{2, 2, 3, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear(
{6, 5, 4, 4, 5, 6, 6, 5, 4, 3, 2, 1, 1, 2, 3, 3, 2, 1,
3, 2, 1, 1, 2, 3, 3, 2, 1, 6, 5, 4, 4, 5, 6, 6, 5, 4,
6, 5, 4, 4, 5, 6, 6, 5, 4, 3, 2, 1, 1, 2, 3, 3, 2, 1},
kQuantizedTolerance)));
}

TEST(MirrorPadTest, PadBothSides_Symmetric_Whole_UINT8) {
PadBothSidesSymetricWholeQuant<uint8_t>();
}

TEST(MirrorPadTest, PadBothSides_Symmetric_Whole_INT8) {
PadBothSidesSymetricWholeQuant<int8_t>();
}

TEST(MirrorPadTest, PadBothSides_Symmetric_Whole_INT16) {
PadBothSidesSymetricWholeQuant<int16_t>();
}

TEST(MirrorPadTest, PadBothSides_Reflect_Whole) {
BaseMirrorPadOpModel<int> model(
{TensorType_INT32, {2, 3}}, {TensorType_INT32, {2, 2}},
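As a rough check on the tolerance the quantized tests above rely on: GetTolerance is instantiated with its default int8_t parameter, so over the [-6, 6] range it allows one int8 quantization step, which also comfortably bounds the much finer int16 rounding error. A small worked example, for illustration only:

// One int8 step over [-6, 6]: (6 - (-6)) / (127 - (-128)) = 12 / 255 ≈ 0.047.
// One int16 step over the same range: 12 / 65535 ≈ 0.00018.
float int8_step = 12.0f / 255.0f;     // tolerance actually applied in the tests
float int16_step = 12.0f / 65535.0f;  // true step size of the int16x8 path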
2 changes: 1 addition & 1 deletion tensorflow/lite/kernels/register_ref.cc
@@ -460,7 +460,7 @@ BuiltinRefOpResolver::BuiltinRefOpResolver() {
/* max_version = */ 3);
AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD(),
/* min_version = */ 1,
/* max_version = */ 2);
/* max_version = */ 3);
AddBuiltin(BuiltinOperator_UNIQUE, Register_UNIQUE());
AddBuiltin(BuiltinOperator_REVERSE_V2, Register_REVERSE_V2(),
/* min_version = */ 1,
1 change: 1 addition & 0 deletions tensorflow/lite/testing/op_tests/mirror_pad.py
@@ -73,6 +73,7 @@ def make_mirror_pad_tests(options):
"mode": ["REFLECT"],
"type": ["const"],
"fully_quantize": [False, True],
"quant_16x8": [False, True],
},
{
"input_shape": [[3, 2, 4, 5]],
1 change: 0 additions & 1 deletion tensorflow/lite/tools/optimize/operator_property.cc
@@ -1091,7 +1091,6 @@ OperatorProperty GetOperatorProperty(OpVariant op_variant) {
return true;
};
property.version = 2;
property.quantizable_int16 = false;
break;
case BuiltinOperator_REDUCE_PROD:
property.inputs = {{0, {}}};
2 changes: 1 addition & 1 deletion tensorflow/lite/tools/versioning/op_version.cc
@@ -726,6 +726,7 @@ int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
case BuiltinOperator_CONCATENATION:
case BuiltinOperator_SOFTMAX:
case BuiltinOperator_MEAN:
case BuiltinOperator_MIRROR_PAD:
case BuiltinOperator_REDUCE_MAX:
case BuiltinOperator_REDUCE_MIN:
case BuiltinOperator_RELU6:
@@ -849,7 +850,6 @@ int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
case BuiltinOperator_RSQRT:
case BuiltinOperator_SQUARED_DIFFERENCE:
case BuiltinOperator_DEPTH_TO_SPACE:
case BuiltinOperator_MIRROR_PAD:
if (op_sig.inputs.at(0).type == kTfLiteInt8) {
return 2;
}
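Moving BuiltinOperator_MIRROR_PAD into the earlier case group means its version is now derived from the shared type-based rule rather than the int8-only branch below. A minimal sketch of the effective mapping (the helper name is hypothetical, not code from this change):

// Sketch of the rule MIRROR_PAD now shares with MEAN, REDUCE_MAX, etc. in
// GetBuiltinOperatorVersion(): int16 inputs -> 3, int8 -> 2, everything else -> 1.
int MirrorPadVersionForInputType(TfLiteType input_type) {
  if (input_type == kTfLiteInt16) return 3;
  if (input_type == kTfLiteInt8) return 2;
  return 1;
}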
4 changes: 4 additions & 0 deletions tensorflow/lite/tools/versioning/op_version_test.cc
@@ -415,6 +415,10 @@ TEST(OpVersionTest, VersioningReduceMaxTest) {
SimpleVersioningTestExtended(BuiltinOperator_REDUCE_MAX);
}

TEST(OpVersionTest, VersioningMirrorPadTest) {
SimpleVersioningTestExtended(BuiltinOperator_MIRROR_PAD);
}

TEST(OpVersionTest, VersioningReduceProdTest) {
OpSignature fake_op_sig;
fake_op_sig.op = BuiltinOperator_REDUCE_PROD;
1 change: 1 addition & 0 deletions tensorflow/lite/tools/versioning/runtime_version.cc
@@ -280,6 +280,7 @@ std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
{{BuiltinOperator_SQUARED_DIFFERENCE, 2}, "2.5.0"},
{{BuiltinOperator_MIRROR_PAD, 1}, "1.13.1"},
{{BuiltinOperator_MIRROR_PAD, 2}, "2.3.0"},
{{BuiltinOperator_MIRROR_PAD, 3}, "2.12.0"},
{{BuiltinOperator_UNIQUE, 1}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, 1}, "1.14.0"},
{{BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, 2}, "1.14.0"},