Merge pull request #34484 from wwwind:fix_minimum_maximum
PiperOrigin-RevId: 285789493
tensorflower-gardener committed Dec 16, 2019
2 parents 59c8947 + fe404e6 commit ea12cc7
Showing 6 changed files with 94 additions and 1 deletion.
2 changes: 2 additions & 0 deletions tensorflow/lite/tools/optimize/BUILD
@@ -237,6 +237,8 @@ tf_cc_test(
"//tensorflow/lite/tools/optimize:testdata/lstm_calibrated2.bin",
"//tensorflow/lite/tools/optimize:testdata/lstm_quantized.bin",
"//tensorflow/lite/tools/optimize:testdata/lstm_quantized2.bin",
"//tensorflow/lite/tools/optimize:testdata/maximum.bin",
"//tensorflow/lite/tools/optimize:testdata/minimum.bin",
"//tensorflow/lite/tools/optimize:testdata/mixed.bin",
"//tensorflow/lite/tools/optimize:testdata/multi_input_add_reshape.bin",
"//tensorflow/lite/tools/optimize:testdata/single_avg_pool_min_minus_5_max_plus_5.bin",
85 changes: 84 additions & 1 deletion tensorflow/lite/tools/optimize/quantize_model_test.cc
@@ -1201,6 +1201,90 @@ TEST_F(QuantizeCustomOpTest, VerifyMixedQuantization) {
}
}

class QuantizeMinimumMaximumTest
: public QuantizeModelTest,
public testing::WithParamInterface<const char*> {
protected:
QuantizeMinimumMaximumTest() {
input_model_ = ReadModel(GetParam());
readonly_model_ = input_model_->GetModel();
readonly_model_->UnPackTo(&model_);
}
};

TEST_P(QuantizeMinimumMaximumTest, VerifyMinimumMaximum) {
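// Quantize the float test model read in the fixture; the call should succeed.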
auto status = QuantizeModel(&builder_, &model_, &error_reporter_);
ASSERT_EQ(kTfLiteOk, status);
const auto& subgraph = model_.subgraphs[0];

// Check that the first op is Quantize and the last is Dequantize.
const auto& quant_op = subgraph->operators[0];
const auto& dequant_op = subgraph->operators[subgraph->operators.size() - 1];
const int32_t quant_idx = quant_op->opcode_index;
const int32_t dequant_idx = dequant_op->opcode_index;
EXPECT_EQ(model_.operator_codes[quant_idx]->builtin_code,
BuiltinOperator_QUANTIZE);
EXPECT_EQ(model_.operator_codes[dequant_idx]->builtin_code,
BuiltinOperator_DEQUANTIZE);
const auto& requant1 = subgraph->operators[1].get();
// Check that this is a requantize (Quantize) operator.
auto requant1_builtin_code =
model_.operator_codes[requant1->opcode_index].get()->builtin_code;
ASSERT_TRUE(requant1_builtin_code == tflite::BuiltinOperator_QUANTIZE);

const auto& requant2 = subgraph->operators[2].get();
// Check that this is a requantize (Quantize) operator.
auto requant2_builtin_code =
model_.operator_codes[requant2->opcode_index].get()->builtin_code;
ASSERT_TRUE(requant2_builtin_code == tflite::BuiltinOperator_QUANTIZE);

const auto& op = subgraph->operators[3].get();

// Check that we have a MINIMUM or MAXIMUM operator.
auto op_builtin_code =
model_.operator_codes[op->opcode_index].get()->builtin_code;
ASSERT_TRUE(op_builtin_code == tflite::BuiltinOperator_MINIMUM ||
op_builtin_code == tflite::BuiltinOperator_MAXIMUM);

// Check that we have two inputs and one output.
ASSERT_EQ(op->inputs.size(), 2);
ASSERT_EQ(op->outputs.size(), 1);

// Check that all tensors are quantized.
auto output = subgraph->tensors[op->outputs[0]].get();
auto input1 = subgraph->tensors[op->inputs[0]].get();
auto input2 = subgraph->tensors[op->inputs[1]].get();

EXPECT_EQ(output->type, TensorType_INT8);
EXPECT_EQ(input1->type, TensorType_INT8);
EXPECT_EQ(input2->type, TensorType_INT8);

// Check that the quantization params of the minimum/maximum inputs match
// after requantization.
EXPECT_EQ(input1->quantization->scale, input2->quantization->scale);
EXPECT_EQ(input1->quantization->zero_point, input2->quantization->zero_point);

// Check the input quantization params match the output ones.
EXPECT_EQ(output->quantization->scale, input1->quantization->scale);
EXPECT_EQ(output->quantization->zero_point, input1->quantization->zero_point);
EXPECT_EQ(output->quantization->scale, input2->quantization->scale);
EXPECT_EQ(output->quantization->zero_point, input2->quantization->zero_point);

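// The quantized subgraph should contain exactly seven tensors, checked by name below.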
EXPECT_EQ(subgraph->tensors.size(), 7);

EXPECT_EQ(subgraph->tensors[0]->name, "input_int8");
EXPECT_EQ(subgraph->tensors[1]->name, "output_int8");
EXPECT_EQ(subgraph->tensors[2]->name, "output/y");
EXPECT_EQ(subgraph->tensors[3]->name, "input_requantized");
EXPECT_EQ(subgraph->tensors[4]->name, "output/y_requantized");
EXPECT_EQ(subgraph->tensors[5]->name, "input");
EXPECT_EQ(subgraph->tensors[6]->name, "output");
}

INSTANTIATE_TEST_SUITE_P(MinimumMaximumTestInst, QuantizeMinimumMaximumTest,
testing::ValuesIn({internal::kModelWithMinimumOp,
internal::kModelWithMaximumOp}));

class QuantizeUnpackTest : public QuantizeModelTest {
protected:
QuantizeUnpackTest() {
@@ -1209,7 +1293,6 @@ class QuantizeUnpackTest : public QuantizeModelTest {
readonly_model_->UnPackTo(&model_);
}
};

TEST_F(QuantizeUnpackTest, VerifyUnpack) {
auto status = QuantizeModel(&builder_, &model_, &error_reporter_);

2 changes: 2 additions & 0 deletions tensorflow/lite/tools/optimize/test_util.cc
@@ -54,6 +54,8 @@ const char* kModelSplit = "split.bin";
const char* kLstmCalibrated = "lstm_calibrated.bin";
const char* kLstmQuantized = "lstm_quantized.bin";

const char* kModelWithMinimumOp = "minimum.bin";
const char* kModelWithMaximumOp = "maximum.bin";
const char* kLstmCalibrated2 = "lstm_calibrated2.bin";
const char* kLstmQuantized2 = "lstm_quantized2.bin";

6 changes: 6 additions & 0 deletions tensorflow/lite/tools/optimize/test_util.h
@@ -84,6 +84,12 @@ extern const char* kModelSplit;
extern const char* kLstmCalibrated;
extern const char* kLstmQuantized;

// Test model with a minimum op.
extern const char* kModelWithMinimumOp;

// Test model with a maximum op.
extern const char* kModelWithMaximumOp;

// Test model with LSTM op that has peephole, without layer norm, without
// projection, without cifg.
extern const char* kLstmCalibrated2;
Binary file added tensorflow/lite/tools/optimize/testdata/maximum.bin
Binary file not shown.
Binary file added tensorflow/lite/tools/optimize/testdata/minimum.bin
Binary file not shown.
