
Lite: Fully_connected Op code refactored #26970

Merged
merged 1 commit on Mar 25, 2019
53 changes: 16 additions & 37 deletions tensorflow/lite/kernels/fully_connected.cc
@@ -277,17 +277,6 @@ TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
   return kTfLiteOk;
 }
 
-#define TF_LITE_MACRO_DISPATCH(macro_name, params, target_namespace) \
-  if (params->activation == kTfLiteActNone) {                        \
-    macro_name(target_namespace, kNone);                             \
-  }                                                                  \
-  if (params->activation == kTfLiteActRelu) {                        \
-    macro_name(target_namespace, kRelu);                             \
-  }                                                                  \
-  if (params->activation == kTfLiteActRelu6) {                       \
-    macro_name(target_namespace, kRelu6);                            \
-  }
-
 namespace {
 void FullyConnectedInt8(const OpData* data, const TfLiteTensor* input,
                         const TfLiteTensor* filter, const TfLiteTensor* bias,
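Both TF_LITE_MACRO_DISPATCH hunks (this #define and the matching #undef removed further down) appear to be dead-code cleanup: nothing in the file invokes the macro. For context, a minimal sketch of how such an activation-dispatch macro would be used if it were live; the FC_CALL macro and the per-activation functions below are hypothetical, only the kNone/kRelu/kRelu6 tags mirror the definition above:

    namespace reference_ops {
    // Hypothetical per-activation kernels the dispatch would fan out to.
    inline void FullyConnected_kNone() {}
    inline void FullyConnected_kRelu() {}
    inline void FullyConnected_kRelu6() {}
    }  // namespace reference_ops

    // Matches the macro_name(target_namespace, activation) shape expected by
    // TF_LITE_MACRO_DISPATCH: token-pastes the activation tag into a call.
    #define FC_CALL(target_namespace, activation) \
      target_namespace::FullyConnected_##activation();

    // Usage, given a params struct with an `activation` field:
    //   TF_LITE_MACRO_DISPATCH(FC_CALL, params, reference_ops);
    // would expand to three if-blocks, each calling the matching
    // FullyConnected_* kernel for kTfLiteActNone/Relu/Relu6.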
@@ -343,38 +332,29 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
     TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/1);
     return EvalHybrid(context, node, params, data, input, filter, bias,
                       input_quantized, scaling_factors, output);
-  } else if (kernel_type == kReference) {
-    switch (output->type) {
-      case kTfLiteUInt8:
-        TF_LITE_FULLY_CONNECTED(reference_ops, uint8_t);
-        break;
-      case kTfLiteInt8:
-        FullyConnectedInt8(data, input, filter, bias, output, gemm_context);
-        break;
-      case kTfLiteInt16:
-        TF_LITE_FULLY_CONNECTED(reference_ops, int16_t);
-        break;
-      default:
-        context->ReportError(
-            context,
-            "Quantized FullyConnected expects output data type uint8 or int16");
-        return kTfLiteError;
-    }
   } else {
     switch (output->type) {
       case kTfLiteUInt8:
-        TF_LITE_FULLY_CONNECTED(optimized_ops, uint8_t);
+        if (kernel_type == kReference) {
+          TF_LITE_FULLY_CONNECTED(reference_ops, uint8_t);
+        } else {
+          TF_LITE_FULLY_CONNECTED(optimized_ops, uint8_t);
+        }
         break;
       case kTfLiteInt8:
         FullyConnectedInt8(data, input, filter, bias, output, gemm_context);
         break;
       case kTfLiteInt16:
-        TF_LITE_FULLY_CONNECTED(optimized_ops, int16_t);
+        if (kernel_type == kReference) {
+          TF_LITE_FULLY_CONNECTED(reference_ops, int16_t);
+        } else {
+          TF_LITE_FULLY_CONNECTED(optimized_ops, int16_t);
+        }
         break;
       default:
-        context->ReportError(
-            context,
-            "Quantized FullyConnected expects output data type uint8 or int16");
+        context->ReportError(context,
+                             "Quantized FullyConnected expects output data "
+                             "type uint8, int8 or int16");
         return kTfLiteError;
     }
   }
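Note on the collapsed switch: kernel_type here is a template parameter (as with Eval in the next hunk), so the new inner if (kernel_type == kReference) is resolved at compile time and each instantiation keeps only one of the two TF_LITE_FULLY_CONNECTED calls. A minimal standalone sketch of that pattern, with hypothetical names:

    #include <cstdio>

    enum KernelType { kReference, kGenericOptimized };

    // kernel_type is a compile-time constant, so the compiler folds the
    // branch: each instantiation retains exactly one of the two calls.
    template <KernelType kernel_type>
    void Dispatch() {
      if (kernel_type == kReference) {
        std::puts("reference path");
      } else {
        std::puts("optimized path");
      }
    }

    int main() {
      Dispatch<kReference>();         // prints "reference path"
      Dispatch<kGenericOptimized>();  // prints "optimized path"
    }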
@@ -457,8 +437,6 @@ TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
   return kTfLiteOk;
 }
 
-#undef TF_LITE_MACRO_DISPATCH
-
 template <KernelType kernel_type>
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
@@ -501,8 +479,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
         return kTfLiteError;
       }
     default:
-      context->ReportError(context, "Type %d not currently supported.",
-                           filter->type);
+      context->ReportError(context,
+                           "Filter data type %s currently not supported.",
+                           TfLiteTypeGetName(filter->type));
       return kTfLiteError;
   }
   return kTfLiteOk;
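The reworked message also swaps the raw enum value (%d) for a readable name via TfLiteTypeGetName, TFLite's helper that maps a TfLiteType to a short string such as "FLOAT32" or "INT8". A sketch of the before/after difference, using a local stand-in for the real helper:

    #include <cstdio>

    // Local stand-in shaped like TFLite's TfLiteTypeGetName; the real helper
    // covers every TfLiteType value. The enum values match the TFLite schema.
    enum TfLiteType { kTfLiteFloat32 = 1, kTfLiteComplex64 = 8 };
    const char* TfLiteTypeGetName(TfLiteType type) {
      switch (type) {
        case kTfLiteFloat32:   return "FLOAT32";
        case kTfLiteComplex64: return "COMPLEX64";
        default:               return "UNKNOWN";
      }
    }

    int main() {
      TfLiteType filter_type = kTfLiteComplex64;
      // Before: prints the bare enum value ("Type 8 not currently supported.").
      std::printf("Type %d not currently supported.\n", filter_type);
      // After: prints a readable type name instead.
      std::printf("Filter data type %s currently not supported.\n",
                  TfLiteTypeGetName(filter_type));
    }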