Skip to content

Commit

Permalink
[INTEL MKL] Enabled MIN_FIRST support and primitive caching for MKL-DNN Quantize OP
Browse files Browse the repository at this point in the history
  • Loading branch information
rgomathi committed Sep 13, 2019
1 parent 163a824 commit b52e873
Show file tree
Hide file tree
Showing 4 changed files with 411 additions and 45 deletions.
28 changes: 25 additions & 3 deletions tensorflow/core/graph/mkl_layout_pass.cc
Expand Up @@ -641,7 +641,7 @@ class MklLayoutRewritePass : public GraphOptimizationPass {
kRewriteForLayoutPropagation});
rinfo_.push_back({csinfo_.quantize_v2,
mkl_op_registry::GetMklOpName(csinfo_.quantize_v2),
CopyAttrsAll, QuantizeOpRewrite,
CopyAttrsQuantizeV2, QuantizeOpRewrite,
kRewriteForLayoutPropagation});
rinfo_.push_back({csinfo_.relu, mkl_op_registry::GetMklOpName(csinfo_.relu),
CopyAttrsAll, AlwaysRewrite,
Expand Down Expand Up @@ -1551,8 +1551,9 @@ rinfo_.push_back({csinfo_.tanh_grad,
string round_mode_string;
TF_CHECK_OK(GetNodeAttr(n->def(), "mode", &mode_string));
TF_CHECK_OK(GetNodeAttr(n->def(), "round_mode", &round_mode_string));
if (mode_string != "SCALED" || round_mode_string != "HALF_TO_EVEN") {
VLOG(1) << "QuantizeOpRewrite: Mode is not SCALED and/or"
if (!((mode_string == "SCALED" && round_mode_string == "HALF_TO_EVEN") ||
(mode_string == "MIN_FIRST"))) {
VLOG(1) << "QuantizeOpRewrite: Mode is not SCALED or MIN_FIRST and/or"
<< "rounding mode is not HALF_TO_EVEN. "
<< "This case is not optimized by Intel MKL, thus using Eigen op"
<< "for Quantize op ";
Expand Down Expand Up @@ -1807,6 +1808,7 @@ rinfo_.push_back({csinfo_.tanh_grad,
// NOTE: names are alphabetically sorted.
static void CopyAttrsAll(const Node* orig_node, NodeBuilder* nb,
bool change_format = false);

static void CopyAttrsConv(const Node* orig_node, NodeBuilder* nb,
bool change_format = false);
static void CopyAttrsConv2DDepthwiseCheckConstFilter(
Expand Down Expand Up @@ -1838,6 +1840,8 @@ rinfo_.push_back({csinfo_.tanh_grad,
static void CopyAttrsQuantizedMatMulWithBias(const Node* orig_node,
NodeBuilder* nb,
bool change_format = false);
static void CopyAttrsQuantizeV2(const Node* orig_node, NodeBuilder* nb,
bool change_format = false);

// Generate a graph node in graph 'g' representing a dummy Mkl tensor node,
// using node for original node 'orig_node' and return it in '*out'.
Expand Down Expand Up @@ -2420,6 +2424,24 @@ void MklLayoutRewritePass::CopyAttrsConvCheckConstFilter(const Node* orig_node,
CopyFormatAttrsConv(orig_node, nb, strides, dilations, change_format);
}

void MklLayoutRewritePass::CopyAttrsQuantizeV2(const Node* orig_node,
NodeBuilder* nb,
bool change_format) {
DataType T;
string mode;
string round_mode;

// Get all attributes from old node.
TF_CHECK_OK(GetNodeAttr(orig_node->def(), "T", &T));
TF_CHECK_OK(GetNodeAttr(orig_node->def(), "mode", &mode));
TF_CHECK_OK(GetNodeAttr(orig_node->def(), "round_mode", &round_mode));

// Add attributes to new node.
nb->Attr("T", T);
nb->Attr("mode", mode);
nb->Attr("round_mode", round_mode);
}

void MklLayoutRewritePass::CopyAttrsConv(const Node* orig_node, NodeBuilder* nb,
bool change_format) {
DataType T;
Expand Down

0 comments on commit b52e873

Please sign in to comment.