diff --git a/ngraph_bridge/ngraph_mark_for_clustering.cc b/ngraph_bridge/ngraph_mark_for_clustering.cc
index 37ef9ce57..b7c8028aa 100644
--- a/ngraph_bridge/ngraph_mark_for_clustering.cc
+++ b/ngraph_bridge/ngraph_mark_for_clustering.cc
@@ -49,11 +49,6 @@ namespace ngraph_bridge {
 // "_ngraph_marked_for_clustering" set to "true". Additional metadata (Static
 // Inputs) for the op is also set.
 
-using ConfirmationFunction = std::function<Status(Node*, bool*)>;
-using TypeConstraintMap =
-    std::map<std::string, std::map<std::string, gtl::ArraySlice<DataType>>>;
-using SetAttributesFunction = std::function<Status(Node*)>;
-
 // Different Checks before we mark for clustering
 //
 // Utility function to check if placement on the NGRAPH device has been
@@ -75,20 +70,23 @@ static Status CheckIfOutputNode(const Node* node,
 // Checks if the node's inputs meet all the type constraints
 static Status TypeConstraintOk(Node* node,
-                               TypeConstraintMap& type_constraint_map,
+                               const TypeConstraintMap& type_constraint_map,
                                bool& type_constraints_ok) {
   type_constraints_ok = true;
-  for (auto& name_and_set : type_constraint_map[node->type_string()]) {
-    auto& type_attr_name = name_and_set.first;
-    auto& allowed_types = name_and_set.second;
-
-    DataType dt;
-
-    if (GetNodeAttr(node->attrs(), type_attr_name, &dt) != Status::OK() ||
-        std::find(allowed_types.begin(), allowed_types.end(), dt) ==
-            allowed_types.end()) {
-      type_constraints_ok = false;
-      break;
+  const auto& itr = type_constraint_map.find(node->type_string());
+  if (itr != type_constraint_map.end()) {
+    for (const auto& name_and_set : itr->second) {
+      auto& type_attr_name = name_and_set.first;
+      auto& allowed_types = name_and_set.second;
+
+      DataType dt;
+
+      if (GetNodeAttr(node->attrs(), type_attr_name, &dt) != Status::OK() ||
+          std::find(allowed_types.begin(), allowed_types.end(), dt) ==
+              allowed_types.end()) {
+        type_constraints_ok = false;
+        break;
+      }
     }
   }
   return Status::OK();
@@ -141,7 +139,8 @@ static ConfirmationFunction SimpleConfirmationFunction() {
 // Check if op is supported by backend using is_supported API
 Status IsSupportedByBackend(
     const Node* node, const ng::runtime::Backend* op_backend,
-    std::map<std::string, std::set<std::shared_ptr<ng::Node>>>& TFtoNgraphOpMap,
+    const std::map<std::string, std::set<std::shared_ptr<ng::Node>>>&
+        TFtoNgraphOpMap,
     bool& is_supported) {
   is_supported = true;
@@ -163,24 +162,89 @@ Status IsSupportedByBackend(
   return Status::OK();
 }
 
-//
-// Main entry point for the marking pass.
-//
-Status MarkForClustering(Graph* graph, const std::set<string> skip_these_nodes,
-                         const string& current_backend) {
+const std::map<std::string, SetAttributesFunction>& GetAttributeSetters() {
   //
-  // A map of op types (e.g. "Add") to type constraint maps. For (fake)
-  // example:
+  // A map of op types (e.g. "Add") to set_attribute functions. These can be
+  // used to set any additional attributes. For example:
   //
-  //    type_constraint_map["Cast"]["SrcT"] = {DT_FLOAT, DT_BOOL};
-  //    type_constraint_map["Cast"]["DstT"] = {DT_DOUBLE, DT_INT16};
+  //    set_attributes_map["MyOp"] = [](Node* n) {
+  //      if (n->condition()) {
+  //        int dummy = 5;
+  //        n->AddAttr("_ngraph_dummy_attr", dummy);
+  //      }
   //
-  // ...would mean that for the "Cast" op, the "SrcT" type variable can be
-  // DT_FLOAT or DT_BOOL, and the "DstT" type variable can be DT_DOUBLE or
-  // DT_INT16.
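The `TypeConstraintOk` change above is what lets the map parameter become `const`: `std::map::operator[]` default-inserts a missing key, so looking up an op with no registered constraints would silently grow the table (and will not compile against a `const` map), whereas `find` leaves it untouched. A minimal standalone sketch of the same lookup pattern, using plain standard-library stand-ins rather than the bridge's real `TypeConstraintMap`:

```cpp
#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for TypeConstraintMap: op name -> attr name -> allowed "types".
using ConstraintMap =
    std::map<std::string, std::map<std::string, std::vector<int>>>;

// Returns true when every constrained attribute of `op` holds an allowed
// value. The const map forces find(); operator[] would try to insert.
bool TypeConstraintOk(const ConstraintMap& constraints, const std::string& op,
                      const std::map<std::string, int>& node_attrs) {
  const auto it = constraints.find(op);
  if (it == constraints.end()) return true;  // no constraints registered
  for (const auto& name_and_set : it->second) {
    const auto attr = node_attrs.find(name_and_set.first);
    const auto& allowed = name_and_set.second;
    if (attr == node_attrs.end() ||
        std::find(allowed.begin(), allowed.end(), attr->second) ==
            allowed.end()) {
      return false;
    }
  }
  return true;
}

int main() {
  const ConstraintMap constraints{{"Cast", {{"SrcT", {1, 2}}, {"DstT", {3}}}}};
  std::cout << TypeConstraintOk(constraints, "Cast", {{"SrcT", 1}, {"DstT", 3}})
            << "\n";  // prints 1
  std::cout << TypeConstraintOk(constraints, "Cast", {{"SrcT", 4}, {"DstT", 3}})
            << "\n";  // prints 0
}
```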
+  //      vector<int32> static_input_index = {5};
+  //      n->AddAttr("_ngraph_static_inputs", static_input_index);
+  //      return Status::OK();
+  //    };
   //
-  static TypeConstraintMap type_constraint_map;
+  static std::map<std::string, SetAttributesFunction> set_attributes_map;
+  static bool initialized = false;
+
+  if (!initialized) {
+    // Set Additional Attributes (if any)
+    set_attributes_map["Any"] = SetStaticInputs({1});
+    set_attributes_map["All"] = SetStaticInputs({1});
+    set_attributes_map["ArgMax"] = SetStaticInputs({1});
+    set_attributes_map["ArgMin"] = SetStaticInputs({1});
+    set_attributes_map["AvgPoolGrad"] = SetStaticInputs({0});
+    set_attributes_map["ConcatV2"] = SetStaticInputs({-1});
+    set_attributes_map["CombinedNonMaxSuppression"] =
+        SetStaticInputs({2, 3, 4, 5});
+    set_attributes_map["Conv2DBackpropFilter"] = SetStaticInputs({1});
+    set_attributes_map["Conv2DBackpropInput"] = SetStaticInputs({0});
+    set_attributes_map["ExpandDims"] = SetStaticInputs({1});
+    set_attributes_map["Fill"] = SetStaticInputs({0});
+    set_attributes_map["GatherV2"] = SetStaticInputs({2});
+    set_attributes_map["Max"] = SetStaticInputs({1});
+    set_attributes_map["Mean"] = SetStaticInputs({1});
+    set_attributes_map["Min"] = SetStaticInputs({1});
+    set_attributes_map["NonMaxSuppressionV4"] = SetStaticInputs({2, 3, 4});
+    set_attributes_map["OneHot"] = SetStaticInputs({1});
+    set_attributes_map["Pad"] = SetStaticInputs({1});
+    set_attributes_map["Prod"] = SetStaticInputs({1});
+
+    set_attributes_map["QuantizeAndDequantizeV2"] = SetStaticInputs({1, 2});
+    set_attributes_map["QuantizedConcat"] = [](Node* n) {
+      SetStaticInputs(n, {0});  // the axis
+      auto num_of_tensors_to_concat = (n->num_inputs() - 1) / 3;
+      // mark all mins and maxes static
+      for (int idx = num_of_tensors_to_concat + 1; idx < n->num_inputs();
+           idx++) {
+        SetStaticInputs(n, {idx});
+      }
+      return Status::OK();
+    };
+    set_attributes_map["QuantizedConcatV2"] = [](Node* n) {
+      auto num_of_tensors_to_concat = (n->num_inputs() - 1) / 3;
+      // mark axis, all mins and maxes static
+      std::vector<int32> static_input_vec;
+      for (int idx = num_of_tensors_to_concat; idx < n->num_inputs(); idx++) {
+        static_input_vec.push_back(idx);
+      }
+      SetStaticInputs(n, static_input_vec);
+      return Status::OK();
+    };
+    set_attributes_map["RandomUniform"] = SetStaticInputs({0});
+    set_attributes_map["Reshape"] = SetStaticInputs({1});
+    set_attributes_map["ResizeBilinear"] = SetStaticInputs({1});
+    set_attributes_map["ScatterNd"] = SetStaticInputs({2});
+    set_attributes_map["Slice"] = SetStaticInputs({1, 2});
+    set_attributes_map["Split"] = SetStaticInputs({0});
+    set_attributes_map["SplitV"] = SetStaticInputs({1, 2});
+    set_attributes_map["StridedSlice"] = SetStaticInputs({1, 2, 3});
+    set_attributes_map["Sum"] = SetStaticInputs({1});
+    set_attributes_map["TopKV2"] = SetStaticInputs({1});
+    set_attributes_map["Tile"] = SetStaticInputs({1});
+    set_attributes_map["Transpose"] = SetStaticInputs({1});
+    set_attributes_map["UnsortedSegmentSum"] = SetStaticInputs({2});
+    initialized = true;
+  }
+  return set_attributes_map;
+}
+
+const std::map<std::string, ConfirmationFunction>& GetConfirmationMap() {
   //
   // A map of op types (e.g. "Add") to confirmation functions. These can be
   // used to check arbitrary constraints. For example:
@@ -198,27 +262,431 @@ Status MarkForClustering(Graph* graph, const std::set<string> skip_these_nodes,
   // The foregoing function checks every "MyOp" node to make sure that it does
   // not have the attribute "my_unsupported_attr", and rejects placement if it
   // does.
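The `(n->num_inputs() - 1) / 3` arithmetic in the `QuantizedConcat`/`QuantizedConcatV2` setters above follows from the ops' input layout: for `QuantizedConcatV2`, N data tensors are followed by one axis input, then N mins and N maxes, so `num_inputs == 3N + 1` and everything from index N onward must be static. A worked sketch of just that index computation (standalone toy signature, not the bridge's `SetStaticInputs` helper):

```cpp
#include <iostream>
#include <vector>

// QuantizedConcatV2 inputs are laid out as:
//   [0, N)       N tensors to concatenate
//   [N]          axis
//   [N+1, 3N+1)  N mins followed by N maxes
// so num_inputs == 3N + 1, and index N onward must be known at compile time.
std::vector<int> StaticInputsForQuantizedConcatV2(int num_inputs) {
  const int num_tensors = (num_inputs - 1) / 3;
  std::vector<int> static_inputs;
  for (int idx = num_tensors; idx < num_inputs; ++idx) {
    static_inputs.push_back(idx);
  }
  return static_inputs;
}

int main() {
  // Concatenating 2 tensors: inputs 0,1 are data; 2 is the axis;
  // 3,4 are mins; 5,6 are maxes.
  for (int idx : StaticInputsForQuantizedConcatV2(7)) std::cout << idx << " ";
  std::cout << "\n";  // prints: 2 3 4 5 6
}
```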
- static std::map confirmation_function_map; + static bool initialized = false; + if (!initialized) { + // + // Initialize confirmation function map. + // + // Please keep these in alphabetical order by op name. + // + confirmation_function_map["Abs"] = SimpleConfirmationFunction(); + confirmation_function_map["Add"] = SimpleConfirmationFunction(); + confirmation_function_map["AddN"] = SimpleConfirmationFunction(); + confirmation_function_map["AddV2"] = SimpleConfirmationFunction(); + confirmation_function_map["Any"] = SimpleConfirmationFunction(); + confirmation_function_map["All"] = SimpleConfirmationFunction(); + confirmation_function_map["ArgMax"] = SimpleConfirmationFunction(); + confirmation_function_map["ArgMin"] = SimpleConfirmationFunction(); + confirmation_function_map["Atan2"] = SimpleConfirmationFunction(); + confirmation_function_map["AvgPool"] = SimpleConfirmationFunction(); + confirmation_function_map["AvgPoolGrad"] = SimpleConfirmationFunction(); + confirmation_function_map["BatchMatMul"] = SimpleConfirmationFunction(); + confirmation_function_map["BatchMatMulV2"] = SimpleConfirmationFunction(); + confirmation_function_map["BiasAdd"] = SimpleConfirmationFunction(); + confirmation_function_map["BiasAddGrad"] = SimpleConfirmationFunction(); + confirmation_function_map["Cast"] = SimpleConfirmationFunction(); + confirmation_function_map["ConcatV2"] = SimpleConfirmationFunction(); + confirmation_function_map["Const"] = SimpleConfirmationFunction(); + confirmation_function_map["Conv2D"] = SimpleConfirmationFunction(); + confirmation_function_map["Conv2DBackpropFilter"] = + SimpleConfirmationFunction(); + confirmation_function_map["Conv2DBackpropInput"] = + SimpleConfirmationFunction(); + confirmation_function_map["Conv3D"] = SimpleConfirmationFunction(); + confirmation_function_map["CropAndResize"] = SimpleConfirmationFunction(); + confirmation_function_map["Cos"] = SimpleConfirmationFunction(); + confirmation_function_map["Cumsum"] = SimpleConfirmationFunction(); + confirmation_function_map["DepthwiseConv2dNative"] = + SimpleConfirmationFunction(); + confirmation_function_map["DepthToSpace"] = [](Node* n, bool* result) { + std::string tf_data_format; + TF_RETURN_IF_ERROR( + GetNodeAttr(n->attrs(), "data_format", &tf_data_format)); + *result = tf_data_format != "NCHW_VECT_C"; + return Status::OK(); + }; + confirmation_function_map["Dequantize"] = [](Node* n, bool* result) { + string mode; + TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "mode", &mode)); + *result = (mode.compare("SCALED") == 0); + return Status::OK(); + }; + confirmation_function_map["Equal"] = SimpleConfirmationFunction(); + confirmation_function_map["Exp"] = SimpleConfirmationFunction(); + confirmation_function_map["ExpandDims"] = SimpleConfirmationFunction(); + confirmation_function_map["Fill"] = SimpleConfirmationFunction(); + confirmation_function_map["Floor"] = SimpleConfirmationFunction(); + confirmation_function_map["FloorDiv"] = SimpleConfirmationFunction(); + confirmation_function_map["FloorMod"] = SimpleConfirmationFunction(); + confirmation_function_map["FusedBatchNorm"] = SimpleConfirmationFunction(); + confirmation_function_map["FusedBatchNormV2"] = + SimpleConfirmationFunction(); + confirmation_function_map["FusedBatchNormV3"] = + SimpleConfirmationFunction(); + confirmation_function_map["FusedBatchNormGrad"] = [](Node* n, + bool* result) { + TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "is_training", result)); + return Status::OK(); + }; + confirmation_function_map["FusedBatchNormGradV3"] = [](Node* n, 
+ bool* result) { + TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "is_training", result)); + return Status::OK(); + }; + confirmation_function_map["_FusedConv2D"] = SimpleConfirmationFunction(); + confirmation_function_map["GatherNd"] = SimpleConfirmationFunction(); + confirmation_function_map["GatherV2"] = SimpleConfirmationFunction(); + confirmation_function_map["_FusedMatMul"] = + SimpleConfirmationFunction(); // TODO accept under all conditions? + // check? + confirmation_function_map["Greater"] = SimpleConfirmationFunction(); + confirmation_function_map["GreaterEqual"] = SimpleConfirmationFunction(); +#if defined NGRAPH_DISTRIBUTED + confirmation_function_map["HorovodAllreduce"] = + SimpleConfirmationFunction(); + confirmation_function_map["HorovodBroadcast"] = + SimpleConfirmationFunction(); +#endif + confirmation_function_map["Identity"] = SimpleConfirmationFunction(); + confirmation_function_map["IsFinite"] = SimpleConfirmationFunction(); + confirmation_function_map["L2Loss"] = SimpleConfirmationFunction(); + confirmation_function_map["LogSoftmax"] = SimpleConfirmationFunction(); + confirmation_function_map["Less"] = SimpleConfirmationFunction(); + confirmation_function_map["LessEqual"] = SimpleConfirmationFunction(); + confirmation_function_map["Log"] = SimpleConfirmationFunction(); + confirmation_function_map["LogicalAnd"] = SimpleConfirmationFunction(); + confirmation_function_map["LogicalNot"] = SimpleConfirmationFunction(); + confirmation_function_map["LogicalOr"] = SimpleConfirmationFunction(); + confirmation_function_map["MatMul"] = SimpleConfirmationFunction(); + confirmation_function_map["Max"] = SimpleConfirmationFunction(); + confirmation_function_map["Maximum"] = SimpleConfirmationFunction(); + confirmation_function_map["MaxPool"] = SimpleConfirmationFunction(); + confirmation_function_map["MaxPool3D"] = SimpleConfirmationFunction(); + confirmation_function_map["MaxPoolGrad"] = SimpleConfirmationFunction(); + confirmation_function_map["Mean"] = SimpleConfirmationFunction(); + confirmation_function_map["Min"] = SimpleConfirmationFunction(); + confirmation_function_map["Minimum"] = SimpleConfirmationFunction(); + confirmation_function_map["Mul"] = SimpleConfirmationFunction(); + confirmation_function_map["Neg"] = SimpleConfirmationFunction(); + confirmation_function_map["NoOp"] = SimpleConfirmationFunction(); + confirmation_function_map["OneHot"] = SimpleConfirmationFunction(); + confirmation_function_map["Pad"] = SimpleConfirmationFunction(); + confirmation_function_map["Pow"] = SimpleConfirmationFunction(); + confirmation_function_map["PreventGradient"] = SimpleConfirmationFunction(); + confirmation_function_map["Prod"] = SimpleConfirmationFunction(); + confirmation_function_map["Rank"] = SimpleConfirmationFunction(); + confirmation_function_map["RandomUniform"] = SimpleConfirmationFunction(); + confirmation_function_map["QuantizeAndDequantizeV2"] = [](Node* n, + bool* result) { + // accept only when num_bits == 8 and range is given + bool range_given; + TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "range_given", &range_given)); + int num_bits; + TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "num_bits", &num_bits)); + *result = (num_bits == 8) && range_given; + return Status::OK(); + }; + confirmation_function_map["QuantizedAvgPool"] = + SimpleConfirmationFunction(); + confirmation_function_map["QuantizedConcat"] = SimpleConfirmationFunction(); + confirmation_function_map["QuantizedConcatV2"] = + SimpleConfirmationFunction(); + 
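A confirmation function only inspects node attributes and votes on placement, as in the `QuantizeAndDequantizeV2` lambda above (accept only when `num_bits == 8` and the range is given). A self-contained toy of that shape, with stand-in `Node`/`Status` types rather than TensorFlow's:

```cpp
#include <functional>
#include <map>
#include <string>

// Toy stand-ins, just to show the shape of a confirmation function:
// it reads node attributes and sets *result to accept or reject placement.
struct Node {
  std::map<std::string, int> int_attrs;
  std::map<std::string, bool> bool_attrs;
};
using Status = int;  // 0 == OK, loosely mirroring Status::OK()
using ConfirmationFunction = std::function<Status(Node*, bool*)>;

int main() {
  std::map<std::string, ConfirmationFunction> confirmation_function_map;
  // Same gate as the QuantizeAndDequantizeV2 lambda above.
  confirmation_function_map["QuantizeAndDequantizeV2"] = [](Node* n,
                                                            bool* result) {
    *result = (n->int_attrs["num_bits"] == 8) && n->bool_attrs["range_given"];
    return 0;
  };

  Node node{{{"num_bits", 8}}, {{"range_given", true}}};
  bool confirmed = false;
  confirmation_function_map["QuantizeAndDequantizeV2"](&node, &confirmed);
  return confirmed ? 0 : 1;  // exits 0: the node is accepted
}
```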
confirmation_function_map["QuantizedConv2DWithBiasAndReluAndRequantize"] = + SimpleConfirmationFunction(); + confirmation_function_map["QuantizedConv2DWithBiasAndRequantize"] = + SimpleConfirmationFunction(); + confirmation_function_map + ["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"] = + SimpleConfirmationFunction(); + confirmation_function_map + ["QuantizedConv2DWithBiasSumAndReluAndRequantize"] = + SimpleConfirmationFunction(); + confirmation_function_map["QuantizedMaxPool"] = + SimpleConfirmationFunction(); + confirmation_function_map["QuantizeV2"] = [](Node* n, bool* result) { + string mode; + TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "mode", &mode)); + *result = (mode.compare("SCALED") == 0); + return Status::OK(); + }; + confirmation_function_map["RealDiv"] = SimpleConfirmationFunction(); + confirmation_function_map["Reciprocal"] = SimpleConfirmationFunction(); + confirmation_function_map["Relu"] = SimpleConfirmationFunction(); + confirmation_function_map["Relu6"] = SimpleConfirmationFunction(); + confirmation_function_map["ReluGrad"] = SimpleConfirmationFunction(); + confirmation_function_map["Reshape"] = SimpleConfirmationFunction(); + confirmation_function_map["ResizeBilinear"] = SimpleConfirmationFunction(); + confirmation_function_map["Rsqrt"] = SimpleConfirmationFunction(); + confirmation_function_map["RsqrtGrad"] = SimpleConfirmationFunction(); + confirmation_function_map["ScatterNd"] = SimpleConfirmationFunction(); + confirmation_function_map["Select"] = SimpleConfirmationFunction(); + confirmation_function_map["Shape"] = SimpleConfirmationFunction(); + confirmation_function_map["Sigmoid"] = SimpleConfirmationFunction(); + confirmation_function_map["SigmoidGrad"] = SimpleConfirmationFunction(); + confirmation_function_map["Sign"] = SimpleConfirmationFunction(); + confirmation_function_map["Sin"] = SimpleConfirmationFunction(); + confirmation_function_map["Size"] = SimpleConfirmationFunction(); + confirmation_function_map["Slice"] = SimpleConfirmationFunction(); + confirmation_function_map["Snapshot"] = SimpleConfirmationFunction(); + confirmation_function_map["Softmax"] = SimpleConfirmationFunction(); + confirmation_function_map["SoftmaxCrossEntropyWithLogits"] = + SimpleConfirmationFunction(); + confirmation_function_map["Softplus"] = SimpleConfirmationFunction(); + confirmation_function_map["SpaceToDepth"] = + confirmation_function_map["DepthToSpace"]; + confirmation_function_map["SparseSoftmaxCrossEntropyWithLogits"] = + SimpleConfirmationFunction(); + confirmation_function_map["Split"] = SimpleConfirmationFunction(); + confirmation_function_map["SplitV"] = SimpleConfirmationFunction(); + confirmation_function_map["Sqrt"] = SimpleConfirmationFunction(); + confirmation_function_map["Square"] = SimpleConfirmationFunction(); + confirmation_function_map["SquaredDifference"] = + SimpleConfirmationFunction(); + confirmation_function_map["Squeeze"] = SimpleConfirmationFunction(); + confirmation_function_map["StridedSlice"] = SimpleConfirmationFunction(); + confirmation_function_map["Pack"] = SimpleConfirmationFunction(); + confirmation_function_map["Sub"] = SimpleConfirmationFunction(); + confirmation_function_map["Sum"] = SimpleConfirmationFunction(); + confirmation_function_map["Tanh"] = SimpleConfirmationFunction(); + confirmation_function_map["TanhGrad"] = SimpleConfirmationFunction(); + confirmation_function_map["Tile"] = SimpleConfirmationFunction(); + confirmation_function_map["TopKV2"] = [](Node* n, bool* result) { + bool sorted = true; + 
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "sorted", &sorted)); + + // sorted = false is not supported right now, it falls back to TF if set + // to false. + *result = sorted; + return Status::OK(); + }; + confirmation_function_map["Transpose"] = SimpleConfirmationFunction(); + confirmation_function_map["Unpack"] = SimpleConfirmationFunction(); + confirmation_function_map["UnsortedSegmentSum"] = + SimpleConfirmationFunction(); + confirmation_function_map["ZerosLike"] = SimpleConfirmationFunction(); + initialized = true; + } + return confirmation_function_map; +} +const TypeConstraintMap& GetTypeConstraintMap() { // - // A map of op types (e.g. "Add") to set_attribute functions. These can be - // used to set any additional attributes. For example: + // A map of op types (e.g. "Add") to type constraint maps. For (fake) + // example: // - // confirmation_function_map["MyOp"] = [](Node* n) { - // if(n->condition()){ - // int dummy=5; - // n->AddAttr("_ngraph_dummy_attr", dummy); - // } + // type_constraint_map["Cast"]["SrcT"] = {DT_FLOAT, DT_BOOL}; + // type_constraint_map["Cast"]["DstT"] = {DT_DOUBLE, DT_INT16}; // - // vector static_input_index =5; - // n->AddAttr("_ngraph_static_inputs", static_input_index); - // return Status::OK(); - // }; + // ...would mean that for the "Cast" op, the "SrcT" type variable can be + // DT_FLOAT or DT_BOOL, and the "DstT" type variable can be DT_DOUBLE or + // DT_INT16. // + static bool initialized = false; + static TypeConstraintMap type_constraint_map; + if (!initialized) { + // + // Initialize type constraint map. + // + type_constraint_map["Abs"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Add"]["T"] = NGraphNumericDTypes(); + type_constraint_map["AddN"]["T"] = NGraphNumericDTypes(); + type_constraint_map["AddV2"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Any"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["All"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["ArgMax"]["T"] = NGraphNumericDTypes(); + type_constraint_map["ArgMax"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["ArgMin"]["T"] = NGraphNumericDTypes(); + type_constraint_map["ArgMin"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["Atan2"]["T"] = NGraphRealDTypes(); + type_constraint_map["AvgPool"]["T"] = NGraphNumericDTypes(); + type_constraint_map["AvgPoolGrad"]["T"] = NGraphNumericDTypes(); + type_constraint_map["BatchMatMul"]["T"] = NGraphNumericDTypes(); + type_constraint_map["BatchMatMulV2"]["T"] = NGraphNumericDTypes(); + type_constraint_map["BiasAdd"]["T"] = NGraphNumericDTypes(); + type_constraint_map["BiasAddGrad"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Cast"]["SrcT"] = NGraphDTypes(); + type_constraint_map["Cast"]["DstT"] = NGraphDTypes(); + type_constraint_map["ConcatV2"]["T"] = NGraphDTypes(); + type_constraint_map["ConcatV2"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["Const"]["dtype"] = NGraphDTypes(); + type_constraint_map["Conv2D"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Conv2DBackpropInput"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Conv3D"]["T"] = NGraphNumericDTypes(); + type_constraint_map["CropAndResize"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Cos"]["T"] = NGraphRealDTypes(); + type_constraint_map["Cumsum"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Cumsum"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["DepthToSpace"]["T"] = NGraphDTypes(); + type_constraint_map["DepthwiseConv2dNative"]["T"] = NGraphNumericDTypes(); + 
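All of the new getters share the same lazy-initialization idiom: a function-local static map plus an `initialized` flag, filled on first call. A condensed sketch of that idiom, next to the C++11 alternative where a function-local static initialized from a lambda gets one-shot, thread-safe construction from the language itself:

```cpp
#include <map>
#include <string>

// Pattern used by the getters above: build the static map once, lazily.
const std::map<std::string, int>& GetTableWithFlag() {
  static std::map<std::string, int> table;
  static bool initialized = false;
  if (!initialized) {
    table["Add"] = 1;
    table["Mul"] = 2;
    initialized = true;
  }
  return table;
}

// C++11 alternative: a function-local static initialized from an
// immediately-invoked lambda is constructed exactly once, and the language
// guarantees that initialization is thread-safe.
const std::map<std::string, int>& GetTable() {
  static const std::map<std::string, int> table = [] {
    std::map<std::string, int> t;
    t["Add"] = 1;
    t["Mul"] = 2;
    return t;
  }();
  return table;
}

int main() { return GetTable().at("Add") - GetTableWithFlag().at("Add"); }
```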
type_constraint_map["Dequantize"]["T"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["Equal"]["T"] = NGraphDTypes(); + type_constraint_map["Exp"]["T"] = NGraphNumericDTypes(); + type_constraint_map["ExpandDims"]["T"] = NGraphDTypes(); + type_constraint_map["Floor"]["T"] = NGraphNumericDTypes(); + type_constraint_map["FloorDiv"]["T"] = NGraphNumericDTypes(); + type_constraint_map["FloorMod"]["T"] = NGraphNumericDTypes(); + type_constraint_map["FusedBatchNorm"]["T"] = NGraphNumericDTypes(); + // TODO (mingshan): FusedBatchNormV2, V3 supports DT_HALF,DT_BFLOAT16, + // DT_FLOAT + type_constraint_map["FusedBatchNormV2"]["T"] = {DT_FLOAT}; + type_constraint_map["FusedBatchNormV3"]["T"] = {DT_FLOAT}; + type_constraint_map["FusedBatchNormGrad"]["T"] = NGraphNumericDTypes(); + type_constraint_map["GatherNd"]["Tparams"] = {DT_FLOAT}; // NGraphDTypes(); + type_constraint_map["GatherNd"]["Tindices"] = NGraphIndexDTypes(); + type_constraint_map["FusedBatchNormGradV3"]["T"] = NGraphNumericDTypes(); + type_constraint_map["GatherV2"]["Tparams"] = NGraphDTypes(); + type_constraint_map["GatherV2"]["Tindices"] = NGraphIndexDTypes(); + type_constraint_map["GatherV2"]["Taxis"] = NGraphIndexDTypes(); + type_constraint_map["_FusedConv2D"]["T"] = NGraphRealDTypes(); + type_constraint_map["_FusedMatMul"]["T"] = NGraphRealDTypes(); + type_constraint_map["Greater"]["T"] = NGraphDTypes(); + type_constraint_map["GreaterEqual"]["T"] = NGraphDTypes(); +#if defined NGRAPH_DISTRIBUTED + type_constraint_map["HorovodAllreduce"]["T"] = NGraphNumericDTypes(); + type_constraint_map["HorovodBroadcast"]["T"] = NGraphNumericDTypes(); +#endif + type_constraint_map["Identity"]["T"] = NGraphDTypes(); + type_constraint_map["IsFinite"]["T"] = NGraphRealDTypes(); + type_constraint_map["L2Loss"]["T"] = NGraphNumericDTypes(); + type_constraint_map["LogSoftmax"]["T"] = NGraphRealDTypes(); + type_constraint_map["Less"]["T"] = NGraphDTypes(); + type_constraint_map["LessEqual"]["T"] = NGraphDTypes(); + type_constraint_map["Log"]["T"] = NGraphNumericDTypes(); + // LogicalAnd and LogicalNot have no type attributes ("T", if it existed, + // would always be bool). 
+ type_constraint_map["MatMul"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Max"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Max"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["Maximum"]["T"] = NGraphNumericDTypes(); + type_constraint_map["MaxPool"]["T"] = NGraphNumericDTypes(); + type_constraint_map["MaxPool3D"]["T"] = NGraphNumericDTypes(); + type_constraint_map["MaxPoolGrad"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Mean"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Mean"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["Min"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Min"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["Minimum"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Mul"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Neg"]["T"] = NGraphNumericDTypes(); + type_constraint_map["NonMaxSuppressionV4"]["T"] = { + DT_FLOAT}; // TF allows half too + type_constraint_map["OneHot"]["T"] = NGraphDTypes(); + type_constraint_map["Pack"]["T"] = NGraphDTypes(); + type_constraint_map["RandomUniform"]["T"] = NGraphDTypes(); + type_constraint_map["Pad"]["T"] = NGraphDTypes(); + type_constraint_map["Pad"]["Tpaddings"] = NGraphIndexDTypes(); + type_constraint_map["Pow"]["T"] = NGraphNumericDTypes(); + type_constraint_map["PreventGradient"]["T"] = NGraphDTypes(); + type_constraint_map["Prod"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Prod"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["QuantizeAndDequantizeV2"]["T"] = NGraphRealDTypes(); + type_constraint_map["QuantizedAvgPool"]["T"] = + NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConcat"]["T"] = + NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConcatV2"]["T"] = + NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasAndReluAndRequantize"] + ["Tinput"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasAndReluAndRequantize"] + ["Tfilter"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasAndReluAndRequantize"] + ["Tbias"] = NGraphBiasDTypes(); + // TODO: check if any other type constraint is required + // https://github.com/tensorflow/tensorflow/blob/c95ca05536144451ef78ca6e2c15f0f65ebaaf95/tensorflow/core/ops/nn_ops.cc#L2780 + type_constraint_map["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"] + ["Tinput"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"] + ["Tsummand"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"] + ["Tfilter"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"] + ["Tbias"] = NGraphBiasDTypes(); + type_constraint_map["QuantizedConv2DWithBiasSumAndReluAndRequantize"] + ["Tinput"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasSumAndReluAndRequantize"] + ["Tsummand"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasSumAndReluAndRequantize"] + ["Tfilter"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasSumAndReluAndRequantize"] + ["Tbias"] = NGraphBiasDTypes(); + type_constraint_map["QuantizedConv2DWithBiasAndRequantize"]["Tinput"] = + NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizedConv2DWithBiasAndRequantize"]["Tfilter"] = + NGraphSupportedQuantizedDTypes(); + 
type_constraint_map["QuantizedConv2DWithBiasAndRequantize"]["Tbias"] = + NGraphBiasDTypes(); + type_constraint_map["QuantizedMaxPool"]["T"] = + NGraphSupportedQuantizedDTypes(); + type_constraint_map["QuantizeV2"]["T"] = NGraphSupportedQuantizedDTypes(); + type_constraint_map["Rank"]["T"] = NGraphNumericDTypes(); + type_constraint_map["RealDiv"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Reciprocal"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Relu"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Relu6"]["T"] = NGraphNumericDTypes(); + type_constraint_map["ReluGrad"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Reshape"]["T"] = NGraphDTypes(); + type_constraint_map["Reshape"]["Tshape"] = NGraphIndexDTypes(); + type_constraint_map["ResizeBilinear"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Rsqrt"]["T"] = NGraphDTypes(); + type_constraint_map["RsqrtGrad"]["T"] = NGraphRealDTypes(); + type_constraint_map["ScatterNd"]["T"] = NGraphDTypes(); + type_constraint_map["ScatterNd"]["Tindices"] = NGraphIndexDTypes(); + type_constraint_map["Select"]["T"] = NGraphDTypes(); + type_constraint_map["Shape"]["T"] = NGraphDTypes(); + type_constraint_map["Shape"]["out_type"] = NGraphIndexDTypes(); + type_constraint_map["Sigmoid"]["T"] = NGraphNumericDTypes(); + type_constraint_map["SigmoidGrad"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Sign"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Sin"]["T"] = NGraphRealDTypes(); + type_constraint_map["Size"]["T"] = NGraphDTypes(); + type_constraint_map["Size"]["out_type"] = NGraphIndexDTypes(); + type_constraint_map["Slice"]["T"] = NGraphDTypes(); + type_constraint_map["Slice"]["Index"] = NGraphIndexDTypes(); + type_constraint_map["Snapshot"]["T"] = NGraphDTypes(); + type_constraint_map["Softmax"]["T"] = NGraphNumericDTypes(); + // For SoftmaxCrossEntropyWithLogits, see + // https://github.com/tensorflow/tensorflow/blob/c95ca05536144451ef78ca6e2c15f0f65ebaaf95/tensorflow/core/ops/nn_ops.cc#L1096 + type_constraint_map["SoftmaxCrossEntropyWithLogits"]["T"] = + NGraphRealDTypes(); + type_constraint_map["Softplus"]["T"] = NGraphRealDTypes(); + type_constraint_map["SpaceToDepth"]["T"] = NGraphDTypes(); + type_constraint_map["SparseSoftmaxCrossEntropyWithLogits"]["T"] = + NGraphRealDTypes(); + type_constraint_map["SparseSoftmaxCrossEntropyWithLogits"]["Tlabels"] = + NGraphNumericDTypes(); + type_constraint_map["Split"]["T"] = NGraphDTypes(); + type_constraint_map["SplitV"]["T"] = NGraphDTypes(); + type_constraint_map["SplitV"]["Tlen"] = NGraphIndexDTypes(); + type_constraint_map["Sqrt"]["T"] = NGraphDTypes(); + type_constraint_map["Square"]["T"] = NGraphDTypes(); + type_constraint_map["SquaredDifference"]["T"] = NGraphDTypes(); + type_constraint_map["Squeeze"]["T"] = NGraphDTypes(); + type_constraint_map["StridedSlice"]["T"] = NGraphDTypes(); + type_constraint_map["StridedSlice"]["Index"] = NGraphIndexDTypes(); + type_constraint_map["Sub"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Sum"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Sum"]["Tidx"] = NGraphIndexDTypes(); + type_constraint_map["Tanh"]["T"] = NGraphNumericDTypes(); + type_constraint_map["TanhGrad"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Tile"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Tile"]["Tmultiples"] = NGraphIndexDTypes(); + type_constraint_map["TopKV2"]["T"] = NGraphNumericDTypes(); + type_constraint_map["Transpose"]["T"] = NGraphDTypes(); + type_constraint_map["Transpose"]["Tperm"] = 
NGraphIndexDTypes(); + type_constraint_map["Unpack"]["T"] = NGraphDTypes(); + type_constraint_map["UnsortedSegmentSum"]["T"] = NGraphNumericDTypes(); + type_constraint_map["UnsortedSegmentSum"]["Tindices"] = NGraphIndexDTypes(); + type_constraint_map["UnsortedSegmentSum"]["Tnumsegments"] = + NGraphIndexDTypes(); + initialized = true; + } + return type_constraint_map; +} - static std::map set_attributes_map; - +const std::map>>& +GetTFToNgOpMap() { // Constant Op, ReluGrad Op do not have default Constructor // in ngraph, so passing a dummy node auto constant = ngraph::op::Constant::create(ngraph::element::f32, @@ -232,355 +700,398 @@ Status MarkForClustering(Graph* graph, const std::set skip_these_nodes, // are supported by backend // Update this Map if a new TF Op translation is // implemented or a new Ngraph Op has been added - static std::map>> TFtoNgraphOpMap{ - {"Abs", {std::make_shared()}}, - {"Add", {std::make_shared()}}, - {"AddN", {std::make_shared()}}, - {"AddV2", {std::make_shared()}}, - {"Any", {std::make_shared()}}, - {"All", {std::make_shared()}}, - {"ArgMax", {std::make_shared()}}, - {"ArgMin", {std::make_shared()}}, - {"Atan2", {std::make_shared()}}, - {"AvgPool", {std::make_shared()}}, - {"AvgPoolGrad", {std::make_shared()}}, - {"BatchMatMul", - {std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"BatchMatMulV2", - {std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"BiasAdd", - {std::make_shared(), - std::make_shared()}}, - {"BiasAddGrad", {std::make_shared()}}, - {"Cast", {std::make_shared()}}, - {"ConcatV2", {std::make_shared()}}, - {"Const", {constant}}, - {"Conv2D", - {std::make_shared(), - std::make_shared()}}, - {"Conv2DBackpropFilter", - {std::make_shared(), - std::make_shared()}}, - {"Conv2DBackpropInput", - {std::make_shared(), - std::make_shared()}}, - {"Conv3D", - {std::make_shared(), - std::make_shared()}}, - {"Cos", {std::make_shared()}}, - {"CropAndResize", {std::make_shared()}}, - {"Cumsum", {std::make_shared()}}, - {"DepthToSpace", {std::make_shared()}}, - {"DepthwiseConv2dNative", - {std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"Dequantize", {std::make_shared()}}, - {"Equal", {std::make_shared()}}, - {"Exp", {std::make_shared()}}, - {"ExpandDims", {std::make_shared()}}, - {"Fill", {std::make_shared()}}, - {"Floor", {std::make_shared()}}, - {"FloorDiv", {std::make_shared()}}, - {"FloorMod", - {std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"FusedBatchNorm", - {std::make_shared(), - std::make_shared(), constant, - std::make_shared(), - std::make_shared()}}, - {"FusedBatchNormV2", - {std::make_shared(), - std::make_shared(), constant, - std::make_shared(), - std::make_shared()}}, - {"FusedBatchNormV3", - {std::make_shared(), - std::make_shared(), constant, - std::make_shared(), - std::make_shared()}}, - {"FusedBatchNormGrad", - {constant, std::make_shared(), - std::make_shared()}}, - {"FusedBatchNormGradV3", - {constant, std::make_shared(), - std::make_shared()}}, - {"GatherNd", {std::make_shared()}}, - {"GatherV2", {std::make_shared()}}, - {"_FusedConv2D", - {std::make_shared(), - std::make_shared(), constant, - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"_FusedMatMul", - {std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), constant, - std::make_shared(), - 
std::make_shared()}}, - {"Greater", {std::make_shared()}}, - {"GreaterEqual", {std::make_shared()}}, - {"Identity", {}}, - {"IsFinite", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"L2Loss", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"LogSoftmax", - {std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"Less", {std::make_shared()}}, - {"LessEqual", {std::make_shared()}}, - {"Log", {std::make_shared()}}, - {"LogicalAnd", {std::make_shared()}}, - {"LogicalNot", {std::make_shared()}}, - {"LogicalOr", {std::make_shared()}}, - {"MatMul", - {std::make_shared(), - std::make_shared()}}, - {"Max", {std::make_shared()}}, - {"Maximum", {std::make_shared()}}, - {"MaxPool", - {std::make_shared(), - std::make_shared()}}, - {"MaxPool3D", - {std::make_shared(), - std::make_shared()}}, - {"MaxPoolGrad", - {std::make_shared(), - std::make_shared()}}, - {"Mean", - {std::make_shared(), constant, - std::make_shared(), - std::make_shared()}}, - {"Min", {std::make_shared()}}, - {"Minimum", {std::make_shared()}}, - {"Mul", {std::make_shared()}}, - {"Neg", {std::make_shared()}}, - {"OneHot", - {std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"Pack", - {std::make_shared(), - std::make_shared()}}, - {"Pad", {constant, std::make_shared()}}, - {"Pow", {std::make_shared()}}, - {"PreventGradient", {}}, - {"Prod", {std::make_shared()}}, - {"QuantizeAndDequantizeV2", - {constant, std::make_shared(), - std::make_shared()}}, - // Next few are CPU only ops - {"QuantizedAvgPool", - {std::make_shared(), - std::make_shared()}}, - {"QuantizedConcat", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"QuantizedConcatV2", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"QuantizedConv2DWithBiasAndReluAndRequantize", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"QuantizedConv2DWithBiasAndRequantize", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"QuantizedConv2DWithBiasSumAndReluAndRequantize", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"QuantizedMaxPool", - {std::make_shared(), - std::make_shared()}}, - // End of CPU only ops - {"QuantizeV2", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared(), - 
std::make_shared()}}, - { - "RandomUniform", - {constant, std::make_shared()}, - }, - {"Rank", {constant}}, - {"RealDiv", {std::make_shared()}}, - {"Reciprocal", {constant, std::make_shared()}}, - {"Relu", {std::make_shared()}}, - {"Relu6", - {constant, std::make_shared(), - std::make_shared()}}, - {"ReluGrad", {relu}}, - // TODO: remove Convert later - {"ResizeBilinear", - {std::make_shared(), - std::make_shared()}}, - {"Rsqrt", {constant, std::make_shared()}}, - {"RsqrtGrad", - {constant, std::make_shared(), - std::make_shared()}}, - {"Select", - {std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"Reshape", {constant}}, - {"ScatterNd", {constant, std::make_shared()}}, - {"Shape", {constant}}, - {"Sigmoid", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"SigmoidGrad", - {constant, std::make_shared(), - std::make_shared()}}, - {"Sin", {std::make_shared()}}, - {"Size", {constant}}, - {"Sign", {std::make_shared()}}, - {"Slice", {std::make_shared()}}, - {"Snapshot", {}}, - {"Softmax", {std::make_shared()}}, - {"SoftmaxCrossEntropyWithLogits", - {std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"Softplus", - {constant, std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"SpaceToDepth", - {std::make_shared(), - std::make_shared()}}, - {"SparseSoftmaxCrossEntropyWithLogits", - {std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"Split", {std::make_shared()}}, - {"SplitV", {std::make_shared()}}, - {"Sqrt", {std::make_shared()}}, - {"Square", {std::make_shared()}}, - {"SquaredDifference", - {std::make_shared(), - std::make_shared()}}, - {"Squeeze", {std::make_shared()}}, - {"StridedSlice", - {std::make_shared(), - std::make_shared(), - std::make_shared()}}, - {"Sub", {std::make_shared()}}, - {"Sum", {std::make_shared()}}, - {"Tanh", {std::make_shared()}}, - {"TanhGrad", - {constant, std::make_shared(), - std::make_shared()}}, - {"Tile", {constant, std::make_shared()}}, - {"TopKV2", - {std::make_shared(), - std::make_shared()}}, - {"Transpose", {constant, std::make_shared()}}, - {"UnsortedSegmentSum", - {constant, std::make_shared()}}, - {"Unpack", - {std::make_shared(), - std::make_shared()}}, - {"ZerosLike", {constant}}, - {"HorovodAllreduce", {std::make_shared()}}, - {"HorovodBroadcast", - {std::make_shared()}}, - {"NoOp", {}}, + static std::map>> TFtoNgraphOpMap { + {"Abs", {std::make_shared()}}, + {"Add", {std::make_shared()}}, + {"AddN", {std::make_shared()}}, + {"AddV2", + {std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"Any", + {std::make_shared(), + std::make_shared(), constant, + std::make_shared()}}, + {"All", + {std::make_shared(), + std::make_shared(), constant, + std::make_shared()}}, + {"ArgMax", {std::make_shared()}}, + {"ArgMin", {std::make_shared()}}, + {"Atan2", {std::make_shared()}}, + {"AvgPool", {std::make_shared()}}, + {"AvgPoolGrad", {std::make_shared()}}, + {"BatchMatMul", + {std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"BatchMatMulV2", + {std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"BiasAdd", + {std::make_shared(), + std::make_shared()}}, + {"BiasAddGrad", {std::make_shared(), constant}}, + 
{"Cast", {std::make_shared()}}, + {"ConcatV2", {std::make_shared()}}, + {"Const", {constant}}, {"Conv2D", + {std::make_shared(), + std::make_shared()}}, + {"Conv2DBackpropFilter", + {std::make_shared(), + std::make_shared()}}, + {"Conv2DBackpropInput", + {std::make_shared(), + std::make_shared()}}, + {"Conv3D", + {std::make_shared(), + std::make_shared()}}, + {"Cos", {std::make_shared()}}, + {"CropAndResize", {std::make_shared()}}, + {"Cumsum", {std::make_shared()}}, + {"DepthToSpace", {std::make_shared()}}, + {"DepthwiseConv2dNative", + {std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"Dequantize", + {std::make_shared(), constant, + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"Equal", {std::make_shared()}}, + {"Exp", {std::make_shared()}}, + {"ExpandDims", {std::make_shared()}}, + {"Fill", {std::make_shared()}}, + {"Floor", {std::make_shared()}}, + {"FloorDiv", + {std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"FloorMod", + {std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"FusedBatchNorm", + {std::make_shared(), + std::make_shared(), constant, + std::make_shared(), + std::make_shared()}}, + {"FusedBatchNormV2", + {std::make_shared(), + std::make_shared(), constant, + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"FusedBatchNormV3", + {std::make_shared(), + std::make_shared(), constant, + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"FusedBatchNormGrad", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"FusedBatchNormGradV3", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"GatherNd", {std::make_shared()}}, + {"GatherV2", {std::make_shared()}}, + {"_FusedConv2D", + {std::make_shared(), + std::make_shared(), constant, + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"_FusedMatMul", + {std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), constant, + std::make_shared(), + std::make_shared()}}, + {"Greater", {std::make_shared()}}, + {"GreaterEqual", {std::make_shared()}}, + {"Identity", {}}, {"IsFinite", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"L2Loss", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"LogSoftmax", + {std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), constant}}, + {"Less", {std::make_shared()}}, + {"LessEqual", {std::make_shared()}}, + {"Log", {std::make_shared()}}, + {"LogicalAnd", {std::make_shared()}}, + {"LogicalNot", {std::make_shared()}}, + {"LogicalOr", {std::make_shared()}}, + {"MatMul", + {std::make_shared(), + std::make_shared()}}, + {"Max", {std::make_shared(), constant}}, + {"Maximum", + {std::make_shared(), + std::make_shared()}}, + {"MaxPool", + {std::make_shared(), + std::make_shared()}}, + {"MaxPool3D", + {std::make_shared(), + std::make_shared()}}, + {"MaxPoolGrad", + {std::make_shared(), + std::make_shared()}}, + {"Mean", + {std::make_shared(), constant, + std::make_shared(), + std::make_shared()}}, + {"Min", {std::make_shared(), constant}}, + {"Minimum", + {std::make_shared(), + std::make_shared()}}, + {"Mul", {std::make_shared()}}, + {"Neg", {std::make_shared()}}, + {"OneHot", + {std::make_shared(), + std::make_shared(), + std::make_shared(), + 
std::make_shared(), + std::make_shared()}}, + {"Pack", + {std::make_shared(), + std::make_shared()}}, + {"Pad", {constant, std::make_shared()}}, + {"Pow", {std::make_shared()}}, + {"PreventGradient", {}}, + {"Prod", + {std::make_shared(), constant, + std::make_shared()}}, + {"QuantizeAndDequantizeV2", + {constant, std::make_shared(), + std::make_shared()}}, + // Next few are CPU only ops + {"QuantizedAvgPool", + {std::make_shared(), + std::make_shared()}}, + {"QuantizedConcat", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"QuantizedConcatV2", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"QuantizedConv2DWithBiasAndReluAndRequantize", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"QuantizedConv2DWithBiasAndRequantize", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"QuantizedConv2DWithBiasSumAndReluAndRequantize", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"QuantizedMaxPool", + {std::make_shared(), + std::make_shared()}}, + // End of CPU only ops + {"QuantizeV2", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + { + "RandomUniform", + {constant, std::make_shared()}, + }, + {"Rank", {constant}}, {"RealDiv", + {std::make_shared(), + std::make_shared()}}, + {"Reciprocal", {constant, std::make_shared()}}, + {"Relu", {std::make_shared()}}, + {"Relu6", + {constant, std::make_shared(), + std::make_shared()}}, + {"ReluGrad", {relu}}, + // TODO: remove Convert later + {"ResizeBilinear", + {std::make_shared(), + std::make_shared()}}, + {"Rsqrt", {constant, std::make_shared()}}, + {"RsqrtGrad", + {constant, std::make_shared(), + std::make_shared()}}, + {"Select", + {std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"Reshape", {std::make_shared()}}, + {"ScatterNd", {constant, std::make_shared()}}, + {"Shape", {constant}}, {"Sigmoid", + {constant, std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}}, + {"SigmoidGrad", + {constant, std::make_shared(), + std::make_shared()}}, + {"Sin", {std::make_shared()}}, {"Size", {constant}}, + {"Sign", {std::make_shared()}}, + {"Slice", {std::make_shared()}}, {"Snapshot", {}}, + {"Softmax", {std::make_shared(), constant}}, + {"SoftmaxCrossEntropyWithLogits", + {std::make_shared(), + std::make_shared(), + 
      std::make_shared(),
+      std::make_shared(),
+      std::make_shared(),
+      std::make_shared(),
+      std::make_shared(),
+      std::make_shared(),
+      std::make_shared(), constant}},
+     {"Softplus",
+      {constant, std::make_shared(),
+       std::make_shared(),
+       std::make_shared()}},
+     {"SpaceToDepth",
+      {std::make_shared(),
+       std::make_shared()}},
+     {"SparseSoftmaxCrossEntropyWithLogits",
+      {std::make_shared(),
+       std::make_shared(),
+       std::make_shared(),
+       std::make_shared(),
+       std::make_shared(),
+       std::make_shared(),
+       std::make_shared(),
+       std::make_shared(),
+       std::make_shared(),
+       std::make_shared(), constant}},
+     {"Split", {std::make_shared()}},
+     {"SplitV", {std::make_shared()}},
+     {"Sqrt", {std::make_shared()}},
+     {"Square", {std::make_shared()}},
+     {"SquaredDifference",
+      {std::make_shared(),
+       std::make_shared(),
+       std::make_shared()}},
+     {"Squeeze", {std::make_shared()}},
+     {"StridedSlice",
+      {std::make_shared(),
+       std::make_shared(),
+       std::make_shared()}},
+     {"Sub", {std::make_shared()}},
+     {"Sum",
+      {std::make_shared(),
+       std::make_shared(), constant}},
+     {"Tanh", {std::make_shared()}},
+     {"TanhGrad",
+      {constant, std::make_shared(),
+       std::make_shared()}},
+     {"Tile", {constant, std::make_shared()}},
+     {"TopKV2",
+      {std::make_shared(),
+       std::make_shared()}},
+     {"Transpose", {constant, std::make_shared()}},
+     {"UnsortedSegmentSum",
+      {constant, std::make_shared()}},
+     {"Unpack",
+      {std::make_shared(),
+       std::make_shared()}},
+     {"ZerosLike", {constant}},
+#if defined NGRAPH_DISTRIBUTED
+     {"HorovodAllreduce", {std::make_shared()}},
+     {"HorovodBroadcast",
+      {std::make_shared()}},
+#endif
+     {"NoOp", {}},
   };
 
-  mutex init_mu;
-  static bool initialized = false;
+  return TFtoNgraphOpMap;
+}
+
+//
+// Main entry point for the marking pass.
+//
+Status MarkForClustering(Graph* graph, const std::set<string> skip_these_nodes,
+                         const string& current_backend) {
+  const TypeConstraintMap& type_constraint_map = GetTypeConstraintMap();
+
+  // confirmation_function_map is non-const unlike the other maps
+  static std::map<std::string, ConfirmationFunction> confirmation_function_map =
+      GetConfirmationMap();
+
+  const std::map<std::string, SetAttributesFunction>& set_attributes_map =
+      GetAttributeSetters();
+
+  const std::map<std::string, std::set<std::shared_ptr<ng::Node>>>&
+      TFtoNgraphOpMap = GetTFToNgOpMap();
-
-  // If the type constraint and confirmation function maps have not been
-  // initialized, initialize them.
   //
   // IF YOU ARE ADDING A NEW OP IMPLEMENTATION, YOU MUST ADD A CONFIRMATION
   // FUNCTION, TYPE CONSTRAINTS (IF ANY) AND STATIC INPUT INDEXES (IF ANY) FOR
@@ -593,481 +1104,16 @@ Status MarkForClustering(Graph* graph, const std::set<string> skip_these_nodes,
   static std::set<string> disabled_ops_set = {};
 
+  static bool initialized = false;
+
   std::set<string> disabled_ops_set_current = config::GetDisabledOps();
   bool op_set_support_has_changed =
       disabled_ops_set_current != disabled_ops_set;
 
-  {
-    mutex_lock l(init_mu);
-
-    if (!initialized || op_set_support_has_changed) {
-      //
-      // Initialize confirmation function map.
-      //
-      // Please keep these in alphabetical order by op name.
- // - confirmation_function_map["Abs"] = SimpleConfirmationFunction(); - confirmation_function_map["Add"] = SimpleConfirmationFunction(); - confirmation_function_map["AddN"] = SimpleConfirmationFunction(); - confirmation_function_map["AddV2"] = SimpleConfirmationFunction(); - confirmation_function_map["Any"] = SimpleConfirmationFunction(); - confirmation_function_map["All"] = SimpleConfirmationFunction(); - confirmation_function_map["ArgMax"] = SimpleConfirmationFunction(); - confirmation_function_map["ArgMin"] = SimpleConfirmationFunction(); - confirmation_function_map["Atan2"] = SimpleConfirmationFunction(); - confirmation_function_map["AvgPool"] = SimpleConfirmationFunction(); - confirmation_function_map["AvgPoolGrad"] = SimpleConfirmationFunction(); - confirmation_function_map["BatchMatMul"] = SimpleConfirmationFunction(); - confirmation_function_map["BatchMatMulV2"] = SimpleConfirmationFunction(); - confirmation_function_map["BiasAdd"] = SimpleConfirmationFunction(); - confirmation_function_map["BiasAddGrad"] = SimpleConfirmationFunction(); - confirmation_function_map["Cast"] = SimpleConfirmationFunction(); - confirmation_function_map["ConcatV2"] = SimpleConfirmationFunction(); - confirmation_function_map["Const"] = SimpleConfirmationFunction(); - confirmation_function_map["Conv2D"] = SimpleConfirmationFunction(); - confirmation_function_map["Conv2DBackpropFilter"] = - SimpleConfirmationFunction(); - confirmation_function_map["Conv2DBackpropInput"] = - SimpleConfirmationFunction(); - confirmation_function_map["Conv3D"] = SimpleConfirmationFunction(); - confirmation_function_map["CropAndResize"] = SimpleConfirmationFunction(); - confirmation_function_map["Cos"] = SimpleConfirmationFunction(); - confirmation_function_map["Cumsum"] = SimpleConfirmationFunction(); - confirmation_function_map["DepthwiseConv2dNative"] = - SimpleConfirmationFunction(); - confirmation_function_map["DepthToSpace"] = [](Node* n, bool* result) { - std::string tf_data_format; - TF_RETURN_IF_ERROR( - GetNodeAttr(n->attrs(), "data_format", &tf_data_format)); - *result = tf_data_format != "NCHW_VECT_C"; - return Status::OK(); - }; - confirmation_function_map["Dequantize"] = [](Node* n, bool* result) { - string mode; - TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "mode", &mode)); - *result = (mode.compare("SCALED") == 0); - return Status::OK(); - }; - confirmation_function_map["Equal"] = SimpleConfirmationFunction(); - confirmation_function_map["Exp"] = SimpleConfirmationFunction(); - confirmation_function_map["ExpandDims"] = SimpleConfirmationFunction(); - confirmation_function_map["Fill"] = SimpleConfirmationFunction(); - confirmation_function_map["Floor"] = SimpleConfirmationFunction(); - confirmation_function_map["FloorDiv"] = SimpleConfirmationFunction(); - confirmation_function_map["FloorMod"] = SimpleConfirmationFunction(); - confirmation_function_map["FusedBatchNorm"] = - SimpleConfirmationFunction(); - confirmation_function_map["FusedBatchNormV2"] = - SimpleConfirmationFunction(); - confirmation_function_map["FusedBatchNormV3"] = - SimpleConfirmationFunction(); - confirmation_function_map["FusedBatchNormGrad"] = [](Node* n, - bool* result) { - TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "is_training", result)); - return Status::OK(); - }; - confirmation_function_map["FusedBatchNormGradV3"] = [](Node* n, - bool* result) { - TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "is_training", result)); - return Status::OK(); - }; - confirmation_function_map["_FusedConv2D"] = SimpleConfirmationFunction(); - 
confirmation_function_map["GatherNd"] = SimpleConfirmationFunction(); - confirmation_function_map["GatherV2"] = SimpleConfirmationFunction(); - confirmation_function_map["_FusedMatMul"] = - SimpleConfirmationFunction(); // TODO accept under all conditions? - // check? - confirmation_function_map["Greater"] = SimpleConfirmationFunction(); - confirmation_function_map["GreaterEqual"] = SimpleConfirmationFunction(); -#if defined NGRAPH_DISTRIBUTED - confirmation_function_map["HorovodAllreduce"] = - SimpleConfirmationFunction(); - confirmation_function_map["HorovodBroadcast"] = - SimpleConfirmationFunction(); -#endif - confirmation_function_map["Identity"] = SimpleConfirmationFunction(); - confirmation_function_map["IsFinite"] = SimpleConfirmationFunction(); - confirmation_function_map["L2Loss"] = SimpleConfirmationFunction(); - confirmation_function_map["LogSoftmax"] = SimpleConfirmationFunction(); - confirmation_function_map["Less"] = SimpleConfirmationFunction(); - confirmation_function_map["LessEqual"] = SimpleConfirmationFunction(); - confirmation_function_map["Log"] = SimpleConfirmationFunction(); - confirmation_function_map["LogicalAnd"] = SimpleConfirmationFunction(); - confirmation_function_map["LogicalNot"] = SimpleConfirmationFunction(); - confirmation_function_map["LogicalOr"] = SimpleConfirmationFunction(); - confirmation_function_map["MatMul"] = SimpleConfirmationFunction(); - confirmation_function_map["Max"] = SimpleConfirmationFunction(); - confirmation_function_map["Maximum"] = SimpleConfirmationFunction(); - confirmation_function_map["MaxPool"] = SimpleConfirmationFunction(); - confirmation_function_map["MaxPool3D"] = SimpleConfirmationFunction(); - confirmation_function_map["MaxPoolGrad"] = SimpleConfirmationFunction(); - confirmation_function_map["Mean"] = SimpleConfirmationFunction(); - confirmation_function_map["Min"] = SimpleConfirmationFunction(); - confirmation_function_map["Minimum"] = SimpleConfirmationFunction(); - confirmation_function_map["Mul"] = SimpleConfirmationFunction(); - confirmation_function_map["Neg"] = SimpleConfirmationFunction(); - confirmation_function_map["NoOp"] = SimpleConfirmationFunction(); - confirmation_function_map["OneHot"] = SimpleConfirmationFunction(); - confirmation_function_map["Pad"] = SimpleConfirmationFunction(); - confirmation_function_map["Pow"] = SimpleConfirmationFunction(); - confirmation_function_map["PreventGradient"] = - SimpleConfirmationFunction(); - confirmation_function_map["Prod"] = SimpleConfirmationFunction(); - confirmation_function_map["Rank"] = SimpleConfirmationFunction(); - confirmation_function_map["RandomUniform"] = SimpleConfirmationFunction(); - confirmation_function_map["QuantizeAndDequantizeV2"] = [](Node* n, - bool* result) { - // accept only when num_bits == 8 and range is given - bool range_given; - TF_RETURN_IF_ERROR( - GetNodeAttr(n->attrs(), "range_given", &range_given)); - int num_bits; - TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "num_bits", &num_bits)); - *result = (num_bits == 8) && range_given; - return Status::OK(); - }; - confirmation_function_map["QuantizedAvgPool"] = - SimpleConfirmationFunction(); - confirmation_function_map["QuantizedConcat"] = - SimpleConfirmationFunction(); - confirmation_function_map["QuantizedConcatV2"] = - SimpleConfirmationFunction(); - confirmation_function_map["QuantizedConv2DWithBiasAndReluAndRequantize"] = - SimpleConfirmationFunction(); - confirmation_function_map["QuantizedConv2DWithBiasAndRequantize"] = - SimpleConfirmationFunction(); - confirmation_function_map 
- ["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"] = - SimpleConfirmationFunction(); - confirmation_function_map - ["QuantizedConv2DWithBiasSumAndReluAndRequantize"] = - SimpleConfirmationFunction(); - confirmation_function_map["QuantizedMaxPool"] = - SimpleConfirmationFunction(); - confirmation_function_map["QuantizeV2"] = [](Node* n, bool* result) { - string mode; - TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "mode", &mode)); - *result = (mode.compare("SCALED") == 0); - return Status::OK(); - }; - confirmation_function_map["RealDiv"] = SimpleConfirmationFunction(); - confirmation_function_map["Reciprocal"] = SimpleConfirmationFunction(); - confirmation_function_map["Relu"] = SimpleConfirmationFunction(); - confirmation_function_map["Relu6"] = SimpleConfirmationFunction(); - confirmation_function_map["ReluGrad"] = SimpleConfirmationFunction(); - confirmation_function_map["Reshape"] = SimpleConfirmationFunction(); - confirmation_function_map["ResizeBilinear"] = - SimpleConfirmationFunction(); - confirmation_function_map["Rsqrt"] = SimpleConfirmationFunction(); - confirmation_function_map["RsqrtGrad"] = SimpleConfirmationFunction(); - confirmation_function_map["ScatterNd"] = SimpleConfirmationFunction(); - confirmation_function_map["Select"] = SimpleConfirmationFunction(); - confirmation_function_map["Shape"] = SimpleConfirmationFunction(); - confirmation_function_map["Sigmoid"] = SimpleConfirmationFunction(); - confirmation_function_map["SigmoidGrad"] = SimpleConfirmationFunction(); - confirmation_function_map["Sign"] = SimpleConfirmationFunction(); - confirmation_function_map["Sin"] = SimpleConfirmationFunction(); - confirmation_function_map["Size"] = SimpleConfirmationFunction(); - confirmation_function_map["Slice"] = SimpleConfirmationFunction(); - confirmation_function_map["Snapshot"] = SimpleConfirmationFunction(); - confirmation_function_map["Softmax"] = SimpleConfirmationFunction(); - confirmation_function_map["SoftmaxCrossEntropyWithLogits"] = - SimpleConfirmationFunction(); - confirmation_function_map["Softplus"] = SimpleConfirmationFunction(); - confirmation_function_map["SpaceToDepth"] = - confirmation_function_map["DepthToSpace"]; - confirmation_function_map["SparseSoftmaxCrossEntropyWithLogits"] = - SimpleConfirmationFunction(); - confirmation_function_map["Split"] = SimpleConfirmationFunction(); - confirmation_function_map["SplitV"] = SimpleConfirmationFunction(); - confirmation_function_map["Sqrt"] = SimpleConfirmationFunction(); - confirmation_function_map["Square"] = SimpleConfirmationFunction(); - confirmation_function_map["SquaredDifference"] = - SimpleConfirmationFunction(); - confirmation_function_map["Squeeze"] = SimpleConfirmationFunction(); - confirmation_function_map["StridedSlice"] = SimpleConfirmationFunction(); - confirmation_function_map["Pack"] = SimpleConfirmationFunction(); - confirmation_function_map["Sub"] = SimpleConfirmationFunction(); - confirmation_function_map["Sum"] = SimpleConfirmationFunction(); - confirmation_function_map["Tanh"] = SimpleConfirmationFunction(); - confirmation_function_map["TanhGrad"] = SimpleConfirmationFunction(); - confirmation_function_map["Tile"] = SimpleConfirmationFunction(); - confirmation_function_map["TopKV2"] = [](Node* n, bool* result) { - bool sorted = true; - TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "sorted", &sorted)); - - // sorted = false is not supported right now, it falls back to TF if set - // to false. 
-      *result = sorted;
-      return Status::OK();
-    };
-    confirmation_function_map["Transpose"] = SimpleConfirmationFunction();
-    confirmation_function_map["Unpack"] = SimpleConfirmationFunction();
-    confirmation_function_map["UnsortedSegmentSum"] =
-        SimpleConfirmationFunction();
-    confirmation_function_map["ZerosLike"] = SimpleConfirmationFunction();
-
-    //
-    // Initialize type constraint map.
-    //
-    type_constraint_map["Abs"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Add"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["AddN"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["AddV2"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Any"]["Tidx"] = NGraphIndexDTypes();
-    type_constraint_map["All"]["Tidx"] = NGraphIndexDTypes();
-    type_constraint_map["ArgMax"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["ArgMax"]["Tidx"] = NGraphIndexDTypes();
-    type_constraint_map["ArgMin"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["ArgMin"]["Tidx"] = NGraphIndexDTypes();
-    type_constraint_map["Atan2"]["T"] = NGraphRealDTypes();
-    type_constraint_map["AvgPool"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["AvgPoolGrad"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["BatchMatMul"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["BatchMatMulV2"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["BiasAdd"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["BiasAddGrad"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Cast"]["SrcT"] = NGraphDTypes();
-    type_constraint_map["Cast"]["DstT"] = NGraphDTypes();
-    type_constraint_map["ConcatV2"]["T"] = NGraphDTypes();
-    type_constraint_map["ConcatV2"]["Tidx"] = NGraphIndexDTypes();
-    type_constraint_map["Const"]["dtype"] = NGraphDTypes();
-    type_constraint_map["Conv2D"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Conv2DBackpropInput"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Conv3D"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["CropAndResize"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Cos"]["T"] = NGraphRealDTypes();
-    type_constraint_map["Cumsum"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Cumsum"]["Tidx"] = NGraphIndexDTypes();
-    type_constraint_map["DepthToSpace"]["T"] = NGraphDTypes();
-    type_constraint_map["DepthwiseConv2dNative"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Dequantize"]["T"] = NGraphSupportedQuantizedDTypes();
-    type_constraint_map["Equal"]["T"] = NGraphDTypes();
-    type_constraint_map["Exp"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["ExpandDims"]["T"] = NGraphDTypes();
-    type_constraint_map["Floor"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["FloorDiv"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["FloorMod"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["FusedBatchNorm"]["T"] = NGraphNumericDTypes();
-    // TODO (mingshan): FusedBatchNormV2, V3 supports DT_HALF,DT_BFLOAT16,
-    // DT_FLOAT
-    type_constraint_map["FusedBatchNormV2"]["T"] = {DT_FLOAT};
-    type_constraint_map["FusedBatchNormV3"]["T"] = {DT_FLOAT};
-    type_constraint_map["FusedBatchNormGrad"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["GatherNd"]["Tparams"] = {
-        DT_FLOAT};  // NGraphDTypes();
-    type_constraint_map["GatherNd"]["Tindices"] = NGraphIndexDTypes();
-    type_constraint_map["FusedBatchNormGradV3"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["GatherV2"]["Tparams"] = NGraphDTypes();
-    type_constraint_map["GatherV2"]["Tindices"] = NGraphIndexDTypes();
-    type_constraint_map["GatherV2"]["Taxis"] = NGraphIndexDTypes();
- type_constraint_map["_FusedConv2D"]["T"] = NGraphRealDTypes(); - type_constraint_map["_FusedMatMul"]["T"] = NGraphRealDTypes(); - type_constraint_map["Greater"]["T"] = NGraphDTypes(); - type_constraint_map["GreaterEqual"]["T"] = NGraphDTypes(); -#if defined NGRAPH_DISTRIBUTED - type_constraint_map["HorovodAllreduce"]["T"] = NGraphNumericDTypes(); - type_constraint_map["HorovodBroadcast"]["T"] = NGraphNumericDTypes(); -#endif - type_constraint_map["Identity"]["T"] = NGraphDTypes(); - type_constraint_map["IsFinite"]["T"] = NGraphRealDTypes(); - type_constraint_map["L2Loss"]["T"] = NGraphNumericDTypes(); - type_constraint_map["LogSoftmax"]["T"] = NGraphRealDTypes(); - type_constraint_map["Less"]["T"] = NGraphDTypes(); - type_constraint_map["LessEqual"]["T"] = NGraphDTypes(); - type_constraint_map["Log"]["T"] = NGraphNumericDTypes(); - // LogicalAnd and LogicalNot have no type attributes ("T", if it existed, - // would always be bool). - type_constraint_map["MatMul"]["T"] = NGraphNumericDTypes(); - type_constraint_map["Max"]["T"] = NGraphNumericDTypes(); - type_constraint_map["Max"]["Tidx"] = NGraphIndexDTypes(); - type_constraint_map["Maximum"]["T"] = NGraphNumericDTypes(); - type_constraint_map["MaxPool"]["T"] = NGraphNumericDTypes(); - type_constraint_map["MaxPool3D"]["T"] = NGraphNumericDTypes(); - type_constraint_map["MaxPoolGrad"]["T"] = NGraphNumericDTypes(); - type_constraint_map["Mean"]["T"] = NGraphNumericDTypes(); - type_constraint_map["Mean"]["Tidx"] = NGraphIndexDTypes(); - type_constraint_map["Min"]["T"] = NGraphNumericDTypes(); - type_constraint_map["Min"]["Tidx"] = NGraphIndexDTypes(); - type_constraint_map["Minimum"]["T"] = NGraphNumericDTypes(); - type_constraint_map["Mul"]["T"] = NGraphNumericDTypes(); - type_constraint_map["Neg"]["T"] = NGraphNumericDTypes(); - type_constraint_map["NonMaxSuppressionV4"]["T"] = { - DT_FLOAT}; // TF allows half too - type_constraint_map["OneHot"]["T"] = NGraphDTypes(); - type_constraint_map["Pack"]["T"] = NGraphDTypes(); - type_constraint_map["RandomUniform"]["T"] = NGraphDTypes(); - type_constraint_map["Pad"]["T"] = NGraphDTypes(); - type_constraint_map["Pad"]["Tpaddings"] = NGraphIndexDTypes(); - type_constraint_map["Pow"]["T"] = NGraphNumericDTypes(); - type_constraint_map["PreventGradient"]["T"] = NGraphDTypes(); - type_constraint_map["Prod"]["T"] = NGraphNumericDTypes(); - type_constraint_map["Prod"]["Tidx"] = NGraphIndexDTypes(); - type_constraint_map["QuantizeAndDequantizeV2"]["T"] = NGraphRealDTypes(); - type_constraint_map["QuantizedAvgPool"]["T"] = - NGraphSupportedQuantizedDTypes(); - type_constraint_map["QuantizedConcat"]["T"] = - NGraphSupportedQuantizedDTypes(); - type_constraint_map["QuantizedConcatV2"]["T"] = - NGraphSupportedQuantizedDTypes(); - type_constraint_map["QuantizedConv2DWithBiasAndReluAndRequantize"] - ["Tinput"] = NGraphSupportedQuantizedDTypes(); - type_constraint_map["QuantizedConv2DWithBiasAndReluAndRequantize"] - ["Tfilter"] = NGraphSupportedQuantizedDTypes(); - type_constraint_map["QuantizedConv2DWithBiasAndReluAndRequantize"] - ["Tbias"] = NGraphBiasDTypes(); - // TODO: check if any other type constraint is required - // https://github.com/tensorflow/tensorflow/blob/c95ca05536144451ef78ca6e2c15f0f65ebaaf95/tensorflow/core/ops/nn_ops.cc#L2780 - type_constraint_map - ["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"]["Tinput"] = - NGraphSupportedQuantizedDTypes(); - type_constraint_map - ["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"]["Tsummand"] = - NGraphSupportedQuantizedDTypes(); - 
-    type_constraint_map
-        ["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"]["Tfilter"] =
-            NGraphSupportedQuantizedDTypes();
-    type_constraint_map
-        ["QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"]["Tbias"] =
-            NGraphBiasDTypes();
-    type_constraint_map["QuantizedConv2DWithBiasSumAndReluAndRequantize"]
-                       ["Tinput"] = NGraphSupportedQuantizedDTypes();
-    type_constraint_map["QuantizedConv2DWithBiasSumAndReluAndRequantize"]
-                       ["Tsummand"] = NGraphSupportedQuantizedDTypes();
-    type_constraint_map["QuantizedConv2DWithBiasSumAndReluAndRequantize"]
-                       ["Tfilter"] = NGraphSupportedQuantizedDTypes();
-    type_constraint_map["QuantizedConv2DWithBiasSumAndReluAndRequantize"]
-                       ["Tbias"] = NGraphBiasDTypes();
-    type_constraint_map["QuantizedConv2DWithBiasAndRequantize"]["Tinput"] =
-        NGraphSupportedQuantizedDTypes();
-    type_constraint_map["QuantizedConv2DWithBiasAndRequantize"]["Tfilter"] =
-        NGraphSupportedQuantizedDTypes();
-    type_constraint_map["QuantizedConv2DWithBiasAndRequantize"]["Tbias"] =
-        NGraphBiasDTypes();
-    type_constraint_map["QuantizedMaxPool"]["T"] =
-        NGraphSupportedQuantizedDTypes();
-    type_constraint_map["QuantizeV2"]["T"] = NGraphSupportedQuantizedDTypes();
-    type_constraint_map["Rank"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["RealDiv"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Reciprocal"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Relu"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Relu6"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["ReluGrad"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Reshape"]["T"] = NGraphDTypes();
-    type_constraint_map["Reshape"]["Tshape"] = NGraphIndexDTypes();
-    type_constraint_map["ResizeBilinear"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Rsqrt"]["T"] = NGraphDTypes();
-    type_constraint_map["RsqrtGrad"]["T"] = NGraphRealDTypes();
-    type_constraint_map["ScatterNd"]["T"] = NGraphDTypes();
-    type_constraint_map["ScatterNd"]["Tindices"] = NGraphIndexDTypes();
-    type_constraint_map["Select"]["T"] = NGraphDTypes();
-    type_constraint_map["Shape"]["T"] = NGraphDTypes();
-    type_constraint_map["Shape"]["out_type"] = NGraphIndexDTypes();
-    type_constraint_map["Sigmoid"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["SigmoidGrad"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Sign"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Sin"]["T"] = NGraphRealDTypes();
-    type_constraint_map["Size"]["T"] = NGraphDTypes();
-    type_constraint_map["Size"]["out_type"] = NGraphIndexDTypes();
-    type_constraint_map["Slice"]["T"] = NGraphDTypes();
-    type_constraint_map["Slice"]["Index"] = NGraphIndexDTypes();
-    type_constraint_map["Snapshot"]["T"] = NGraphDTypes();
-    type_constraint_map["Softmax"]["T"] = NGraphNumericDTypes();
-    // For SoftmaxCrossEntropyWithLogits, see
-    // https://github.com/tensorflow/tensorflow/blob/c95ca05536144451ef78ca6e2c15f0f65ebaaf95/tensorflow/core/ops/nn_ops.cc#L1096
-    type_constraint_map["SoftmaxCrossEntropyWithLogits"]["T"] =
-        NGraphRealDTypes();
-    type_constraint_map["Softplus"]["T"] = NGraphRealDTypes();
-    type_constraint_map["SpaceToDepth"]["T"] = NGraphDTypes();
-    type_constraint_map["SparseSoftmaxCrossEntropyWithLogits"]["T"] =
-        NGraphRealDTypes();
-    type_constraint_map["SparseSoftmaxCrossEntropyWithLogits"]["Tlabels"] =
-        NGraphNumericDTypes();
-    type_constraint_map["Split"]["T"] = NGraphDTypes();
-    type_constraint_map["SplitV"]["T"] = NGraphDTypes();
-    type_constraint_map["SplitV"]["Tlen"] = NGraphIndexDTypes();
-    type_constraint_map["Sqrt"]["T"] = NGraphDTypes();
-    type_constraint_map["Square"]["T"] = NGraphDTypes();
-    type_constraint_map["SquaredDifference"]["T"] = NGraphDTypes();
-    type_constraint_map["Squeeze"]["T"] = NGraphDTypes();
-    type_constraint_map["StridedSlice"]["T"] = NGraphDTypes();
-    type_constraint_map["StridedSlice"]["Index"] = NGraphIndexDTypes();
-    type_constraint_map["Sub"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Sum"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Sum"]["Tidx"] = NGraphIndexDTypes();
-    type_constraint_map["Tanh"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["TanhGrad"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Tile"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Tile"]["Tmultiples"] = NGraphIndexDTypes();
-    type_constraint_map["TopKV2"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["Transpose"]["T"] = NGraphDTypes();
-    type_constraint_map["Transpose"]["Tperm"] = NGraphIndexDTypes();
-    type_constraint_map["Unpack"]["T"] = NGraphDTypes();
-    type_constraint_map["UnsortedSegmentSum"]["T"] = NGraphNumericDTypes();
-    type_constraint_map["UnsortedSegmentSum"]["Tindices"] =
-        NGraphIndexDTypes();
-    type_constraint_map["UnsortedSegmentSum"]["Tnumsegments"] =
-        NGraphIndexDTypes();
-
-    // Set Additional Attributes (if any)
-    set_attributes_map["Any"] = SetStaticInputs({1});
-    set_attributes_map["All"] = SetStaticInputs({1});
-    set_attributes_map["ArgMax"] = SetStaticInputs({1});
-    set_attributes_map["ArgMin"] = SetStaticInputs({1});
-    set_attributes_map["AvgPoolGrad"] = SetStaticInputs({0});
-    set_attributes_map["ConcatV2"] = SetStaticInputs({-1});
-    set_attributes_map["CombinedNonMaxSuppression"] =
-        SetStaticInputs({2, 3, 4, 5});
-    set_attributes_map["Conv2DBackpropFilter"] = SetStaticInputs({1});
-    set_attributes_map["Conv2DBackpropInput"] = SetStaticInputs({0});
-    set_attributes_map["ExpandDims"] = SetStaticInputs({1});
-    set_attributes_map["Fill"] = SetStaticInputs({0});
-    set_attributes_map["GatherV2"] = SetStaticInputs({2});
-    set_attributes_map["Max"] = SetStaticInputs({1});
-    set_attributes_map["Mean"] = SetStaticInputs({1});
-    set_attributes_map["Min"] = SetStaticInputs({1});
-    set_attributes_map["NonMaxSuppressionV4"] = SetStaticInputs({2, 3, 4});
-    set_attributes_map["OneHot"] = SetStaticInputs({1});
-    set_attributes_map["Pad"] = SetStaticInputs({1});
-    set_attributes_map["Prod"] = SetStaticInputs({1});
-
-    set_attributes_map["QuantizeAndDequantizeV2"] = SetStaticInputs({1, 2});
-    set_attributes_map["QuantizedConcat"] = [](Node* n) {
-      SetStaticInputs(n, {0});  // the axis
-      auto num_of_tensors_to_concat = (n->num_inputs() - 1) / 3;
-      // mark all mins and maxes static
-      for (int idx = num_of_tensors_to_concat + 1; idx < n->num_inputs();
-           idx++) {
-        SetStaticInputs(n, {idx});
-      }
-      return Status::OK();
-    };
-    set_attributes_map["QuantizedConcatV2"] = [](Node* n) {
-      auto num_of_tensors_to_concat = (n->num_inputs() - 1) / 3;
-      // mark axis, all mins and maxes static
-      std::vector<int> static_input_vec;
-      for (int idx = num_of_tensors_to_concat; idx < n->num_inputs(); idx++) {
-        static_input_vec.push_back(idx);
-      }
-      SetStaticInputs(n, static_input_vec);
-      return Status::OK();
-    };
-    set_attributes_map["RandomUniform"] = SetStaticInputs({0});
-    set_attributes_map["Reshape"] = SetStaticInputs({1});
-    set_attributes_map["ResizeBilinear"] = SetStaticInputs({1});
-    set_attributes_map["ScatterNd"] = SetStaticInputs({2});
-    set_attributes_map["Slice"] = SetStaticInputs({1, 2});
-    set_attributes_map["Split"] = SetStaticInputs({0});
-    set_attributes_map["SplitV"] = SetStaticInputs({1, 2});
-    set_attributes_map["StridedSlice"] = SetStaticInputs({1, 2, 3});
-    set_attributes_map["Sum"] = SetStaticInputs({1});
-    set_attributes_map["TopKV2"] = SetStaticInputs({1});
-    set_attributes_map["Tile"] = SetStaticInputs({1});
-    set_attributes_map["Transpose"] = SetStaticInputs({1});
-    set_attributes_map["UnsortedSegmentSum"] = SetStaticInputs({2});
-    initialized = true;
-  }
+  if (!initialized || op_set_support_has_changed) {
+    confirmation_function_map = GetConfirmationMap();
+    initialized = true;
   }
 
   // Right now it cannot be inside the if(!initialized) block, because it is
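For context on the registries relocated above: a ConfirmationFunction receives a Node* and reports through its bool* out-parameter whether the op may be clustered, while a SetStaticInputs entry records which input indices must hold compile-time constants. The following sketch shows how a new op would plug into both maps as entries inside GetConfirmationMap() and GetAttributeSetters(); "MyQuantizedOp" and its "mode" attribute are hypothetical, used only for illustration:

    // Hypothetical op, for illustration only (not part of this patch).
    // Reject clustering unless the node's "mode" attribute is "SCALED",
    // mirroring the QuantizeV2 entry above.
    confirmation_function_map["MyQuantizedOp"] = [](Node* n, bool* result) {
      string mode;
      TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "mode", &mode));
      *result = (mode == "SCALED");
      return Status::OK();
    };
    // Input 1 (say, an axis) must be known when the cluster is compiled.
    set_attributes_map["MyQuantizedOp"] = SetStaticInputs({1});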
set_attributes_map["SplitV"] = SetStaticInputs({1, 2}); - set_attributes_map["StridedSlice"] = SetStaticInputs({1, 2, 3}); - set_attributes_map["Sum"] = SetStaticInputs({1}); - set_attributes_map["TopKV2"] = SetStaticInputs({1}); - set_attributes_map["Tile"] = SetStaticInputs({1}); - set_attributes_map["Transpose"] = SetStaticInputs({1}); - set_attributes_map["UnsortedSegmentSum"] = SetStaticInputs({2}); - initialized = true; - } + if (!initialized || op_set_support_has_changed) { + confirmation_function_map = GetConfirmationMap(); + initialized = true; } // Right now it cannot be inside the if(!initialized) block, because it is diff --git a/ngraph_bridge/ngraph_mark_for_clustering.h b/ngraph_bridge/ngraph_mark_for_clustering.h index ea4739741..94872fde7 100644 --- a/ngraph_bridge/ngraph_mark_for_clustering.h +++ b/ngraph_bridge/ngraph_mark_for_clustering.h @@ -32,7 +32,7 @@ Status MarkForClustering(Graph* graph, std::set skip_these_nodes, void ResetMarkForClustering(Graph* graph); Status IsSupportedByBackend( const Node* node, const ngraph::runtime::Backend* op_backend, - std::map>>& + const std::map>>& TFtoNgraphOpMap, bool& is_supported); bool NodeIsMarkedForClustering(const Node* node); @@ -40,6 +40,19 @@ void GetStaticInputs(const Node* node, std::vector* inputs); bool InputIsStatic(const Node* node, int index); Status GetNodeBackend(const Node* node, string* backend_name); void SetNodeBackend(Node* node, const string& backend_name); + +using SetAttributesFunction = std::function; +const std::map& GetAttributeSetters(); + +using TypeConstraintMap = + std::map>>; +const TypeConstraintMap& GetTypeConstraintMap(); + +using ConfirmationFunction = std::function; +const std::map& GetConfirmationMap(); + +const std::map>>& +GetTFToNgOpMap(); } // namespace ngraph_bridge } // namespace tensorflow diff --git a/test/opexecuter.cpp b/test/opexecuter.cpp index 14156c8c5..2fb5a1fa6 100644 --- a/test/opexecuter.cpp +++ b/test/opexecuter.cpp @@ -342,6 +342,25 @@ void OpExecuter::ExecuteOnNGraph(vector& ngraph_outputs, ng_function)) << "Failed to TranslateGraph"; + std::set> TFtoNgraphOpSet = + GetTFToNgOpMap().at(test_op_type_); + + for (auto n : ng_function->get_ops()) { + auto ng_node = dynamic_pointer_cast(n); + bool is_result = (ng_node != nullptr); + if (!is_result && !(n->is_parameter())) { + bool found = false; + for (auto itr : TFtoNgraphOpSet) { + found = n->is_same_op_type(itr); + if (found) break; + } + ASSERT_TRUE(found) << "After translation found ngraph op " + << (n->get_name()) + << " which was not found in map. 
diff --git a/test/opexecuter.cpp b/test/opexecuter.cpp
index 14156c8c5..2fb5a1fa6 100644
--- a/test/opexecuter.cpp
+++ b/test/opexecuter.cpp
@@ -342,6 +342,25 @@ void OpExecuter::ExecuteOnNGraph(vector<Tensor>& ngraph_outputs,
                                  ng_function))
       << "Failed to TranslateGraph";
 
+  std::set<std::shared_ptr<ngraph::Node>> TFtoNgraphOpSet =
+      GetTFToNgOpMap().at(test_op_type_);
+
+  for (auto n : ng_function->get_ops()) {
+    auto ng_node = dynamic_pointer_cast<ngraph::op::Result>(n);
+    bool is_result = (ng_node != nullptr);
+    if (!is_result && !(n->is_parameter())) {
+      bool found = false;
+      for (auto itr : TFtoNgraphOpSet) {
+        found = n->is_same_op_type(itr);
+        if (found) break;
+      }
+      ASSERT_TRUE(found) << "After translation found ngraph op "
+                         << (n->get_name())
+                         << " which was not found in map. To fix this issue "
+                            "check GetTFToNgOpMap";
+    }
+  }
+
   // ng function should get same number of outputs
   ASSERT_EQ(expected_output_datatypes_.size(), ng_function->get_output_size())
       << "Number of outputs of requested outputs and ngraph function outputs "
diff --git a/test/opexecuter.h b/test/opexecuter.h
index b7542df7c..bffba9962 100644
--- a/test/opexecuter.h
+++ b/test/opexecuter.h
@@ -34,6 +34,7 @@
 #include "logging/tf_graph_writer.h"
 #include "ngraph_bridge/ngraph_backend_manager.h"
 #include "ngraph_bridge/ngraph_builder.h"
+#include "ngraph_bridge/ngraph_mark_for_clustering.h"
 #include "ngraph_bridge/ngraph_utils.h"
 #include "test/test_utilities.h"
diff --git a/test/test_math_ops.cpp b/test/test_math_ops.cpp
index 036a3fcdb..0366ba658 100644
--- a/test/test_math_ops.cpp
+++ b/test/test_math_ops.cpp
@@ -1772,6 +1772,30 @@ TEST(MathOps, MinimumBroadcasting) {
   opexecuter.RunTest();
 }  // end of test op MinimumBroadcasting
 
+// Test op: MaximumBroadcasting
+TEST(MathOps, MaximumBroadcasting) {
+  Scope root = Scope::NewRootScope();
+  int dim1 = 2;
+  int dim2 = 2;
+
+  Tensor A(DT_FLOAT, TensorShape({dim1, dim2}));
+  Tensor B(DT_FLOAT, TensorShape({dim1}));
+
+  AssignInputValues(A, 7.5f);
+  AssignInputValues(B, 5.2f);
+
+  vector<int> static_input_indexes = {};
+  auto R = ops::Maximum(root, A, B);
+
+  vector<DataType> output_datatypes = {DT_FLOAT};
+
+  std::vector<Output> sess_run_fetchoutputs = {R};
+  OpExecuter opexecuter(root, "Maximum", static_input_indexes,
+                        output_datatypes, sess_run_fetchoutputs);
+
+  opexecuter.RunTest();
+}  // end of test op MaximumBroadcasting
+
 // Test op: Negate
 TEST(MathOps, Negate) {
   Scope root = Scope::NewRootScope();
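The MaximumBroadcasting test added above follows the OpExecuter pattern used throughout test_math_ops.cpp: build a small TF graph, run it on both native TensorFlow and the nGraph backend, and compare outputs, here with a rank-2 tensor broadcast against a rank-1 tensor. The same pattern extends naturally to a rank-0 operand; the sketch below is a hypothetical follow-on test written under the same assumptions (AssignInputValues filling every element, OpExecuter comparing backends), not part of this patch:

// Hypothetical follow-on test (not in this patch): Maximum against a scalar.
TEST(MathOps, MaximumScalarBroadcasting) {
  Scope root = Scope::NewRootScope();

  Tensor A(DT_FLOAT, TensorShape({2, 2}));
  Tensor B(DT_FLOAT, TensorShape({}));  // rank-0 operand broadcast over A

  AssignInputValues(A, 7.5f);
  AssignInputValues(B, 5.2f);

  vector<int> static_input_indexes = {};
  auto R = ops::Maximum(root, A, B);

  vector<DataType> output_datatypes = {DT_FLOAT};
  std::vector<Output> sess_run_fetchoutputs = {R};
  OpExecuter opexecuter(root, "Maximum", static_input_indexes,
                        output_datatypes, sess_run_fetchoutputs);

  opexecuter.RunTest();
}  // end of hypothetical scalar-broadcast sketch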