diff --git a/ngraph_bridge/CMakeLists.txt b/ngraph_bridge/CMakeLists.txt index fe41b4a85..8596efec9 100644 --- a/ngraph_bridge/CMakeLists.txt +++ b/ngraph_bridge/CMakeLists.txt @@ -35,7 +35,6 @@ set(SRC ngraph_assign_clusters.cc ngraph_builder.cc ngraph_backend_manager.cc - ngraph_backend_config.cc ngraph_capture_variables.cc ngraph_cluster_manager.cc ngraph_deassign_clusters.cc diff --git a/ngraph_bridge/enable_variable_ops/ops/ngraph_ops.cc b/ngraph_bridge/enable_variable_ops/ops/ngraph_ops.cc index d3cb26e98..b4dafe6e0 100644 --- a/ngraph_bridge/enable_variable_ops/ops/ngraph_ops.cc +++ b/ngraph_bridge/enable_variable_ops/ops/ngraph_ops.cc @@ -81,6 +81,7 @@ REGISTER_OP("NGraphEncapsulate") .Attr("ngraph_cluster: int") .Attr("ngraph_graph_id: int") .Attr("ngraph_backend: string") + .Attr("ngraph_device_id: string") .SetIsStateful() .Doc("nGraph Encapsulation Op. For use by the nGraph JIT only."); diff --git a/ngraph_bridge/grappler/ngraph_optimizer.cc b/ngraph_bridge/grappler/ngraph_optimizer.cc index c24802467..1ed6f6033 100644 --- a/ngraph_bridge/grappler/ngraph_optimizer.cc +++ b/ngraph_bridge/grappler/ngraph_optimizer.cc @@ -43,22 +43,22 @@ namespace ngraph_bridge { Status NgraphOptimizer::Init( const tensorflow::RewriterConfig_CustomGraphOptimizer* config) { const auto params = config->parameter_map(); - if (params.count("ngraph_backend")) { - config_backend_name = params.at("ngraph_backend").s(); - NGRAPH_VLOG(3) << config_backend_name; - std::vector additional_attributes = - BackendManager::GetBackendAdditionalAttributes(config_backend_name); - for (size_t i = 0; i < additional_attributes.size(); i++) { - if (params.count(additional_attributes[i])) { - config_map["_ngraph_" + additional_attributes[i]] = - params.at(additional_attributes[i]).s(); - NGRAPH_VLOG(3) << additional_attributes[i] << " " - << config_map["_ngraph_" + additional_attributes[i]]; - } + for (size_t i = 0; i < compulsory_attrs.size(); i++) { + if (params.count(compulsory_attrs[i]) == 0) 
{ + NGRAPH_VLOG(0) << "NGTF_OPTIMIZER: Compulsory attribute " + << compulsory_attrs[i] << " not found."; + return errors::Internal("NGTF_OPTIMIZER: Missing compulsory attributes."); + } + } + config_backend_name = params.at("ngraph_backend").s(); + NGRAPH_VLOG(3) << "Backend name from config: " << config_backend_name; + for (auto i : params) { + if (i.first != "ngraph_backend") { + config_map[(i.first == "device_id" ? "" : "_") + std::string("ngraph_") + + i.first] = i.second.s(); + NGRAPH_VLOG(3) << "Attribute: " << i.first + << " Value: " << i.second.s(); } - } else { - NGRAPH_VLOG(5) - << "NGTF_OPTIMIZER: parameter_map does not have ngraph_backend"; } return Status::OK(); } @@ -194,7 +194,7 @@ Status NgraphOptimizer::Optimize(tensorflow::grappler::Cluster* cluster, } // Get backend + its configurations, to be attached to the nodes - // Precedence Order: RewriteConfig > Env Variable > BackendManager + // using RewriteConfig string backend_name; if (!config_backend_name.empty()) { if (!BackendManager::IsSupportedBackend(config_backend_name)) { @@ -203,16 +203,6 @@ Status NgraphOptimizer::Optimize(tensorflow::grappler::Cluster* cluster, } backend_name = config_backend_name; NGRAPH_VLOG(1) << "Setting backend from the RewriteConfig " << backend_name; - } else { - TF_RETURN_IF_ERROR( - BackendManager::GetCurrentlySetBackendName(&backend_name)); - // splits into {"ngraph_backend", "_ngraph_device_config"} - config_map = BackendManager::GetBackendAttributeValues( - backend_name); // SplitBackendConfig - backend_name = config_map.at("ngraph_backend"); - // config_map in EncapsulateClusters is not expected to contain - // ngraph_backend - config_map.erase("ngraph_backend"); } NGRAPH_VLOG(0) << "NGraph using backend: " << backend_name; diff --git a/ngraph_bridge/grappler/ngraph_optimizer.h b/ngraph_bridge/grappler/ngraph_optimizer.h index 780e47b2f..7495c4e99 100644 --- a/ngraph_bridge/grappler/ngraph_optimizer.h +++ 
b/ngraph_bridge/grappler/ngraph_optimizer.h @@ -74,6 +74,7 @@ class NgraphOptimizer : public tensorflow::grappler::CustomGraphOptimizer { private: std::string config_backend_name; std::unordered_map config_map; + std::vector compulsory_attrs = {"ngraph_backend", "device_id"}; void DumpGraphs(Graph&, int, std::string, std::string); diff --git a/ngraph_bridge/ngraph_backend_config.cc b/ngraph_bridge/ngraph_backend_config.cc deleted file mode 100644 index 9bb123a3e..000000000 --- a/ngraph_bridge/ngraph_backend_config.cc +++ /dev/null @@ -1,141 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *******************************************************************************/ - -#include "ngraph_bridge/ngraph_backend_config.h" - -using namespace std; - -namespace tensorflow { - -namespace ngraph_bridge { - -BackendConfig::BackendConfig(const string& backend_name) { - NGRAPH_VLOG(3) << "BackendConfig() "; - backend_name_ = backend_name; - additional_attributes_ = {"device_config"}; -} - -string BackendConfig::Join( - const unordered_map& additional_parameters) { - // If device_config is not found throw an error - try { - additional_parameters.at("device_config"); - } catch (std::out_of_range e1) { - throw std::out_of_range("Attribute device_config not found"); - } - string backend_name = backend_name_; - if (additional_parameters.at("device_config") != "") { - backend_name = - backend_name + ":" + additional_parameters.at("device_config"); - } - return backend_name; -} - -unordered_map BackendConfig::Split( - const string& backend_config) { - unordered_map backend_parameters; - - int delimiter_index = backend_config.find(':'); - if (delimiter_index < 0) { - // ":" not found - backend_parameters["ngraph_backend"] = backend_config; - backend_parameters["_ngraph_device_config"] = ""; - } else { - backend_parameters["ngraph_backend"] = - backend_config.substr(0, delimiter_index); - backend_parameters["_ngraph_device_config"] = - backend_config.substr(delimiter_index + 1); - } - - NGRAPH_VLOG(3) << "Got Backend Name " << backend_parameters["ngraph_backend"]; - NGRAPH_VLOG(3) << "Got Device Config " - << backend_parameters["_ngraph_device_config"]; - - return backend_parameters; -} - -vector BackendConfig::GetAdditionalAttributes() { - return BackendConfig::additional_attributes_; -} - -BackendConfig::~BackendConfig() { - NGRAPH_VLOG(2) << "BackendConfig::~BackendConfig() DONE"; -}; - -// BackendNNPIConfig -// The NNPI backend is not supposed to specify the config parameters -// using the ENV variable or the script. 
NNPI backend is expected -// to use the RewriterConfig, hence the Split API is not implemented. -BackendNNPIConfig::BackendNNPIConfig() : BackendConfig("NNPI") { - additional_attributes_ = {"device_id", "ice_cores", "max_batch_size"}; -} - -string BackendNNPIConfig::Join( - const unordered_map& additional_parameters) { - // If device_id is not found throw an error - try { - additional_parameters.at("device_id"); - } catch (std::out_of_range e1) { - throw std::out_of_range("Attribute device_id not found"); - } - string backend_name = backend_name_; - if (additional_parameters.at("device_id") != "") { - backend_name = backend_name + ":" + additional_parameters.at("device_id"); - } - return backend_name; -} - -BackendNNPIConfig::~BackendNNPIConfig() { - NGRAPH_VLOG(3) << "BackendNNPIConfig::~BackendNNPIConfig() DONE"; -}; - -// BackendInterpreterConfig -BackendInterpreterConfig::BackendInterpreterConfig() - : BackendConfig("INTERPRETER") { - additional_attributes_ = {"test_echo"}; -} - -string BackendInterpreterConfig::Join( - const unordered_map& additional_parameters) { - NGRAPH_VLOG(3) << "BackendInterpreterConfig::Join - return the backend name"; - return backend_name_; -} - -unordered_map BackendInterpreterConfig::Split( - const string& backend_config) { - unordered_map backend_parameters; - - int delimiter_index = backend_config.find(':'); - if (delimiter_index < 0) { - // ":" not found - backend_parameters["ngraph_backend"] = backend_config; - backend_parameters["_ngraph_test_echo"] = ""; - } else { - backend_parameters["ngraph_backend"] = - backend_config.substr(0, delimiter_index); - backend_parameters["_ngraph_test_echo"] = - backend_config.substr(delimiter_index + 1); - } - return backend_parameters; -} - -BackendInterpreterConfig::~BackendInterpreterConfig() { - NGRAPH_VLOG(3) - << "BackendInterpreterConfig::~BackendInterpreterConfig() DONE"; -}; - -} // namespace ngraph_bridge -} // namespace tensorflow \ No newline at end of file diff --git 
a/ngraph_bridge/ngraph_backend_config.h b/ngraph_bridge/ngraph_backend_config.h deleted file mode 100644 index f8f7ff355..000000000 --- a/ngraph_bridge/ngraph_backend_config.h +++ /dev/null @@ -1,68 +0,0 @@ -/******************************************************************************* - * Copyright 2019 Intel Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *******************************************************************************/ - -#ifndef NGRAPH_TF_BRIDGE_BACKEND_CONFIG_H_ -#define NGRAPH_TF_BRIDGE_BACKEND_CONFIG_H_ - -#include - -#include "tensorflow/core/lib/core/errors.h" - -#include "logging/ngraph_log.h" -using namespace std; - -namespace tensorflow { - -namespace ngraph_bridge { - -class BackendConfig { - public: - BackendConfig() = delete; - BackendConfig(const string& backend_name); - vector GetAdditionalAttributes(); - - virtual unordered_map Split(const string& backend_config); - virtual string Join( - const unordered_map& additional_parameters); - virtual ~BackendConfig(); - - protected: - string backend_name_; - vector additional_attributes_; -}; - -class BackendNNPIConfig : public BackendConfig { - public: - BackendNNPIConfig(); - string Join( - const unordered_map& additional_parameters) override; - virtual ~BackendNNPIConfig(); -}; - -class BackendInterpreterConfig : public BackendConfig { - public: - BackendInterpreterConfig(); - unordered_map Split(const string& backend_config) override; - string Join( - const 
unordered_map& additional_parameters) override; - virtual ~BackendInterpreterConfig(); -}; - -} // namespace ngraph_bridge -} // namespace tensorflow - -#endif -// NGRAPH_TF_BRIDGE_BACKEND_CONFIG_H \ No newline at end of file diff --git a/ngraph_bridge/ngraph_backend_manager.cc b/ngraph_bridge/ngraph_backend_manager.cc index dc799ddc3..332bd0979 100644 --- a/ngraph_bridge/ngraph_backend_manager.cc +++ b/ngraph_bridge/ngraph_backend_manager.cc @@ -34,9 +34,6 @@ map BackendManager::ng_backend_map_; mutex BackendManager::ng_backend_map_mutex_; map BackendManager::ref_count_each_backend_; -unordered_map> - BackendManager::ng_backendconfig_map_; - Status BackendManager::SetBackendName(const string& backend_name) { std::lock_guard lock(BackendManager::ng_backend_name_mutex_); if (backend_name.empty() || !IsSupportedBackend(backend_name)) { @@ -171,53 +168,37 @@ Status BackendManager::GetCurrentlySetBackendName(string* backend_name) { return Status::OK(); }; -// Backend Config functions -// BackendConfig is expected to be a readonly class -// hence only locked at creation and not during later access -std::unique_ptr& BackendManager::GetBackendConfig( - const string& backend_name) { - std::lock_guard lock(BackendManager::ng_backend_map_mutex_); - auto itr = BackendManager::ng_backendconfig_map_.find(backend_name); - if (itr == BackendManager::ng_backendconfig_map_.end()) { - if (backend_name == "NNPI") { - BackendManager::ng_backendconfig_map_.insert(std::make_pair( - backend_name, - std::unique_ptr(new BackendNNPIConfig()))); - } - if (backend_name == "INTERPRETER") { - BackendManager::ng_backendconfig_map_.insert(std::make_pair( - backend_name, std::unique_ptr( - new BackendInterpreterConfig()))); - } else { - BackendManager::ng_backendconfig_map_.insert(std::make_pair( - backend_name, - std::unique_ptr(new BackendConfig(backend_name)))); - } - } - return BackendManager::ng_backendconfig_map_.at(backend_name); -} - -vector BackendManager::GetBackendAdditionalAttributes( 
- const string& backend_name) { - return BackendManager::GetBackendConfig(backend_name) - ->GetAdditionalAttributes(); -} - +// Split unordered_map BackendManager::GetBackendAttributeValues( const string& backend_config) { unordered_map backend_parameters; - string backend_name = backend_config.substr(0, backend_config.find(':')); - NGRAPH_VLOG(3) << "Got Backend Name " << backend_name; + int delimiter_index = backend_config.find(':'); + if (delimiter_index < 0) { + // ":" not found + backend_parameters["ngraph_backend"] = backend_config; + backend_parameters["ngraph_device_id"] = ""; + } else { + backend_parameters["ngraph_backend"] = + backend_config.substr(0, delimiter_index); + backend_parameters["ngraph_device_id"] = + backend_config.substr(delimiter_index + 1); + } + + NGRAPH_VLOG(3) << "Got Backend Name " << backend_parameters["ngraph_backend"]; + NGRAPH_VLOG(3) << "Got Device Id " << backend_parameters["ngraph_device_id"]; - return BackendManager::GetBackendConfig(backend_name)->Split(backend_config); + return backend_parameters; } -string BackendManager::GetBackendCreationString( - const string& backend_name, - const unordered_map& additional_attribute_map) { - return BackendManager::GetBackendConfig(backend_name) - ->Join(additional_attribute_map); +// Join +string BackendManager::GetBackendCreationString(const string& backend_name, + const string& device_id) { + if (device_id != "") { + return backend_name + ":" + device_id; + } else { + return backend_name; + } } } // namespace ngraph_bridge diff --git a/ngraph_bridge/ngraph_backend_manager.h b/ngraph_bridge/ngraph_backend_manager.h index 5c227b9bf..a0b1141da 100644 --- a/ngraph_bridge/ngraph_backend_manager.h +++ b/ngraph_bridge/ngraph_backend_manager.h @@ -31,7 +31,6 @@ #include "ngraph/runtime/backend_manager.hpp" #include "logging/ngraph_log.h" -#include "ngraph_bridge/ngraph_backend_config.h" using namespace std; namespace ng = ngraph; @@ -116,15 +115,10 @@ class BackendManager { 
GetBackendAttributeValues( // SplitBackendConfig const string& backend_config); - // Given a backend name and list of attributes + // Given a backend name and device id // joins them into a string to create ngraph backend - // For e.g. - // 1. GetBackendCreationString("GPU", {"_ngraph_device_config", "2"}) - // returns "GPU:2" - // throws an error if the required attributes are not present in the map - static string GetBackendCreationString( - const string& backend_name, - const unordered_map& additional_attribute_map); + static string GetBackendCreationString(const string& backend_name, + const string& device_id); ~BackendManager(); @@ -136,17 +130,8 @@ class BackendManager { static map ng_backend_map_; static mutex ng_backend_map_mutex_; - // map of cached backend config objects - static unordered_map> - ng_backendconfig_map_; - static mutex ng_backendconfig_map_mutex_; - // Map of backends and their reference counts static std::map ref_count_each_backend_; - - // utility functions - static std::unique_ptr& GetBackendConfig( - const string& backend_name); }; } // namespace ngraph_bridge diff --git a/ngraph_bridge/ngraph_encapsulate_op.cc b/ngraph_bridge/ngraph_encapsulate_op.cc index 6421b7d95..735625299 100644 --- a/ngraph_bridge/ngraph_encapsulate_op.cc +++ b/ngraph_bridge/ngraph_encapsulate_op.cc @@ -20,6 +20,7 @@ #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/optimization_registry.h" +#include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" @@ -159,25 +160,31 @@ class NGraphEncapsulateOp : public OpKernel { // Set the backend type for the op std::string backend_name; OP_REQUIRES_OK(ctx, ctx->GetAttr("ngraph_backend", &backend_name)); + std::string device_id; + OP_REQUIRES_OK(ctx, ctx->GetAttr("ngraph_device_id", 
&device_id)); // Get the optional attributes - std::vector additional_attributes = - BackendManager::GetBackendAdditionalAttributes(backend_name); std::unordered_map additional_attribute_map; - for (size_t i = 0; i < additional_attributes.size(); i++) { - std::string val; - // Append _ngraph_ to the additional attributes since they - // are added as optional attributes with a `ngraph` prefix - // to the encapsulate node - std::string attr = "_ngraph_" + additional_attributes[i]; - // If an attribute does not exist, TF will return a non-ok status - OP_REQUIRES_OK(ctx, ctx->GetAttr(attr, &val)); - additional_attribute_map.insert({additional_attributes[i], val}); + auto node_def = ctx->def(); + auto additional_attributes = node_def.attr(); + for (auto itx : additional_attributes) { + // Find the optional attributes to be sent to the backend. + // The optional attributes have '_ngraph_' appended to the start + // so we need to get rid of that and only send the remaining string + // since the backend will only look for that. + // '_ngraph_' is only appended for the bridge. + // For e.g. 
_ngraph_ice_cores --> ice_cores + if (itx.first.rfind("_ngraph_", 0) == 0) { + NGRAPH_VLOG(4) << "Attribute: " << itx.first.substr(strlen("_ngraph_")) + << " Value: " << itx.second.s(); + additional_attribute_map.insert( + {itx.first.substr(strlen("_ngraph_")), itx.second.s()}); + } } - // Concatenate the backend_name:backend_config + // Concatenate the backend_name:device_id try { - m_op_backend_name = BackendManager::GetBackendCreationString( - backend_name, additional_attribute_map); + m_op_backend_name = + BackendManager::GetBackendCreationString(backend_name, device_id); } catch (const std::exception& exp) { Status status = errors::Internal( "Caught exception while creating backend string ", exp.what(), "\n"); diff --git a/ngraph_bridge/ops/ngraph_ops.cc b/ngraph_bridge/ops/ngraph_ops.cc index f7372a319..ff91415b4 100644 --- a/ngraph_bridge/ops/ngraph_ops.cc +++ b/ngraph_bridge/ops/ngraph_ops.cc @@ -29,6 +29,7 @@ REGISTER_OP("NGraphEncapsulate") .Attr("ngraph_cluster: int") .Attr("ngraph_graph_id: int") .Attr("ngraph_backend: string") + .Attr("ngraph_device_id: string") .SetIsStateful() .Doc("nGraph Encapsulation Op. 
For use by the nGraph JIT only."); diff --git a/python/ngraph_bridge/__init__.in.py b/python/ngraph_bridge/__init__.in.py index 307e94671..405bf9606 100644 --- a/python/ngraph_bridge/__init__.in.py +++ b/python/ngraph_bridge/__init__.in.py @@ -199,17 +199,17 @@ def cxx11_abi_flag(): def is_grappler_enabled(): return ngraph_bridge_lib.ngraph_tf_is_grappler_enabled() -def update_config(config): +def update_config(config, backend_name = "CPU", device_id = ""): #updating session config if grappler is enabled if(ngraph_bridge_lib.ngraph_tf_is_grappler_enabled()): - rewrite_options = rewriter_config_pb2.RewriterConfig( - meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE, - min_graph_nodes=-1, - custom_optimizers=[ - rewriter_config_pb2.RewriterConfig.CustomGraphOptimizer( - name="ngraph-optimizer") - ]) - config.MergeFrom(tf.ConfigProto(graph_options=tf.GraphOptions(rewrite_options=rewrite_options))) + rewriter_options = rewriter_config_pb2.RewriterConfig() + rewriter_options.meta_optimizer_iterations=(rewriter_config_pb2.RewriterConfig.ONE) + rewriter_options.min_graph_nodes=-1 + ngraph_optimizer = rewriter_options.custom_optimizers.add() + ngraph_optimizer.name = "ngraph-optimizer" + ngraph_optimizer.parameter_map["ngraph_backend"].s = backend_name.encode() + ngraph_optimizer.parameter_map["device_id"].s = device_id.encode() + config.MergeFrom(tf.ConfigProto(graph_options=tf.GraphOptions(rewrite_options=rewriter_options))) # For reference, if we want to provide configuration support(backend parameters) # in a python script using the ngraph-optimizer # rewriter_options = rewriter_config_pb2.RewriterConfig() @@ -217,8 +217,8 @@ def update_config(config): # rewriter_options.min_graph_nodes=-1 # ngraph_optimizer = rewriter_options.custom_optimizers.add() # ngraph_optimizer.name = "ngraph-optimizer" - # ngraph_optimizer.parameter_map["ngraph_backend"].s = b'NNPI' - # ngraph_optimizer.parameter_map["device_id"].s = b'1' + # 
ngraph_optimizer.parameter_map["ngraph_backend"].s = backend_name.encode() + # ngraph_optimizer.parameter_map["device_id"].s = device_id.encode() # ngraph_optimizer.parameter_map["max_batch_size"].s = b'64' # ngraph_optimizer.parameter_map["ice_cores"].s = b'12' # config.MergeFrom(tf.ConfigProto(graph_options=tf.GraphOptions(rewrite_options=rewriter_options))) diff --git a/test/ci/buildkite/test_runner.py b/test/ci/buildkite/test_runner.py index d4276c5de..d0612e392 100755 --- a/test/ci/buildkite/test_runner.py +++ b/test/ci/buildkite/test_runner.py @@ -86,6 +86,7 @@ def main(): raise Exception("Need to specify --artifacts_dir") # Set the backend if specified + # NOTE: This way of backend setting will not work with grappler if (arguments.backend): os.environ['NGRAPH_TF_BACKEND'] = arguments.backend diff --git a/test/graph_rewrites/backend_manager_test.cc b/test/graph_rewrites/backend_manager_test.cc index b59df6de7..52d5a6d02 100644 --- a/test/graph_rewrites/backend_manager_test.cc +++ b/test/graph_rewrites/backend_manager_test.cc @@ -194,21 +194,6 @@ TEST(BackendManager, BackendClustering) { ASSERT_NE(A_cluster, B_cluster); } -// Test GetBackendAdditionalAttributes API -TEST(BackendManager, GetBackendAdditionalAttributes) { - vector default_backend_optional_attrs = {"device_config"}; - vector nnpi_backend_optional_attrs = {"device_id", "ice_cores", - "max_batch_size"}; - - auto cpu_options = BackendManager::GetBackendAdditionalAttributes("CPU"); - auto nnpi_options = BackendManager::GetBackendAdditionalAttributes("NNPI"); - auto gpu_options = BackendManager::GetBackendAdditionalAttributes("GPU"); - - ASSERT_EQ(cpu_options, default_backend_optional_attrs); - ASSERT_EQ(nnpi_options, nnpi_backend_optional_attrs); - ASSERT_EQ(gpu_options, default_backend_optional_attrs); -} - // Test GetBackendAttributeValues API TEST(BackendManager, GetBackendAttributeValues) { auto cpu_options = BackendManager::GetBackendAttributeValues("CPU"); @@ -218,67 +203,42 @@ 
TEST(BackendManager, GetBackendAttributeValues) { BackendManager::GetBackendAttributeValues("PLAIDML:device:567:892_34"); ASSERT_NE(cpu_options.find("ngraph_backend"), cpu_options.end()); - ASSERT_NE(cpu_options.find("_ngraph_device_config"), cpu_options.end()); + ASSERT_NE(cpu_options.find("ngraph_device_id"), cpu_options.end()); ASSERT_EQ(cpu_options["ngraph_backend"], "CPU"); - ASSERT_EQ(cpu_options["_ngraph_device_config"], ""); + ASSERT_EQ(cpu_options["ngraph_device_id"], ""); ASSERT_NE(nnpi_options.find("ngraph_backend"), nnpi_options.end()); - ASSERT_NE(nnpi_options.find("_ngraph_device_config"), nnpi_options.end()); + ASSERT_NE(nnpi_options.find("ngraph_device_id"), nnpi_options.end()); ASSERT_EQ(nnpi_options["ngraph_backend"], "NNPI"); - ASSERT_EQ(nnpi_options["_ngraph_device_config"], "3,5,6"); + ASSERT_EQ(nnpi_options["ngraph_device_id"], "3,5,6"); ASSERT_NE(gpu_options.find("ngraph_backend"), gpu_options.end()); - ASSERT_NE(gpu_options.find("_ngraph_device_config"), gpu_options.end()); + ASSERT_NE(gpu_options.find("ngraph_device_id"), gpu_options.end()); ASSERT_EQ(gpu_options["ngraph_backend"], "GPU"); - ASSERT_EQ(gpu_options["_ngraph_device_config"], "5"); + ASSERT_EQ(gpu_options["ngraph_device_id"], "5"); ASSERT_NE(plaidml_options.find("ngraph_backend"), plaidml_options.end()); - ASSERT_NE(plaidml_options.find("_ngraph_device_config"), - plaidml_options.end()); + ASSERT_NE(plaidml_options.find("ngraph_device_id"), plaidml_options.end()); ASSERT_EQ(plaidml_options["ngraph_backend"], "PLAIDML"); - ASSERT_EQ(plaidml_options["_ngraph_device_config"], "device:567:892_34"); + ASSERT_EQ(plaidml_options["ngraph_device_id"], "device:567:892_34"); } // Test GetBackendCreationString API TEST(BackendManager, GetBackendCreationString) { - unordered_map cpu_map = {{"device_config", ""}}; - unordered_map nnpi_map = {{"device_id", "5"}}; - unordered_map gpu_map = {{"device_config", "678"}}; + string cpu_device_id = ""; + string nnpi_device_id = "5"; + string 
gpu_device_id = "678"; - auto cpu_backend = BackendManager::GetBackendCreationString("CPU", cpu_map); + auto cpu_backend = + BackendManager::GetBackendCreationString("CPU", cpu_device_id); auto nnpi_backend = - BackendManager::GetBackendCreationString("NNPI", nnpi_map); - auto gpu_backend = BackendManager::GetBackendCreationString("GPU", gpu_map); + BackendManager::GetBackendCreationString("NNPI", nnpi_device_id); + auto gpu_backend = + BackendManager::GetBackendCreationString("GPU", gpu_device_id); ASSERT_EQ(cpu_backend, "CPU"); ASSERT_EQ(nnpi_backend, "NNPI:5"); ASSERT_EQ(gpu_backend, "GPU:678"); - - // throw errors - unordered_map test_empty_map = {}; - // "device_config" is not valid for NNPI - unordered_map test_missing_config_nnpi = { - {"device_config", "345"}}; - // "device_id" is not valid for default configs - unordered_map test_missing_config_default = { - {"device_id", "45"}}; - - ASSERT_THROW(BackendManager::GetBackendCreationString("CPU", test_empty_map), - std::out_of_range); - ASSERT_THROW(BackendManager::GetBackendCreationString("NNPI", test_empty_map), - std::out_of_range); - ASSERT_THROW(BackendManager::GetBackendCreationString("GPU", test_empty_map), - std::out_of_range); - - ASSERT_THROW(BackendManager::GetBackendCreationString( - "CPU", test_missing_config_default), - std::out_of_range); - ASSERT_THROW(BackendManager::GetBackendCreationString( - "NNPI", test_missing_config_nnpi), - std::out_of_range); - ASSERT_THROW(BackendManager::GetBackendCreationString( - "GPU", test_missing_config_default), - std::out_of_range); } } // namespace testing diff --git a/test/graph_rewrites/config_for_grappler_test.cc b/test/graph_rewrites/config_for_grappler_test.cc index ea180444a..4207b7b22 100644 --- a/test/graph_rewrites/config_for_grappler_test.cc +++ b/test/graph_rewrites/config_for_grappler_test.cc @@ -26,14 +26,12 @@ #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/public/session.h" +#include "logging/tf_graph_writer.h" 
#include "ngraph_bridge/ngraph_assign_clusters.h" -#include "ngraph_bridge/ngraph_backend_config.h" #include "ngraph_bridge/ngraph_backend_manager.h" #include "ngraph_bridge/ngraph_mark_for_clustering.h" #include "test/test_utilities.h" -#include "logging/tf_graph_writer.h" - using namespace std; namespace ng = ngraph; @@ -44,6 +42,7 @@ namespace ngraph_bridge { namespace testing { #define ASSERT_OK(x) ASSERT_EQ((x), ::tensorflow::Status::OK()); +#define ASSERT_NOT_OK(x) ASSERT_NE((x), ::tensorflow::Status::OK()); // This test can only be run when nGraph-bridge is built with grappler // When running with other modes, grappler's ngraph-optimizer is not @@ -73,8 +72,8 @@ TEST(GrapplerConfig, RConfig1) { ConfigProto config_proto; auto backend_name = AttrValue(); backend_name.set_s("CPU"); - auto device_config = AttrValue(); - device_config.set_s("1"); + auto device_id = AttrValue(); + device_id.set_s("1"); auto& rewriter_config = *config_proto.mutable_graph_options()->mutable_rewrite_options(); rewriter_config.add_optimizers("ngraph-optimizer"); @@ -83,7 +82,7 @@ TEST(GrapplerConfig, RConfig1) { auto* custom_config = rewriter_config.add_custom_optimizers(); custom_config->set_name("ngraph-optimizer"); (*custom_config->mutable_parameter_map())["ngraph_backend"] = backend_name; - (*custom_config->mutable_parameter_map())["device_config"] = device_config; + (*custom_config->mutable_parameter_map())["device_id"] = device_id; // Run grappler tensorflow::grappler::MetaOptimizer optimizer(nullptr, config_proto); @@ -107,18 +106,18 @@ TEST(GrapplerConfig, RConfig1) { ng_encap = node; } ASSERT_NE(ng_encap, nullptr); - string ng_backend, ng_device_config; + string ng_backend, ng_device_id; ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "ngraph_backend", &ng_backend)); - ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "_ngraph_device_config", - &ng_device_config)); + ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "ngraph_device_id", &ng_device_id)); ASSERT_EQ(ng_backend, "CPU"); - 
ASSERT_EQ(ng_device_config, "1"); + ASSERT_EQ(ng_device_id, "1"); } // Though Backend is set via BackendManager -// The backend set via RewriterConfig takes precedence +// The backend set via RewriterConfig takes effect +// since that is the only way of setting backend with grappler TEST(GrapplerConfig, RConfig2) { // Create Graph Scope root = Scope::NewRootScope(); @@ -137,7 +136,8 @@ TEST(GrapplerConfig, RConfig2) { } // set backend - // Though we set the backend, the rewriter-config takes precedence + // Though we set the backend, the rewriter-config takes effect + // since that is the only way of setting backend with grappler ASSERT_OK(BackendManager::SetBackendName("INTERPRETER")); // Create GraphDef and Grappler @@ -146,8 +146,8 @@ ConfigProto config_proto; auto backend_name = AttrValue(); backend_name.set_s("CPU"); - auto device_config = AttrValue(); - device_config.set_s("1"); + auto device_id = AttrValue(); + device_id.set_s("1"); auto& rewriter_config = *config_proto.mutable_graph_options()->mutable_rewrite_options(); rewriter_config.add_optimizers("ngraph-optimizer"); @@ -156,7 +156,7 @@ auto* custom_config = rewriter_config.add_custom_optimizers(); custom_config->set_name("ngraph-optimizer"); (*custom_config->mutable_parameter_map())["ngraph_backend"] = backend_name; - (*custom_config->mutable_parameter_map())["device_config"] = device_config; + (*custom_config->mutable_parameter_map())["device_id"] = device_id; // Run grappler tensorflow::grappler::MetaOptimizer optimizer(nullptr, config_proto); @@ -180,21 +180,21 @@ ng_encap = node; } ASSERT_NE(ng_encap, nullptr); - string ng_backend, ng_device_config; + string ng_backend, ng_device_id; ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "ngraph_backend", &ng_backend)); - ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "_ngraph_device_config", - &ng_device_config)); + ASSERT_OK(GetNodeAttr(ng_encap->attrs(), 
"ngraph_device_id", &ng_device_id)); ASSERT_EQ(ng_backend, "CPU"); - ASSERT_EQ(ng_device_config, "1"); + ASSERT_EQ(ng_device_id, "1"); // Clean up ASSERT_OK(BackendManager::SetBackendName("CPU")); } // Though Backend is set via NGRAPH_TF_BACKEND -// The backend set via RewriterConfig takes precedence +// The backend set via RewriterConfig takes effect +// since that is the only way of setting backend with grappler TEST(GrapplerConfig, RConfig3) { // If NGRAPH_TF_BACKEND is set, unset it const unordered_map& env_map = StoreEnv(); @@ -227,7 +227,7 @@ ASSERT_EQ("NOP", check_backend); // Though we set the backend and NGRAPH_TF_BACKEND - // the rewriter-config takes precedence + // the rewriter-config takes effect // Create GraphDef and Grappler grappler::GrapplerItem item; @@ -235,8 +235,8 @@ ConfigProto config_proto; auto backend_name = AttrValue(); backend_name.set_s("CPU"); - auto device_config = AttrValue(); - device_config.set_s("1"); + auto device_id = AttrValue(); + device_id.set_s("1"); auto& rewriter_config = *config_proto.mutable_graph_options()->mutable_rewrite_options(); rewriter_config.add_optimizers("ngraph-optimizer"); @@ -245,7 +245,7 @@ auto* custom_config = rewriter_config.add_custom_optimizers(); custom_config->set_name("ngraph-optimizer"); (*custom_config->mutable_parameter_map())["ngraph_backend"] = backend_name; - (*custom_config->mutable_parameter_map())["device_config"] = device_config; + (*custom_config->mutable_parameter_map())["device_id"] = device_id; // Run grappler tensorflow::grappler::MetaOptimizer optimizer(nullptr, config_proto); @@ -269,14 +269,13 @@ ng_encap = node; } ASSERT_NE(ng_encap, nullptr); - string ng_backend, ng_device_config; + string ng_backend, ng_device_id; ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "ngraph_backend", &ng_backend)); - ASSERT_OK(GetNodeAttr(ng_encap->attrs(), 
"_ngraph_device_config", - &ng_device_config)); + ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "ngraph_device_id", &ng_device_id)); ASSERT_EQ(ng_backend, "CPU"); - ASSERT_EQ(ng_device_config, "1"); + ASSERT_EQ(ng_device_id, "1"); // Clean up ASSERT_OK(BackendManager::SetBackendName("CPU")); @@ -307,8 +306,10 @@ TEST(GrapplerConfig, RConfig4) { ConfigProto config_proto; auto backend_name = AttrValue(); backend_name.set_s("INTERPRETER"); + auto device_id = AttrValue(); + device_id.set_s("5"); auto test_echo = AttrValue(); - test_echo.set_s("5"); + test_echo.set_s("hi"); auto& rewriter_config = *config_proto.mutable_graph_options()->mutable_rewrite_options(); rewriter_config.add_optimizers("ngraph-optimizer"); @@ -317,6 +318,7 @@ TEST(GrapplerConfig, RConfig4) { auto* custom_config = rewriter_config.add_custom_optimizers(); custom_config->set_name("ngraph-optimizer"); (*custom_config->mutable_parameter_map())["ngraph_backend"] = backend_name; + (*custom_config->mutable_parameter_map())["device_id"] = device_id; (*custom_config->mutable_parameter_map())["test_echo"] = test_echo; // Run grappler @@ -340,13 +342,58 @@ TEST(GrapplerConfig, RConfig4) { ng_encap = node; } ASSERT_NE(ng_encap, nullptr); - string ng_backend, ng_test_echo; + string ng_backend, ng_test_echo, ng_device_id; ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "ngraph_backend", &ng_backend)); ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "_ngraph_test_echo", &ng_test_echo)); + ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "ngraph_device_id", &ng_device_id)); ASSERT_EQ(ng_backend, "INTERPRETER"); - ASSERT_EQ(ng_test_echo, "5"); + ASSERT_EQ(ng_test_echo, "hi"); + ASSERT_EQ(ng_device_id, "5"); +} + +// Test the failure case where the compulsory attribute device_id +// is not provided using the rewriter config +TEST(GrapplerConfig, RConfig5) { + // Create Graph + Scope root = Scope::NewRootScope(); + auto A = ops::Const(root.WithOpName("A"), {3.f, 2.f}); + auto B = ops::Const(root.WithOpName("B"), {3.f, 2.f}); + auto Add = 
ops::Add(root.WithOpName("Add"), A, B); + auto C = ops::Const(root.WithOpName("C"), {3.f, 2.f}); + auto Mul = ops::Mul(root.WithOpName("Mul"), Add, C); + + Graph graph(OpRegistry::Global()); + TF_CHECK_OK(root.ToGraph(&graph)); + + // set device specification + for (auto node : graph.op_nodes()) { + node->set_requested_device("CPU"); + } + + // Create GraphDef and Grappler + grappler::GrapplerItem item; + graph.ToGraphDef(&item.graph); + ConfigProto config_proto; + auto backend_name = AttrValue(); + backend_name.set_s("CPU"); + auto device_id = AttrValue(); + device_id.set_s("5"); + auto& rewriter_config = + *config_proto.mutable_graph_options()->mutable_rewrite_options(); + rewriter_config.add_optimizers("ngraph-optimizer"); + rewriter_config.set_min_graph_nodes(-1); + rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE); + auto* custom_config = rewriter_config.add_custom_optimizers(); + custom_config->set_name("ngraph-optimizer"); + (*custom_config->mutable_parameter_map())["ngraph_backend"] = backend_name; + + // Run grappler + tensorflow::grappler::MetaOptimizer optimizer(nullptr, config_proto); + GraphDef output; + + ASSERT_NOT_OK(optimizer.Optimize(nullptr, item, &output)); } } // namespace testing diff --git a/test/graph_rewrites/encapsulate_clusters_test.cc b/test/graph_rewrites/encapsulate_clusters_test.cc index 8d7c24257..e94914fad 100644 --- a/test/graph_rewrites/encapsulate_clusters_test.cc +++ b/test/graph_rewrites/encapsulate_clusters_test.cc @@ -80,7 +80,9 @@ TEST(EncapsulateClusters, PopulateLibrary) { g.AddEdge(node3, Graph::kControlSlot, sink, Graph::kControlSlot); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&g, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&g, 0, fdeflib_new, config_map)); int num_encapsulates = 0; int num_tf_nodes = 0; diff --git a/test/graph_rewrites/enter_in_catalog_test.cc 
b/test/graph_rewrites/enter_in_catalog_test.cc index 13dd0bfdc..c31872e5d 100644 --- a/test/graph_rewrites/enter_in_catalog_test.cc +++ b/test/graph_rewrites/enter_in_catalog_test.cc @@ -76,7 +76,9 @@ TEST(CatalogTest, SmallGraph1) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); ASSERT_OK(EnterInCatalog(&graph, 0)); bool remove = false; @@ -118,7 +120,9 @@ TEST(CatalogTest, SmallGraph2) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); ASSERT_OK(EnterInCatalog(&graph, 0)); bool remove = false; @@ -187,7 +191,9 @@ TEST(CatalogTest, SmallGraph3) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); ASSERT_OK(EnterInCatalog(&graph, 0)); // check if the _ngraph_remove attribute is added/not-added as expected @@ -251,7 +257,9 @@ TEST(CatalogTest, SmallGraph4) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; 
+ ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); ASSERT_OK(EnterInCatalog(&graph, 0)); string key; diff --git a/test/graph_rewrites/remove_ngraphassigns.cc b/test/graph_rewrites/remove_ngraphassigns.cc index 72fc0d815..b620caef6 100644 --- a/test/graph_rewrites/remove_ngraphassigns.cc +++ b/test/graph_rewrites/remove_ngraphassigns.cc @@ -70,7 +70,9 @@ TEST(RemoveNGraphAssigns, Graph1) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); // Get all the nodes in map [utility] map node_map; @@ -126,7 +128,9 @@ TEST(RemoveNGraphAssigns, Graph2) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); // clean up config::ngraph_set_disabled_ops(""); @@ -217,7 +221,9 @@ TEST(RemoveNGraphAssigns, Graph3) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); // Get all the nodes in map [utility] map node_map; @@ -314,7 +320,9 @@ TEST(RemoveNGraphAssigns, Graph4) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - 
ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); // clean up config::ngraph_set_disabled_ops(""); @@ -410,7 +418,9 @@ TEST(RemoveNGraphAssigns, Graph5) { ASSERT_OK(MarkForClustering(&graph, skip_these_nodes, "CPU")); ASSERT_OK(AssignClusters(&graph)); FunctionDefLibrary* fdeflib_new = new FunctionDefLibrary(); - ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, {})); + std::unordered_map config_map; + config_map["ngraph_device_id"] = ""; + ASSERT_OK(EncapsulateClusters(&graph, 0, fdeflib_new, config_map)); // Get all the nodes in map [utility] map node_map; diff --git a/test/grappler/benchmark_cnn.patch b/test/grappler/benchmark_cnn.patch index 39f4911dd..ebe2d42cd 100644 --- a/test/grappler/benchmark_cnn.patch +++ b/test/grappler/benchmark_cnn.patch @@ -1,15 +1,17 @@ diff --git a/scripts/tf_cnn_benchmarks/benchmark_cnn.py b/scripts/tf_cnn_benchmarks/benchmark_cnn.py -index d3b81d5..fd7aad1 100644 +index d3b81d5..f3620da 100644 --- a/scripts/tf_cnn_benchmarks/benchmark_cnn.py +++ b/scripts/tf_cnn_benchmarks/benchmark_cnn.py -@@ -667,6 +667,10 @@ def create_config_proto(params): +@@ -667,9 +667,9 @@ def create_config_proto(params): config.gpu_options.visible_device_list = str(hvd.local_rank()) if params.variable_update == 'collective_all_reduce': config.gpu_options.experimental.num_dev_to_dev_copy_streams = 2 -+ rewrite_options = rewriter_config_pb2.RewriterConfig( -+ meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE, -+ custom_optimizers=[rewriter_config_pb2.RewriterConfig.CustomGraphOptimizer(name="ngraph-optimizer")]) -+ config.graph_options.rewrite_options.MergeFrom(rewrite_options) - - return config +- +- return config +- ++ import ngraph_bridge ++ config_new = ngraph_bridge.update_config(config) ++ return config_new + def get_mode_from_params(params): + """Returns the mode in which this 
script is running. diff --git a/test/python/flib_graph_1.pbtxt b/test/python/flib_graph_1.pbtxt index 95efd5437..5662e47e2 100644 --- a/test/python/flib_graph_1.pbtxt +++ b/test/python/flib_graph_1.pbtxt @@ -103,7 +103,7 @@ node { } } attr { - key: "_ngraph_device_config" + key: "ngraph_device_id" value { s: "" } diff --git a/test/python/flib_graph_2.pbtxt b/test/python/flib_graph_2.pbtxt index 02ae0792c..8cc4f40fa 100644 --- a/test/python/flib_graph_2.pbtxt +++ b/test/python/flib_graph_2.pbtxt @@ -175,7 +175,7 @@ node { } } attr { - key: "_ngraph_device_config" + key: "ngraph_device_id" value { s: "" } diff --git a/test/python/tensorflow/tf_unittest_ngraph_with_grappler.patch b/test/python/tensorflow/tf_unittest_ngraph_with_grappler.patch index 5825bc2fb..fee35e927 100644 --- a/test/python/tensorflow/tf_unittest_ngraph_with_grappler.patch +++ b/test/python/tensorflow/tf_unittest_ngraph_with_grappler.patch @@ -1,23 +1,15 @@ diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py -index af1687c8ef..56bc84f87e 100644 +index 544190e23d..19e621478e 100644 --- a/tensorflow/python/framework/test_util.py +++ b/tensorflow/python/framework/test_util.py -@@ -1385,6 +1385,7 @@ class TensorFlowTestCase(googletest.TestCase): - self._threads = [] - self._tempdir = None - self._cached_session = None -+ import ngraph_bridge - - def setUp(self): - self._ClearCachedSession() -@@ -2449,6 +2450,10 @@ class TensorFlowTestCase(googletest.TestCase): +@@ -2737,7 +2737,9 @@ class TensorFlowTestCase(googletest.TestCase): rewriter_config_pb2.RewriterConfig.OFF) config.graph_options.rewrite_options.pin_to_host_optimization = ( rewriter_config_pb2.RewriterConfig.OFF) -+ rewrite_options = rewriter_config_pb2.RewriterConfig( -+ meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE, -+ custom_optimizers=[rewriter_config_pb2.RewriterConfig.CustomGraphOptimizer(name="ngraph-optimizer")]) -+ 
config.graph_options.rewrite_options.MergeFrom(rewrite_options) - return config +- return config ++ import ngraph_bridge ++ config_new = ngraph_bridge.update_config(config) ++ return config_new return ErrorLoggingSession(graph=graph, config=prepare_config(config)) + diff --git a/test/python/tensorflow/tf_unittest_runner.py b/test/python/tensorflow/tf_unittest_runner.py index 86b221d13..b4988758e 100644 --- a/test/python/tensorflow/tf_unittest_runner.py +++ b/test/python/tensorflow/tf_unittest_runner.py @@ -43,7 +43,7 @@ def main(): required.add_argument( '--tensorflow_path', help= - "Specify the path to Tensorflow source code. Eg:/localdisk/skantama/tf-ngraph/tensorflow \n", + "Specify the path to Tensorflow source code. Eg:ngraph-bridge/build_cmake/tensorflow \n", required=True) optional.add_argument( '--list_tests', diff --git a/test/python/test_updateconfig.py b/test/python/test_updateconfig.py new file mode 100644 index 000000000..bf1a6b3f9 --- /dev/null +++ b/test/python/test_updateconfig.py @@ -0,0 +1,48 @@ +# ============================================================================== +# Copyright 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""nGraph TensorFlow bridge update_config api test + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pytest +import os + +import tensorflow as tf +from tensorflow.core.protobuf import rewriter_config_pb2 + +from common import NgraphTest +import ngraph_bridge + + +class TestUpdateConfig(NgraphTest): + + @pytest.mark.skipif( + not ngraph_bridge.is_grappler_enabled(), reason='Only for Grappler') + def test_update_config(self): + config = tf.ConfigProto() + config.allow_soft_placement = True + config_new = ngraph_bridge.update_config(config) + rewriter_options = config_new.graph_options.rewrite_options + ngraph_optimizer_name = rewriter_options.custom_optimizers[0].name + assert ngraph_optimizer_name == 'ngraph-optimizer' + ngraph_optimizer = rewriter_options.custom_optimizers[0] + ngraph_optimizer.parameter_map["max_batch_size"].s = b'64' + ngraph_optimizer.parameter_map["ice_cores"].s = b'12' + assert config_new.__str__( + ) == 'allow_soft_placement: true\ngraph_options {\n rewrite_options {\n meta_optimizer_iterations: ONE\n min_graph_nodes: -1\n custom_optimizers {\n name: "ngraph-optimizer"\n parameter_map {\n key: "device_id"\n value {\n s: ""\n }\n }\n parameter_map {\n key: "ice_cores"\n value {\n s: "12"\n }\n }\n parameter_map {\n key: "max_batch_size"\n value {\n s: "64"\n }\n }\n parameter_map {\n key: "ngraph_backend"\n value {\n s: "CPU"\n }\n }\n }\n }\n}\n' diff --git a/tools/test_utils.py b/tools/test_utils.py index 220d01242..c8d4eacc4 100755 --- a/tools/test_utils.py +++ b/tools/test_utils.py @@ -294,6 +294,7 @@ def run_resnet50(build_dir): junit_script = os.path.abspath('%s/test/ci/junit-wrap.sh' % root_pwd) # Check to see if we need to patch the repo for Grappler + # benchmark_cnn.patch will only work for the CPU backend patch_file = os.path.abspath( os.path.join(ngraph_tf_src_dir, 
"test/grappler/benchmark_cnn.patch")) import ngraph_bridge @@ -359,6 +360,7 @@ def run_resnet50_from_artifacts(ngraph_tf_src_dir, artifact_dir, batch_size, call(['git', 'checkout', '4c7b09ad87bbfc4b1f89650bcee40b3fc5e7dfed']) # Check to see if we need to patch the repo for Grappler + # benchmark_cnn.patch will only work for the CPU backend patch_file = os.path.abspath( os.path.join(ngraph_tf_src_dir, "test/grappler/benchmark_cnn.patch")) import ngraph_bridge @@ -446,6 +448,7 @@ def run_resnet50_forward_pass(build_dir): junit_script = os.path.abspath('%s/test/ci/junit-wrap.sh' % root_pwd) # Check to see if we need to patch the repo for Grappler + # benchmark_cnn.patch will only work for the CPU backend patch_file = os.path.abspath( os.path.join(ngraph_tf_src_dir, "test/grappler/benchmark_cnn.patch")) import ngraph_bridge @@ -494,6 +497,7 @@ def run_resnet50_forward_pass_from_artifacts(ngraph_tf_src_dir, artifact_dir, call(['git', 'checkout', '4c7b09ad87bbfc4b1f89650bcee40b3fc5e7dfed']) # Check to see if we need to patch the repo for Grappler + # benchmark_cnn.patch will only work for the CPU backend patch_file = os.path.abspath( os.path.join(ngraph_tf_src_dir, "test/grappler/benchmark_cnn.patch")) import ngraph_bridge