Runtime model config reload gRPC API (#774)
* Added model config reload gRPC API

* Update model_service_impl.cc

* Update model_service.proto
Kindrat authored and Christopher Olston committed Mar 7, 2018
1 parent 5c3300e commit 79d3354
Showing 5 changed files with 99 additions and 27 deletions.
13 changes: 13 additions & 0 deletions tensorflow_serving/apis/BUILD
@@ -156,6 +156,18 @@ serving_go_grpc_library(
     deps = [":prediction_service_go_proto"],
 )
 
+serving_proto_library(
+    name = "model_management_proto",
+    srcs = ["model_management.proto"],
+    cc_api_version = 2,
+    go_api_version = 2,
+    java_api_version = 2,
+    deps = [
+        "//tensorflow_serving/config:model_server_config_proto",
+        "//tensorflow_serving/util:status_proto",
+    ],
+)
+
 serving_proto_library(
     name = "get_model_status_proto",
     srcs = ["get_model_status.proto"],
@@ -188,6 +200,7 @@ serving_proto_library(
     java_api_version = 2,
     deps = [
         ":get_model_status_proto",
+        ":model_management_proto",
     ],
 )
 
15 changes: 15 additions & 0 deletions tensorflow_serving/apis/model_management.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+
+import "tensorflow_serving/config/model_server_config.proto";
+import "tensorflow_serving/util/status.proto";
+
+package tensorflow.serving;
+option cc_enable_arenas = true;
+
+message ReloadConfigRequest {
+  ModelServerConfig config = 1;
+}
+
+message ReloadConfigResponse {
+  StatusProto status = 1;
+}
9 changes: 7 additions & 2 deletions tensorflow_serving/apis/model_service.proto
@@ -3,15 +3,20 @@ syntax = "proto3";
 option cc_enable_arenas = true;
 
 import "tensorflow_serving/apis/get_model_status.proto";
+import "tensorflow_serving/apis/model_management.proto";
 
 package tensorflow.serving;
 
-// ModelService provides access to information about model versions
-// that have been handled by the model server.
+// ModelService provides methods to query and update the state of the server, e.g.
+// which models/versions are being served.
 service ModelService {
   // Gets status of model. If the ModelSpec in the request does not specify
   // version, information about all versions of the model will be returned. If
   // the ModelSpec in the request does specify a version, the status of only
   // that version will be returned.
   rpc GetModelStatus(GetModelStatusRequest) returns (GetModelStatusResponse);
+
+  // Reloads the set of served models. The new config supersedes the old one, so if a
+  // model is omitted from the new config it will be unloaded and no longer served.
+  rpc HandleReloadConfigRequest(ReloadConfigRequest) returns (ReloadConfigResponse);
 }
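
For illustration, here is a minimal client-side sketch of calling the new RPC through the C++ stub generated from model_service.proto. It is not part of this commit: the server address (localhost:8500, the conventional model-server gRPC port), the model name, and the base path are placeholder assumptions.

    // Hypothetical client sketch -- not part of this commit. Assumes a model
    // server listening on localhost:8500 and a model under /models/my_model;
    // adjust for your deployment.
    #include <iostream>
    #include <memory>

    #include "grpc++/grpc++.h"
    #include "tensorflow_serving/apis/model_management.pb.h"
    #include "tensorflow_serving/apis/model_service.grpc.pb.h"

    int main() {
      auto channel = ::grpc::CreateChannel("localhost:8500",
                                           ::grpc::InsecureChannelCredentials());
      auto stub = tensorflow::serving::ModelService::NewStub(channel);

      // Build a ReloadConfigRequest whose ModelServerConfig holds a
      // ModelConfigList with a single entry.
      tensorflow::serving::ReloadConfigRequest request;
      auto* entry =
          request.mutable_config()->mutable_model_config_list()->add_config();
      entry->set_name("my_model");
      entry->set_base_path("/models/my_model");
      entry->set_model_platform("tensorflow");

      // Invoke the new RPC; the blocking stub returns the gRPC status.
      tensorflow::serving::ReloadConfigResponse response;
      ::grpc::ClientContext context;
      const ::grpc::Status status =
          stub->HandleReloadConfigRequest(&context, request, &response);
      if (!status.ok()) {
        std::cerr << "Reload failed: " << status.error_message() << std::endl;
        return 1;
      }
      return 0;
    }

Per the server implementation below, the StatusProto in the response mirrors the gRPC status returned by the call.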
62 changes: 48 additions & 14 deletions tensorflow_serving/model_servers/model_service_impl.cc
@@ -17,20 +17,54 @@ limitations under the License.
 
 #include "tensorflow_serving/model_servers/get_model_status_impl.h"
 #include "tensorflow_serving/model_servers/grpc_status_util.h"
+#include "tensorflow_serving/util/status_util.h"
 
 namespace tensorflow {
-namespace serving {
-
-::grpc::Status ModelServiceImpl::GetModelStatus(
-    ::grpc::ServerContext* context, const GetModelStatusRequest* request,
-    GetModelStatusResponse* response) {
-  const ::grpc::Status status = tensorflow::serving::ToGRPCStatus(
-      GetModelStatusImpl::GetModelStatus(core_, *request, response));
-  if (!status.ok()) {
-    VLOG(1) << "GetModelStatus failed: " << status.error_message();
-  }
-  return status;
-}
-
-}  // namespace serving
+namespace serving {
+
+::grpc::Status ModelServiceImpl::GetModelStatus(::grpc::ServerContext *context,
+                                                const GetModelStatusRequest *request,
+                                                GetModelStatusResponse *response) {
+  const ::grpc::Status status = tensorflow::serving::ToGRPCStatus(
+      GetModelStatusImpl::GetModelStatus(core_, *request, response));
+  if (!status.ok()) {
+    VLOG(1) << "GetModelStatus failed: " << status.error_message();
+  }
+  return status;
+}
+
+::grpc::Status ModelServiceImpl::HandleReloadConfigRequest(::grpc::ServerContext *context,
+                                                           const ReloadConfigRequest *request,
+                                                           ReloadConfigResponse *response) {
+  ModelServerConfig server_config = request->config();
+  Status status;
+  switch (server_config.config_case()) {
+    case ModelServerConfig::kModelConfigList: {
+      const ModelConfigList list = server_config.model_config_list();
+
+      for (int index = 0; index < list.config_size(); index++) {
+        const ModelConfig config = list.config(index);
+        LOG(INFO) << "\nConfig entry"
+                  << "\n\tindex : " << index
+                  << "\n\tpath : " << config.base_path()
+                  << "\n\tname : " << config.name()
+                  << "\n\tplatform : " << config.model_platform();
+      }
+      status = core_->ReloadConfig(server_config);
+      break;
+    }
+    default:
+      status = errors::InvalidArgument("ModelServerConfig type not supported by HandleReloadConfigRequest. Only ModelConfigList is currently supported");
+  }
+
+  if (!status.ok()) {
+    LOG(ERROR) << "ReloadConfig failed: " << status.error_message();
+  }
+
+  const StatusProto status_proto = ToStatusProto(status);
+  *response->mutable_status() = status_proto;
+  return ToGRPCStatus(status);
+}
+
+}  // namespace serving
 }  // namespace tensorflow
27 changes: 16 additions & 11 deletions tensorflow_serving/model_servers/model_service_impl.h
@@ -21,23 +21,28 @@ limitations under the License.
 #include "tensorflow_serving/apis/model_service.grpc.pb.h"
 #include "tensorflow_serving/apis/model_service.pb.h"
 #include "tensorflow_serving/model_servers/server_core.h"
+#include "tensorflow_serving/apis/model_management.pb.h"
 
 namespace tensorflow {
-namespace serving {
+namespace serving {
 
-class ModelServiceImpl final : public ModelService::Service {
- public:
-  explicit ModelServiceImpl(ServerCore* core) : core_(core) {}
+class ModelServiceImpl final : public ModelService::Service {
+ public:
+  explicit ModelServiceImpl(ServerCore *core) : core_(core) {}
 
-  ::grpc::Status GetModelStatus(::grpc::ServerContext* context,
-                                const GetModelStatusRequest* request,
-                                GetModelStatusResponse* response) override;
+  ::grpc::Status GetModelStatus(::grpc::ServerContext *context,
+                                const GetModelStatusRequest *request,
+                                GetModelStatusResponse *response) override;
 
- private:
-  ServerCore* core_;
-};
+  ::grpc::Status HandleReloadConfigRequest(::grpc::ServerContext *context,
+                                           const ReloadConfigRequest *request,
+                                           ReloadConfigResponse *response);
 
-}  // namespace serving
+ private:
+  ServerCore *core_;
+};
+
+}  // namespace serving
 }  // namespace tensorflow
 
 #endif  // TENSORFLOW_SERVING_MODEL_SERVERS_MODEL_SERVICE_IMPL_H_