Skip to content

Commit

Permalink
[node] Add SavedModel execution (#2336)
Browse files Browse the repository at this point in the history
FEATURE
DEV
* implement single tensor input

* add tests

* add comment, deallocate tensor

* fix lint

* add test model for gpu

* add comment for tf_Graph

* address comments

* fix lint

* add doc for helper function

* save

* save

* check in test objects

* fix format

* use tensor name in signatureDef

* address comments

* fix lint

* address comments

* fix lint

* join by ,
  • Loading branch information
Kangyi Zhang committed Nov 13, 2019
1 parent 40f8599 commit 476fa7e
Show file tree
Hide file tree
Showing 25 changed files with 490 additions and 85 deletions.
Binary file not shown.
Binary file not shown.
Binary file not shown.
260 changes: 223 additions & 37 deletions tfjs-node/binding/tfjs_backend.cc
Expand Up @@ -748,7 +748,8 @@ TFJSBackend::~TFJSBackend() {
}
for (auto &kv : tf_savedmodel_map_) {
TF_AutoStatus tf_status;
TF_DeleteSession(kv.second, tf_status.status);
TF_DeleteSession(kv.second.first, tf_status.status);
TF_DeleteGraph(kv.second.second);
}
if (tfe_context_ != nullptr) {
TFE_DeleteContext(tfe_context_);
Expand All @@ -762,9 +763,13 @@ int32_t TFJSBackend::InsertHandle(TFE_TensorHandle *tfe_handle) {
.first->first;
}

int32_t TFJSBackend::InsertSavedModel(TF_Session *tf_session) {
int32_t TFJSBackend::InsertSavedModel(TF_Session *tf_session,
                                      TF_Graph *tf_graph) {
  // Both TF_Session and TF_Graph are required when executing SavedModel.
  // TF_Graph is used to find input/output operation from string name.
  // Store them together under a fresh id and return that id to the caller.
  const auto inserted = tf_savedmodel_map_.insert(std::make_pair(
      next_savedmodel_id_++, std::make_pair(tf_session, tf_graph)));
  return inserted.first->first;
}

Expand Down Expand Up @@ -919,45 +924,56 @@ napi_value TFJSBackend::ExecuteOp(napi_env env, napi_value op_name_value,
ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

for (int32_t i = 0; i < num_outputs; i++) {
// Output tensor info object:
napi_value tensor_info_value;
nstatus = napi_create_object(env, &tensor_info_value);
TFE_TensorHandle *handle = result_handles[i];
napi_value tensor_info_value = GenerateOutputTensorInfo(env, handle);
// Push into output array
nstatus = napi_set_element(env, output_tensor_infos, i, tensor_info_value);
ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);
}

TFE_TensorHandle *handle = result_handles[i];
return output_tensor_infos;
}

// Output tensor ID:
napi_value output_tensor_id_value;
nstatus =
napi_create_int32(env, InsertHandle(handle), &output_tensor_id_value);
ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);
/* Helper function to generate TensorInfo(used for JavaScript) from
 * TFE_TensorHandle. This helper function is used by ExecuteOp() and
 * RunSavedModel(). The returned JS object has the properties
 * "id" (backend tensor id), "shape" and "dtype".
 */
napi_value TFJSBackend::GenerateOutputTensorInfo(napi_env env,
                                                 TFE_TensorHandle *handle) {
  napi_status nstatus;

  // The TensorInfo object handed back to JavaScript.
  napi_value info;
  nstatus = napi_create_object(env, &info);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

  // "id": register the handle in the backend handle map and expose the id.
  napi_value id_value;
  nstatus = napi_create_int32(env, InsertHandle(handle), &id_value);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);
  nstatus = napi_set_named_property(env, info, "id", id_value);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

  // "shape": tensor dimensions as a JS value.
  napi_value shape_value;
  GetTFE_TensorHandleShape(env, handle, &shape_value);
  nstatus = napi_set_named_property(env, info, "shape", shape_value);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

  // "dtype": the tensor's data type as a JS value.
  napi_value dtype_value;
  GetTFE_TensorHandleType(env, handle, &dtype_value);
  nstatus = napi_set_named_property(env, info, "dtype", dtype_value);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

  return info;
}

napi_value TFJSBackend::LoadSavedModel(napi_env env,
Expand Down Expand Up @@ -993,7 +1009,6 @@ napi_value TFJSBackend::LoadSavedModel(napi_env env,
TF_DeleteSessionOptions(session_options);
TF_DeleteBuffer(run_options);
TF_DeleteBuffer(metagraph);
TF_DeleteGraph(graph);

if (TF_GetCode(tf_status.status) != TF_OK) {
NAPI_THROW_ERROR(env, "Failed to load SavedModel: %s",
Expand All @@ -1002,8 +1017,8 @@ napi_value TFJSBackend::LoadSavedModel(napi_env env,
}

napi_value output_session_id;
nstatus =
napi_create_int32(env, InsertSavedModel(session), &output_session_id);
nstatus = napi_create_int32(env, InsertSavedModel(session, graph),
&output_session_id);
ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);
return output_session_id;
}
Expand All @@ -1017,19 +1032,190 @@ void TFJSBackend::DeleteSavedModel(napi_env env,
auto savedmodel_entry = tf_savedmodel_map_.find(savedmodel_id);
if (savedmodel_entry == tf_savedmodel_map_.end()) {
NAPI_THROW_ERROR(
env, "Delete called on a SavedModel not referenced (savedmodel_id: %d)",
env, "Delete called on a SavedModel not found (savedmodel_id: %d)",
savedmodel_id);
return;
}

TF_AutoStatus tf_status;
TF_DeleteSession(savedmodel_entry->second, tf_status.status);
TF_DeleteSession(savedmodel_entry->second.first, tf_status.status);
if (TF_GetCode(tf_status.status) != TF_OK) {
NAPI_THROW_ERROR(env, "Failed to delete SavedModel: %s",
TF_Message(tf_status.status));
return;
}
// TODO(kangyizhang): Add tests to validate TF_Session and TF_Graph are
// deleted.
TF_DeleteGraph(savedmodel_entry->second.second);
tf_savedmodel_map_.erase(savedmodel_entry);
}

// Executes the SavedModel session identified by `savedmodel_id_value` with
// the given input tensors and returns a JS array of TensorInfo objects
// ({id, shape, dtype}) describing the outputs.
// - savedmodel_id_value: number, the id returned by LoadSavedModel().
// - input_tensor_ids: JS array of backend tensor ids to feed.
// - input_op_names_value / output_op_names_value: comma-joined strings of
//   "op_name:index" items naming the graph endpoints to feed/fetch.
napi_value TFJSBackend::RunSavedModel(napi_env env,
                                      napi_value savedmodel_id_value,
                                      napi_value input_tensor_ids,
                                      napi_value input_op_names_value,
                                      napi_value output_op_names_value) {
  napi_status nstatus;
  TF_AutoStatus tf_status;

  int32_t savedmodel_id;
  nstatus = napi_get_value_int32(env, savedmodel_id_value, &savedmodel_id);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

  // Get corresponding SavedModel session and graph.
  auto savedmodel_entry = tf_savedmodel_map_.find(savedmodel_id);
  if (savedmodel_entry == tf_savedmodel_map_.end()) {
    NAPI_THROW_ERROR(env, "SavedModel ID not found (savedmodel_id: %d)",
                     savedmodel_id);
    return nullptr;
  }

  std::string input_op_names;
  nstatus = GetStringParam(env, input_op_names_value, input_op_names);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);
  std::string output_op_names;
  nstatus = GetStringParam(env, output_op_names_value, output_op_names);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

  // Get input/output op names as vector.
  std::vector<const char *> input_op_name_array =
      splitStringByComma(input_op_names);
  std::vector<const char *> output_op_name_array =
      splitStringByComma(output_op_names);

  // Parse an item like "serving_default_x:0" into the op name and the index
  // of the op output. A missing or empty index (e.g. "serving_default_x" or
  // "serving_default_x:") defaults to 0. This replaces the previous
  // strtok()-based parsing, which (a) strcpy'd into a stack buffer of size
  // strlen(s) — one byte too small for the terminator, a buffer overflow —
  // and (b) dereferenced the NULL that strtok(NULL, ":") returns when the
  // item contains no ':'.
  auto parse_op_name = [](const char *item, std::string *op_name,
                          int *tensor_index) {
    const std::string full(item);
    const size_t colon = full.find(':');
    if (colon == std::string::npos) {
      *op_name = full;
      *tensor_index = 0;
    } else {
      *op_name = full.substr(0, colon);
      // atoi("") == 0, so a trailing ':' also yields index 0.
      *tensor_index = atoi(full.c_str() + colon + 1);
    }
  };

  std::vector<TF_Output> inputs;
  std::vector<TF_Output> outputs;

  uint32_t num_input_ids;
  nstatus = napi_get_array_length(env, input_tensor_ids, &num_input_ids);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

  if (input_op_name_array.size() != num_input_ids) {
    NAPI_THROW_ERROR(env,
                     "Length of input op names (%d) does not match the length "
                     "of input tensors (%d).",
                     input_op_name_array.size(), num_input_ids);
    return nullptr;
  }

  // TF_Tensors resolved from the input handles are owned here and must be
  // deleted on every exit path below; the previous version leaked them when
  // an op lookup or TF_SessionRun failed.
  std::vector<TF_Tensor *> input_values;
  auto delete_input_values = [&input_values]() {
    for (TF_Tensor *tensor : input_values) {
      TF_DeleteTensor(tensor);
    }
  };

  for (uint32_t i = 0; i < num_input_ids; i++) {
    napi_value cur_input_id;
    nstatus = napi_get_element(env, input_tensor_ids, i, &cur_input_id);
    ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

    int32_t cur_input_tensor_id;
    nstatus = napi_get_value_int32(env, cur_input_id, &cur_input_tensor_id);
    ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

    // Find input tensor based on tensor id.
    auto tensor_entry = tfe_handle_map_.find(cur_input_tensor_id);
    if (tensor_entry == tfe_handle_map_.end()) {
      NAPI_THROW_ERROR(env, "Input Tensor ID not found (tensor_id: %d)",
                       cur_input_tensor_id);
      delete_input_values();
      return nullptr;
    }
    TF_Tensor *input_tensor =
        TFE_TensorHandleResolve(tensor_entry->second, tf_status.status);

    if (TF_GetCode(tf_status.status) != TF_OK) {
      NAPI_THROW_ERROR(
          env, "Failed to get input tensor (tensor_id: %d) for session.",
          cur_input_tensor_id);
      delete_input_values();
      return nullptr;
    }

    // Add input tensor into input values list.
    input_values.push_back(input_tensor);

    // The item in input_op_name_array is something like "serving_default_x:0".
    // Parse it into input op name and index for provided tensor.
    std::string input_op_name;
    int input_tensor_index;
    parse_op_name(input_op_name_array[i], &input_op_name, &input_tensor_index);

    // Add input op into input ops list.
    // TODO(kangyizhang): Store these TF_Operations somewhere so they don't need
    // to be generated every time.
    TF_Operation *input_op = TF_GraphOperationByName(
        savedmodel_entry->second.second, input_op_name.c_str());
    if (input_op == nullptr) {
      NAPI_THROW_ERROR(env, "Input op name can not be found in the graph.");
      delete_input_values();
      return nullptr;
    }
    TF_Output in = {input_op, input_tensor_index};
    inputs.push_back(in);
  }

  // Add output op into output ops list.
  for (uint32_t i = 0; i < output_op_name_array.size(); i++) {
    // The item in output_op_name_array is something like
    // "StatefulPartitionedCall:0". Parse it into output op name and index.
    std::string output_op_name;
    int output_tensor_index;
    parse_op_name(output_op_name_array[i], &output_op_name,
                  &output_tensor_index);

    TF_Operation *output_op = TF_GraphOperationByName(
        savedmodel_entry->second.second, output_op_name.c_str());
    if (output_op == nullptr) {
      NAPI_THROW_ERROR(env, "Output op name can not be found in the graph.");
      delete_input_values();
      return nullptr;
    }
    TF_Output out = {output_op, output_tensor_index};
    outputs.push_back(out);
  }

  std::vector<TF_Tensor *> output_values(outputs.size(), nullptr);

  TF_SessionRun(savedmodel_entry->second.first, nullptr, inputs.data(),
                input_values.data(), num_input_ids, outputs.data(),
                output_values.data(), output_op_name_array.size(), nullptr, 0,
                nullptr, tf_status.status);

  // Inputs are no longer needed once the session has run; release them on
  // both the success and failure paths.
  delete_input_values();

  if (TF_GetCode(tf_status.status) != TF_OK) {
    NAPI_THROW_ERROR(env, "Session fail to run with error: %s",
                     TF_Message(tf_status.status));
    return nullptr;
  }

  // Size the result array to the actual number of outputs; the previous
  // hard-coded length of 1 relied on the JS array growing implicitly.
  napi_value output_tensor_infos;
  nstatus = napi_create_array_with_length(env, output_op_name_array.size(),
                                          &output_tensor_infos);
  ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);

  // Generate output tensors for JS.
  for (uint32_t i = 0; i < output_op_name_array.size(); i++) {
    TFE_TensorHandle *tfe_handle =
        TFE_NewTensorHandle(output_values[i], tf_status.status);
    // Deallocate output TF_Tensor in C++; the handle keeps its own reference.
    TF_DeleteTensor(output_values[i]);

    napi_value tensor_info_value = GenerateOutputTensorInfo(env, tfe_handle);
    // Push into output array.
    nstatus = napi_set_element(env, output_tensor_infos, i, tensor_info_value);
    ENSURE_NAPI_OK_RETVAL(env, nstatus, nullptr);
  }

  return output_tensor_infos;
}

} // namespace tfnodejs
20 changes: 16 additions & 4 deletions tfjs-node/binding/tfjs_backend.h
Expand Up @@ -19,9 +19,9 @@
#define TF_NODEJS_TFJS_BACKEND_H_

#include <node_api.h>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"

Expand Down Expand Up @@ -70,16 +70,28 @@ class TFJSBackend {
// - saved_model_id (number)
void DeleteSavedModel(napi_env env, napi_value saved_model_id);

// Execute a session from SavedModel with the provided inputs:
// - saved_model_id (number)
// - input_tensor_ids (array of input tensor IDs)
// - input_op_names (array of input op names)
// - output_op_names (array of output op names)
napi_value RunSavedModel(napi_env env, napi_value saved_model_id,
napi_value input_tensor_ids,
napi_value input_op_names,
napi_value output_op_names);

private:
TFJSBackend(napi_env env);
~TFJSBackend();

int32_t InsertHandle(TFE_TensorHandle *tfe_handle);
int32_t InsertSavedModel(TF_Session *tf_session);
int32_t InsertSavedModel(TF_Session *tf_session, TF_Graph *tf_graph);
napi_value GenerateOutputTensorInfo(napi_env env, TFE_TensorHandle *handle);

TFE_Context *tfe_context_;
std::map<int32_t, TFE_TensorHandle *> tfe_handle_map_;
std::map<int32_t, TF_Session *> tf_savedmodel_map_;
std::unordered_map<int32_t, TFE_TensorHandle *> tfe_handle_map_;
std::unordered_map<int32_t, std::pair<TF_Session *, TF_Graph *>>
tf_savedmodel_map_;
int32_t next_tensor_id_;
int32_t next_savedmodel_id_;
std::string device_name;
Expand Down

0 comments on commit 476fa7e

Please sign in to comment.