Merge pull request #91 from DLHub-Argonne/tf2
Added TF2 support
WardLT committed Aug 24, 2020
2 parents 7034b37 + bd4708a commit c01b781
Showing 4 changed files with 299 additions and 120 deletions.
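In brief: TensorFlowModel.create_model now dispatches on the installed TensorFlow version, handling both TF1 and TF2 SavedModel exports. Below is a minimal sketch of the new TF2 path, assuming TensorFlow 2.x is installed; the Doubler module and './toy-model' path are illustrative, not part of this commit.

import tensorflow as tf

from dlhub_sdk.models.servables.tensorflow import TensorFlowModel


class Doubler(tf.Module):
    """Toy module used only for illustration"""

    @tf.function(input_signature=[tf.TensorSpec((None, 3), tf.float32)])
    def __call__(self, x):
        return 2 * x


# Export with an explicit default signature so create_model finds a 'run' method
module = Doubler()
tf.saved_model.save(
    module, './toy-model',
    signatures={tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                module.__call__.get_concrete_function()})

# create_model checks tf.__version__ and takes the new _create_v2 path
model = TensorFlowModel.create_model('./toy-model')
print(model['servable']['methods'].keys())  # dict_keys(['run'])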
96 changes: 80 additions & 16 deletions dlhub_sdk/models/servables/tensorflow.py
@@ -1,3 +1,5 @@
from typing import List

from dlhub_sdk.utils.types import compose_argument_block, simplify_numpy_dtype
from dlhub_sdk.models.servables import BaseServableModel
import tensorflow as tf
@@ -17,7 +19,7 @@ def _convert_dtype(arg_type):
return simplify_numpy_dtype(np.dtype(dtype.as_numpy_dtype))


def _read_tf_inputs_and_outputs(arg_def):
def _read_tf_v1_inputs_and_outputs(arg_def):
"""Create a DLHub-compatible description from a Google ProtoBuf description of
the inputs or outputs to a function
@@ -39,12 +41,9 @@ def _read_tf_inputs_and_outputs(arg_def):
# Append the node name
node_names.append(arg_def.name)

# Different case if it is a scalar or a tensor
if len(shape) == 0:
dlhub_arg_defs.append(compose_argument_block(_convert_dtype(arg_def.dtype), name))
else:
dlhub_arg_defs.append(compose_argument_block('ndarray', name,
shape, _convert_dtype(arg_def.dtype)))
# Make it an argument block
dlhub_arg_defs.append(compose_argument_block('ndarray', name, shape,
_convert_dtype(arg_def.dtype)))

# If the function has only one argument, return that
if len(dlhub_arg_defs) == 1:
@@ -58,6 +57,28 @@ def _read_tf_inputs_and_outputs(arg_def):
element_types=dlhub_arg_defs), list(node_names)


def _read_tf_v2_function_signature(signature: List[tf.Tensor]) -> dict:
"""Generate a DLHub type specification from a Tensor object
Args:
sig: Function signature as a list of tensor
Returns:
(dict) Type specification
"""

# Get the tensor shapes
output = []
for sig in signature:
if sig.dtype != tf.resource: # Sometimes gets added for single-input functions
output.append(compose_argument_block('ndarray', sig.name, shape=list(sig.shape),
item_type=simplify_numpy_dtype(
np.dtype(sig.dtype.as_numpy_dtype))))
if len(output) == 1:
return output[0]
else:
return compose_argument_block('tuple', 'Several tensors', element_types=output)


class TensorFlowModel(BaseServableModel):
"""Class for generating descriptions of a TensorFlow model
@@ -85,6 +106,13 @@ def create_model(cls, export_directory):
"""

output = cls()
if tf.__version__ < '2':
return output._create_v1(export_directory)
else:
return output._create_v2(export_directory)

def _create_v1(self, export_directory: str):
"""Internal method for `create_model` that is compatible with TF1"""

# Load in the model
with tf.Session() as sess:
@@ -95,27 +123,63 @@ def create_model(cls, export_directory):
# Build descriptions for each function in the description
for name, func_def in model_def.signature_def.items():
# Get the descriptions for the inputs and outputs
input_def, input_nodes = _read_tf_inputs_and_outputs(func_def.inputs)
output_def, output_nodes = _read_tf_inputs_and_outputs(func_def.outputs)
input_def, input_nodes = _read_tf_v1_inputs_and_outputs(func_def.inputs)
output_def, output_nodes = _read_tf_v1_inputs_and_outputs(func_def.outputs)

# Rename the default function
if name == tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
name = "run"

# Register the function with the DLHub schema
output.register_function(name, input_def, output_def,
method_details={'input_nodes': input_nodes,
'output_nodes': output_nodes})
self.register_function(name, input_def, output_def,
method_details={'input_nodes': input_nodes,
'output_nodes': output_nodes})

# Check if there is a run method
if 'run' not in output['servable']['methods']:
if 'run' not in self['servable']['methods']:
raise ValueError('There is no default servable for this model.\n'
' Make sure to use '
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY '
'when saving model.')

# Add tensorflow version and files
output.add_requirement('tensorflow', tf.__version__)
output.add_directory(export_directory, recursive=True)
self.add_requirement('tensorflow', tf.__version__)
self.add_directory(export_directory, recursive=True)

return self

def _create_v2(self, export_directory: str):
"""Internal method for `create_model` that is compatible with TF2"""

# Load in the directory
imported = tf.saved_model.load(export_directory)

# Check that signatures are defined correctly
if len(imported.signatures) == 0:
raise ValueError('SavedModel does not contain any function signatures. '
'Please re-save model with function signatures. '
'See: https://www.tensorflow.org/guide/saved_model'
'#specifying_signatures_during_export')
elif 'run' not in imported.signatures and \
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in imported.signatures:
raise ValueError('SavedModel must contain a function '
'named "run" or the default signature key')

# Build descriptions for each function in the description
for name, func_def in imported.signatures.items():
# Get the descriptions for the inputs and outputs
input_spec = _read_tf_v2_function_signature(func_def.inputs)
output_spec = _read_tf_v2_function_signature(func_def.outputs)

# Rename the default function
if name == tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
name = "run"

# Register the function with the DLHub schema
self.register_function(name, input_spec, output_spec)

# Add the files and tensorflow version
self.add_requirement('tensorflow', imported.tensorflow_version)
self.add_directory(export_directory, recursive=True)

return output
return self
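For reference, both readers assemble plain-dict type specifications via compose_argument_block. Below is a sketch of what _read_tf_v2_function_signature would return for a two-tensor signature such as scalar_multiply(z, x); the tensor names in the descriptions are assumed, while the overall structure follows the code above and the assertions in the updated test below.

# Sketch only: tensor names like 'x:0' are assumed; the structure is not
{'type': 'tuple',
 'description': 'Several tensors',
 'element_types': [
     {'type': 'ndarray', 'description': 'x:0',
      'shape': [None, 3], 'item_type': {'type': 'float'}},
     {'type': 'ndarray', 'description': 'z:0',
      'shape': [], 'item_type': {'type': 'float'}}]}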
215 changes: 120 additions & 95 deletions dlhub_sdk/models/servables/tests/test_tensorflow.py
@@ -10,6 +10,99 @@
tf_export_path = os.path.join(os.path.dirname(__file__), 'tf-model')


def _make_model_v1():
"""Builds a graph and exports it using SavedModel"""
tf.reset_default_graph()

with tf.Session() as sess:
# Make two simple graphs, both of which will be served by TF
x = tf.placeholder('float', shape=(None, 3), name='Input')
z = tf.placeholder('float', shape=(), name='ScalarMultiple')
m = tf.Variable([1.0, 1.0, 1.0], name='Slopes')
y = m * x + 1
len_fun = tf.reduce_sum(y - x) # Returns the number of elements in the array
scale_mult = tf.multiply(z, x, name='scale_mult')

# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)

# Create the tool for saving the model to disk
builder = tf.saved_model.builder.SavedModelBuilder(tf_export_path)

# Make descriptions for the inputs and outputs
x_desc = tf.saved_model.utils.build_tensor_info(x)
y_desc = tf.saved_model.utils.build_tensor_info(y)
z_desc = tf.saved_model.utils.build_tensor_info(z)
len_fun_desc = tf.saved_model.utils.build_tensor_info(len_fun)
scale_mult_desc = tf.saved_model.utils.build_tensor_info(scale_mult)

# Make a signature for the functions to be served
func_sig = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': x_desc},
outputs={'y': y_desc},
method_name='run'
)
len_sig = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': x_desc},
outputs={'len': len_fun_desc},
method_name='length'
)
mult_sig = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': x_desc, 'z': z_desc},
outputs={'scale_mult': scale_mult_desc},
method_name='scalar_multiply'
)

# Add the functions and the state of the graph to the builder
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: func_sig,
'length': len_sig,
'scalar_multiply': mult_sig
})

# Save the function
builder.save()


def _make_model_v2():
"""Builds and saves a custom module"""
class CustomModule(tf.Module):

def __init__(self):
super().__init__()
self.m = tf.Variable([1.0, 1.0, 1.0], name='slope')

@tf.function
def __call__(self, x):
y = self.m * x + 1
return y

@tf.function(input_signature=[tf.TensorSpec((None, 3), tf.float32)])
def length(self, x):
return tf.reduce_sum(self(x) - x, name='length')

@tf.function(input_signature=[tf.TensorSpec([], tf.float32),
tf.TensorSpec((None, 3), tf.float32)])
def scalar_multiply(self, z, x):
return tf.multiply(z, x, name='scale_mult')

module = CustomModule()

# Make a concrete version of __call__
call = module.__call__.get_concrete_function(tf.TensorSpec((None, 3)))

tf.saved_model.save(
module, tf_export_path, signatures={
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: call,
'length': module.length,
'scalar_multiply': module.scalar_multiply
}
)


class TestTensorflow(TestCase):

maxDiff = 4096
@@ -19,66 +112,12 @@ def setUp(self):
if os.path.isdir(tf_export_path):
shutil.rmtree(tf_export_path)

def make_model(self):

tf.reset_default_graph()

with tf.Session() as sess:

# Make two simple graphs, both of which will be served by TF
x = tf.placeholder('float', shape=(None, 3), name='Input')
z = tf.placeholder('float', shape=(), name='ScalarMultiple')
m = tf.Variable([1.0, 1.0, 1.0], name='Slopes')
y = m * x + 1
len_fun = tf.reduce_sum(y - x) # Returns the number of elements in the array
scale_mult = tf.multiply(z, x, name='scale_mult')

# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)

# Create the tool for saving the model to disk
builder = tf.saved_model.builder.SavedModelBuilder(tf_export_path)

# Make descriptions for the inputs and outputs
x_desc = tf.saved_model.utils.build_tensor_info(x)
y_desc = tf.saved_model.utils.build_tensor_info(y)
z_desc = tf.saved_model.utils.build_tensor_info(z)
len_fun_desc = tf.saved_model.utils.build_tensor_info(len_fun)
scale_mult_desc = tf.saved_model.utils.build_tensor_info(scale_mult)

# Make a signature for the functions to be served
func_sig = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': x_desc},
outputs={'y': y_desc},
method_name='run'
)
len_sig = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': x_desc},
outputs={'len': len_fun_desc},
method_name='length'
)
mult_sig = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': x_desc, 'z': z_desc},
outputs={'scale_mult': scale_mult_desc},
method_name='scalar_multiply'
)

# Add the functions and the state of the graph to the builder
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: func_sig,
'length': len_sig,
'scalar_multiply': mult_sig
})

# Save the function
builder.save()

def test_tf(self):
# Make a model and save it to disk
self.make_model()
if tf.__version__ < '2':
_make_model_v1()
else:
_make_model_v2()

# Create the description
model = TensorFlowModel.create_model(tf_export_path).set_title('TF Test')\
@@ -87,44 +126,30 @@ def test_tf(self):
# Generate the metadata for the test
metadata = model.to_dict(simplify_paths=True)

# Check whether the 'x' is listed first for the multiple-input model or second
self.assertEqual({'other': ['saved_model.pb',
os.path.join('variables', 'variables.data-00000-of-00001'),
os.path.join('variables', 'variables.index')]},
metadata['dlhub']['files'])
# Make sure the files are there
my_files = metadata['dlhub']['files']['other']
assert 'saved_model.pb' in my_files
assert os.path.join('variables', 'variables.data-00000-of-00001') in my_files
assert os.path.join('variables', 'variables.index') in my_files

# Check the tensorflow version
self.assertEqual(metadata['dlhub']['dependencies'],
{'python': {'tensorflow': tf.__version__}})
self.assertEqual(metadata['servable'],
{'methods':
{'run': {
'input': {'type': 'ndarray', 'description': 'x',
'shape': [None, 3], 'item_type': {'type': 'float'}},
'output': {'type': 'ndarray', 'description': 'y',
'shape': [None, 3], 'item_type': {'type': 'float'}},
'parameters': {},
'method_details': {'input_nodes': ['Input:0'],
'output_nodes': ['add:0']}
}, 'length': {
'input': {'type': 'ndarray', 'description': 'x',
'shape': [None, 3], 'item_type': {'type': 'float'}},
'output': {'type': 'float', 'description': 'len'},
'parameters': {},
'method_details': {'input_nodes': ['Input:0'],
'output_nodes': ['Sum:0']}
}, 'scalar_multiply': {
'input': {'type': 'tuple', 'description': 'Arguments',
'element_types': [
{'type': 'ndarray', 'description': 'x',
'shape': [None, 3], 'item_type': {'type': 'float'}},
{'type': 'float', 'description': 'z'}
]},
'output': {'type': 'ndarray', 'description': 'scale_mult',
'shape': [None, 3], 'item_type': {'type': 'float'}},
'parameters': {},
'method_details': {'input_nodes': ['Input:0', 'ScalarMultiple:0'],
'output_nodes': ['scale_mult:0']}
}},
'shim': 'tensorflow.TensorFlowServable',
'type': 'TensorFlow Model'})

# Check whether the 'x' is listed first for the multiple-input model or second
my_methods = metadata['servable']['methods']
assert my_methods['run']['input']['type'] == 'ndarray'
assert my_methods['run']['input']['shape'] == [None, 3]
assert my_methods['run']['input']['item_type'] == {'type': 'float'}

assert my_methods['scalar_multiply']['input']['type'] == 'tuple'
assert my_methods['scalar_multiply']['input']['element_types'][0]['shape'] == [None, 3]
assert my_methods['scalar_multiply']['input']['element_types'][1]['shape'] == []

assert 'length' in my_methods
assert 'scalar_multiply' in my_methods

# Check the shim
assert metadata['servable']['shim'] == 'tensorflow.TensorFlowServable'

validate_against_dlhub_schema(metadata, 'servable')
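One behavioral difference the looser assertions absorb: only the TF1 reader records method_details with graph node names, while the TF2 reader registers functions without them. A hedged spot-check under that assumption:

# Sketch: v1 descriptions carry graph node names; v2 descriptions do not
model = TensorFlowModel.create_model(tf_export_path)
run = model['servable']['methods']['run']
if tf.__version__ < '2':
    assert run['method_details']['input_nodes'] == ['Input:0']  # from the removed exact-match test
else:
    assert 'input_nodes' not in run.get('method_details', {})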
