modify default model fn for Elastic Inference #55

Merged
merged 1 commit on Nov 29, 2018
8 changes: 7 additions & 1 deletion src/sagemaker_mxnet_container/serving.py
@@ -23,6 +23,7 @@
 logger = logging.getLogger(__name__)
 
 PREFERRED_BATCH_SIZE_PARAM = 'SAGEMAKER_DEFAULT_MODEL_FIRST_DIMENSION_SIZE'
+INFERENCE_ACCELERATOR_PRESENT_ENV = 'SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT'
 DEFAULT_ENV_VARS = {
     'MXNET_CPU_WORKER_NTHREADS': '1',
     'MXNET_CPU_PRIORITY_NTHREADS': '1',
@@ -64,7 +65,12 @@ def default_model_fn(model_dir, preferred_batch_size=1):
     sym, args, aux = mx.model.load_checkpoint(os.path.join(model_dir, DEFAULT_MODEL_NAME), 0)
 
     # TODO mxnet ctx - better default, allow user control
-    mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=data_names, label_names=None)
+    context = mx.cpu()
+
+    if os.environ.get(INFERENCE_ACCELERATOR_PRESENT_ENV) == 'true':
+        context = mx.eia()
+
+    mod = mx.mod.Module(symbol=sym, context=context, data_names=data_names, label_names=None)
     mod.bind(for_training=False, data_shapes=data_shapes)
     mod.set_params(args, aux, allow_missing=True)
 
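With this change, routing inference onto an attached accelerator becomes a pure environment-variable switch. A minimal sketch of exercising the new branch directly (assuming an EI-enabled MXNet build that exposes `mx.eia()`; `/opt/ml/model` is the standard SageMaker model directory, and on a real endpoint the platform rather than the user would be expected to set the variable):

```python
import os

from sagemaker_mxnet_container.serving import default_model_fn

# Simulate a host that reports an attached Elastic Inference accelerator.
# Any value other than the string 'true' keeps the default mx.cpu() context.
os.environ['SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT'] = 'true'

# Loads the checkpoint and binds the MXNet module on mx.eia() instead of mx.cpu().
model = default_model_fn('/opt/ml/model')
```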
31 changes: 31 additions & 0 deletions test/unit/test_serving.py
@@ -57,6 +57,37 @@ def test_default_model_fn(path_exists, mx_load_checkpoint, mx_module, mx_cpu):
     model.set_params.assert_called_with(args, aux, allow_missing=True)
 
 
+@patch('mxnet.eia', create=True)
+@patch('mxnet.mod.Module')
+@patch('mxnet.model.load_checkpoint')
+@patch('os.path.exists', return_value=True)
+@patch.dict(os.environ, {'SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT': 'true'}, clear=True)
+def test_default_model_accelerator_fn(path_exists, mx_load_checkpoint, mx_module, mx_eia):
+    sym = Mock()
+    args = Mock()
+    aux = Mock()
+    mx_load_checkpoint.return_value = [sym, args, aux]
+
+    eia_context = Mock()
+    mx_eia.return_value = eia_context
+
+    data_name = 'foo'
+    data_shape = [1]
+    signature = json.dumps([{'name': data_name, 'shape': data_shape}])
+
+    with patch('six.moves.builtins.open', mock_open(read_data=signature)):
+        default_model_fn(MODEL_DIR)
+
+    mx_load_checkpoint.assert_called_with(os.path.join(MODEL_DIR, 'model'), 0)
+
+    init_call = call(symbol=sym, context=eia_context, data_names=[data_name], label_names=None)
+    assert init_call in mx_module.mock_calls
+
+    model = mx_module.return_value
+    model.bind.assert_called_with(for_training=False, data_shapes=[(data_name, data_shape)])
+    model.set_params.assert_called_with(args, aux, allow_missing=True)
+
+
 @patch('sagemaker_containers.beta.framework.functions.error_wrapper', lambda x, y: x)
 def test_mxnet_transformer_init():
     t = MXNetTransformer()
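One detail worth noting in the new test is `create=True` on the `mxnet.eia` patch: stock MXNet builds do not define an `eia` attribute, so without the flag `patch` would fail its attribute lookup before the test body ever ran. A small standalone illustration (using `unittest.mock` here; the test file's own imports are not shown in this diff):

```python
import mxnet
from unittest.mock import patch

# create=True lets patch fabricate an attribute the module does not define.
# Without it, a stock MXNet build raises:
#   AttributeError: <module 'mxnet'> does not have the attribute 'eia'
with patch('mxnet.eia', create=True) as mx_eia:
    context = mxnet.eia()  # resolves to the mock inside this block
    assert context is mx_eia.return_value
```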