From 9204dabf36e1a7aaca1a8f4dccd3a0187e0abd6a Mon Sep 17 00:00:00 2001
From: Giuseppe Angelo Porcelli
Date: Thu, 30 Apr 2020 17:29:01 +0000
Subject: [PATCH 1/2] Fixed handler service to allow running custom user modules in multi-model mode.

---
 .../handler_service.py | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/src/sagemaker_pytorch_serving_container/handler_service.py b/src/sagemaker_pytorch_serving_container/handler_service.py
index 6752dfdf..f58b4c1e 100644
--- a/src/sagemaker_pytorch_serving_container/handler_service.py
+++ b/src/sagemaker_pytorch_serving_container/handler_service.py
@@ -17,6 +17,10 @@
 from sagemaker_pytorch_serving_container.default_inference_handler import \
     DefaultPytorchInferenceHandler
 
+import os
+import sys
+
+ENABLE_MULTI_MODEL = os.getenv("SAGEMAKER_MULTI_MODEL", "false") == "true"
 
 class HandlerService(DefaultHandlerService):
     """Handler service that is executed by the model server.
@@ -31,5 +35,18 @@ class HandlerService(DefaultHandlerService):
     """
 
     def __init__(self):
+        self._initialized = False
+
         transformer = Transformer(default_inference_handler=DefaultPytorchInferenceHandler())
         super(HandlerService, self).__init__(transformer=transformer)
+
+
+    def initialize(self, context):
+
+        # Adding the 'code' directory path to sys.path to allow importing user modules when multi-model mode is enabled.
+        if (not self._initialized) and ENABLE_MULTI_MODEL:
+            code_dir = os.path.join(context.system_properties.get("model_dir"), 'code')
+            sys.path.append(code_dir)
+            self._initialized = True
+
+        super().initialize(context)
\ No newline at end of file

From 71675fa86eaca14d941a35cf3e774ce14865b722 Mon Sep 17 00:00:00 2001
From: Giuseppe Angelo Porcelli
Date: Thu, 30 Apr 2020 18:06:16 +0000
Subject: [PATCH 2/2] Fixed flake8 errors.

---
 .../handler_service.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/sagemaker_pytorch_serving_container/handler_service.py b/src/sagemaker_pytorch_serving_container/handler_service.py
index f58b4c1e..408758d9 100644
--- a/src/sagemaker_pytorch_serving_container/handler_service.py
+++ b/src/sagemaker_pytorch_serving_container/handler_service.py
@@ -22,7 +22,9 @@
 ENABLE_MULTI_MODEL = os.getenv("SAGEMAKER_MULTI_MODEL", "false") == "true"
 
+
 class HandlerService(DefaultHandlerService):
+
     """Handler service that is executed by the model server.
 
     Determines specific default inference handlers to use based on the type MXNet
     model being used.
@@ -36,17 +38,15 @@ class HandlerService(DefaultHandlerService):
     """
 
     def __init__(self):
         self._initialized = False
-
+
         transformer = Transformer(default_inference_handler=DefaultPytorchInferenceHandler())
         super(HandlerService, self).__init__(transformer=transformer)
 
-
    def initialize(self, context):
-
         # Adding the 'code' directory path to sys.path to allow importing user modules when multi-model mode is enabled.
         if (not self._initialized) and ENABLE_MULTI_MODEL:
             code_dir = os.path.join(context.system_properties.get("model_dir"), 'code')
             sys.path.append(code_dir)
             self._initialized = True
-
+
         super().initialize(context)
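
Note (not part of the patches above): the change makes a model's 'code' directory importable by appending it to sys.path when SAGEMAKER_MULTI_MODEL is "true". The standalone sketch below illustrates that mechanism outside the model server; the temporary directory layout and the module name inference.py are hypothetical examples used only for this illustration, not files created by the patch.

    # Minimal sketch, assuming a hypothetical model directory that contains a
    # 'code/' subdirectory holding a user module named inference.py.
    import os
    import sys
    import tempfile

    model_dir = tempfile.mkdtemp()            # stands in for context.system_properties.get("model_dir")
    code_dir = os.path.join(model_dir, "code")
    os.makedirs(code_dir)
    with open(os.path.join(code_dir, "inference.py"), "w") as f:
        f.write("def model_fn(model_dir):\n    return 'loaded from ' + model_dir\n")

    # Mirrors what the patched initialize() does when multi-model mode is enabled:
    sys.path.append(code_dir)

    import inference                          # resolvable only because code_dir is on sys.path
    print(inference.model_fn(model_dir))      # -> loaded from /tmp/...

The same idea applies per model in multi-model mode: once the model's code directory is on sys.path, the server-side handler can import the user-provided module by name.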