Improvements
javierdlrm committed Jan 27, 2022
1 parent 55f150e · commit 15a3164
Showing 17 changed files with 207 additions and 134 deletions.
31 changes: 6 additions & 25 deletions python/hsml/component_config.py
@@ -29,41 +29,22 @@
 class ComponentConfig:
     """Configuration of a serving component (predictor or transformer)."""
 
-    REQUESTED_INSTANCES_KEY = None
-
     def __init__(
         self,
         script_file: Optional[str] = None,
-        resources_config: Optional[Union[ResourcesConfig, dict]] = None,
+        resources_config: Optional[ResourcesConfig] = None,
         inference_logger: Optional[Union[InferenceLoggerConfig, dict]] = None,
         inference_batcher: Optional[Union[InferenceBatcherConfig, dict]] = None,
     ):
-        # check for dict params
-        resources_config = util.get_obj_from_json(
-            ResourcesConfig, resources_config, self.REQUESTED_INSTANCES_KEY
-        )
-        inference_logger = util.get_obj_from_json(
-            InferenceLoggerConfig, inference_logger
-        )
-        inference_batcher = util.get_obj_from_json(
-            InferenceBatcherConfig, inference_batcher
-        )
-
         self._script_file = script_file
-        self._resources_config = (
-            resources_config
-            if resources_config is not None
-            else ResourcesConfig()  # default
-        )
+        self._resources_config = resources_config
         self._inference_logger = (
-            inference_logger
-            if inference_logger is not None
-            else InferenceLoggerConfig()  # default
+            util.get_obj_from_json(InferenceLoggerConfig, inference_logger)
+            or InferenceLoggerConfig()
         )
         self._inference_batcher = (
-            inference_batcher
-            if inference_batcher is not None
-            else InferenceBatcherConfig()  # default
+            util.get_obj_from_json(InferenceBatcherConfig, inference_batcher)
+            or InferenceBatcherConfig()
         )
 
     @abstractclassmethod
10 changes: 9 additions & 1 deletion python/hsml/constants.py
@@ -26,8 +26,16 @@ class RESOURCES:
     GPUS = 0
 
 
+class KAFKA_TOPIC_CONFIG:
+    NUM_REPLICAS = 1
+    NUM_PARTITIONS = 1
+
+
 class INFERENCE_LOGGER:
-    MODE = "NONE"
+    MODE_NONE = "NONE"
+    MODE_ALL = "ALL"
+    MODE_MODEL_INPUTS = "MODEL_INPUTS"
+    MODE_PREDICTIONS = "PREDICTIONS"
 
 
 class INFERENCE_BATCHER:
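These mode constants are what hsml.util.get_members scans in the validators added below. That helper is not part of this diff, so here is only a hypothetical sketch of the idea (name, signature, and return shape assumed):

def get_members(cls, prefix=None):
    # collect public class-attribute values, optionally filtered by attribute-name prefix
    return [
        value
        for name, value in vars(cls).items()
        if not name.startswith("_") and (prefix is None or name.startswith(prefix))
    ]

get_members(INFERENCE_LOGGER)  # ["NONE", "ALL", "MODEL_INPUTS", "PREDICTIONS"] (assumed)
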
16 changes: 7 additions & 9 deletions python/hsml/core/serving_api.py
@@ -23,7 +23,7 @@ class ServingApi:
     def __init__(self):
         pass
 
-    def get(self, id: int):
+    def get_by_id(self, id: int):
         """Get the metadata of a deployment with a certain id.
 
         :param id: id of the deployment
@@ -41,7 +41,7 @@ def get(self, id: int):
         deployment_json = _client._send_request("GET", path_params)
         return deployment.Deployment.from_response_json(deployment_json)
 
-    def get_by_name(self, name: str):
+    def get(self, name: str):
         """Get the metadata of a deployment with a certain name.
 
         :param name: name of the deployment
@@ -50,13 +50,11 @@ def get_by_name(self, name: str):
         :rtype: Deployment
         """
         _client = client.get_instance()
-        path_params = [
-            "project",
-            _client._project_id,
-            "serving",
-            name,  # TODO: Add endpoint in the backend for filtering by name
-        ]
-        deployment_json = _client._send_request("GET", path_params)
+        path_params = ["project", _client._project_id, "serving"]
+        query_params = {"name": name}
+        deployment_json = _client._send_request(
+            "GET", path_params, query_params=query_params
+        )
         return deployment.Deployment.from_response_json(deployment_json)
 
     def get_all(self):
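With this change the deployment name travels as a query parameter rather than a path segment, so the request is effectively (path prefix assumed from the usual Hopsworks REST layout):

GET /hopsworks-api/api/project/<project_id>/serving?name=<deployment_name>
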
4 changes: 1 addition & 3 deletions python/hsml/inference_batcher_config.py
@@ -26,9 +26,7 @@ class InferenceBatcherConfig:
     """Configuration for an inference batcher."""
 
     def __init__(self, enabled: Optional[bool] = None):
-        self._enabled = (
-            enabled if enabled is not None else INFERENCE_BATCHER.ENABLED
-        )  # default
+        self._enabled = enabled or INFERENCE_BATCHER.ENABLED
 
     def describe(self):
         util.pretty_print(self)
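One subtlety of the `value or default` idiom adopted here and in the files below: `or` falls back on any falsy value, not only None. A minimal sketch of the difference in plain Python:

enabled = False
enabled or True                            # True  -- the explicit False is replaced by the default
enabled if enabled is not None else True   # False -- what the previous ternary preserved
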
22 changes: 17 additions & 5 deletions python/hsml/inference_logger_config.py
@@ -31,15 +31,27 @@ def __init__(
         kafka_topic: Optional[Union[KafkaTopicConfig, dict]] = None,
         mode: Optional[str] = None,
     ):
-        # check for dict params
-        kafka_topic = util.get_obj_from_json(KafkaTopicConfig, kafka_topic)
-
-        self._kafka_topic = kafka_topic
-        self._mode = mode if mode is not None else INFERENCE_LOGGER.MODE
+        self._kafka_topic = util.get_obj_from_json(KafkaTopicConfig, kafka_topic)
+        self._mode = self._validate_mode(mode) or (
+            INFERENCE_LOGGER.MODE_ALL
+            if self._kafka_topic is not None
+            else INFERENCE_LOGGER.MODE_NONE
+        )
 
     def describe(self):
         util.pretty_print(self)
 
+    def _validate_mode(self, mode):
+        if mode is not None:
+            modes = util.get_members(INFERENCE_LOGGER)
+            if mode not in modes:
+                raise ValueError(
+                    "Inference logging mode {} is not valid. Possible values are {}".format(
+                        mode, ", ".join(modes)
+                    )
+                )
+        return mode
+
     @classmethod
     def from_response_json(cls, json_dict):
         json_decamelized = humps.decamelize(json_dict)
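A short sketch of the resulting defaulting and validation behavior (hypothetical usage; assumes the class exposes its mode, as accessors elsewhere in hsml suggest):

from hsml.inference_logger_config import InferenceLoggerConfig
from hsml.kafka_topic_config import KafkaTopicConfig

InferenceLoggerConfig()                          # mode defaults to "NONE" (no topic)
InferenceLoggerConfig(KafkaTopicConfig("logs"))  # mode defaults to "ALL" (topic given)
InferenceLoggerConfig(mode="EVERYTHING")         # raises ValueError listing the valid modes
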
7 changes: 5 additions & 2 deletions python/hsml/kafka_topic_config.py
@@ -18,6 +18,7 @@
 from typing import Optional
 
 from hsml import util
+from hsml.constants import KAFKA_TOPIC_CONFIG
 
 
 class KafkaTopicConfig:
@@ -30,8 +31,10 @@ def __init__(
         topic_num_partitions: Optional[int] = None,
     ):
         self._topic_name = topic_name
-        self._topic_num_replicas = topic_num_replicas
-        self._topic_num_partitions = topic_num_partitions
+        self._topic_num_replicas = topic_num_replicas or KAFKA_TOPIC_CONFIG.NUM_REPLICAS
+        self._topic_num_partitions = (
+            topic_num_partitions or KAFKA_TOPIC_CONFIG.NUM_PARTITIONS
+        )
 
     def describe(self):
         util.pretty_print(self)
4 changes: 2 additions & 2 deletions python/hsml/model_serving.py
@@ -46,7 +46,7 @@ def get_deployment_by_id(self, id: int):
             `RestAPIError`: If unable to retrieve deployment from model serving.
         """
 
-        return self._serving_api.get(id)
+        return self._serving_api.get_by_id(id)
 
     def get_deployment(self, name: str):
         """Get a deployment entity from model serving by name.
@@ -61,7 +61,7 @@ def get_deployment(self, name: str):
             `RestAPIError`: If unable to retrieve deployment from model serving.
         """
 
-        return self._serving_api.get_by_name(name)
+        return self._serving_api.get(name)
 
     def get_deployments(self):
         """Get all deployments from model serving.
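For context, a minimal usage sketch of the public entry points these renames keep stable (assumes a configured hsml connection; deployment name hypothetical):

import hsml

ms = hsml.connection().get_model_serving()
deployment = ms.get_deployment("mnist")        # by name, via the new ?name= filter
same = ms.get_deployment_by_id(deployment.id)  # by numeric id
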
2 changes: 1 addition & 1 deletion python/hsml/predictor.py
@@ -97,7 +97,7 @@ def extract_fields_from_json(cls, json_decamelized):
             json_decamelized.pop("model_name")
             if "model_name" in json_decamelized
             else name
-        )  # TODO: FIX THIS, rebase branch. IT'S not NAME of SERVING!
+        )
         mp = json_decamelized.pop("model_path")
         mv = json_decamelized.pop("model_version")
         av = json_decamelized.pop("artifact_version")
35 changes: 21 additions & 14 deletions python/hsml/predictor_config.py
@@ -20,37 +20,48 @@
 
 from hsml.constants import PREDICTOR
 from hsml.component_config import ComponentConfig
-from hsml.resources_config import ResourcesConfig
+from hsml.resources_config import PredictorResourcesConfig
 from hsml.inference_logger_config import InferenceLoggerConfig
 from hsml.inference_batcher_config import InferenceBatcherConfig
 
 
 class PredictorConfig(ComponentConfig):
     """Configuration object attached to a Predictor."""
 
-    REQUESTED_INSTANCES_KEY: str = "requested_instances"
-
     def __init__(
         self,
         model_server: str,
         serving_tool: Optional[str] = None,
         script_file: Optional[str] = None,
-        resources_config: Optional[Union[ResourcesConfig, dict]] = None,
+        resources_config: Optional[Union[PredictorResourcesConfig, dict]] = None,
         inference_logger: Optional[Union[InferenceLoggerConfig, dict]] = None,
         inference_batcher: Optional[Union[InferenceBatcherConfig, dict]] = None,
     ):
+        resources_config = (
+            util.get_obj_from_json(PredictorResourcesConfig, resources_config)
+            or PredictorResourcesConfig()
+        )
+
         super().__init__(
             script_file, resources_config, inference_logger, inference_batcher
         )
 
-        self._model_server = model_server
-        self._serving_tool = (
-            serving_tool if serving_tool is not None else PREDICTOR.SERVING_TOOL
-        )
+        self._model_server = self._validate_model_server(model_server)
+        self._serving_tool = serving_tool or PREDICTOR.SERVING_TOOL_KFSERVING
 
     def describe(self):
         util.pretty_print(self)
 
+    def _validate_model_server(self, model_server):
+        model_servers = util.get_members(PREDICTOR, prefix="MODEL_SERVER")
+        if model_server not in model_servers:
+            raise ValueError(
+                "Model server {} is not valid. Possible values are {}".format(
+                    model_server, ", ".join(model_servers)
+                )
+            )
+        return model_server
+
     @classmethod
     def for_model(cls, model):
         return util.get_predictor_config_for_model(model)
@@ -68,11 +79,7 @@ def extract_fields_from_json(cls, json_decamelized):
             if "predictor" in json_decamelized
             else None
         )
-        rc = (
-            ResourcesConfig.from_json(json_decamelized, cls.REQUESTED_INSTANCES_KEY)
-            if cls.REQUESTED_INSTANCES_KEY in json_decamelized
-            else None
-        )
+        rc = PredictorResourcesConfig.from_json(json_decamelized)
        il = InferenceLoggerConfig.from_json(json_decamelized)
        ib = InferenceBatcherConfig.from_json(json_decamelized)
        return ms, st, sf, rc, il, ib
@@ -87,7 +94,7 @@ def to_dict(self):
             "modelServer": self._model_server,
             "servingTool": self._serving_tool,
             "predictor": self._script_file,
-            **self._resources_config.to_dict("requestedInstances"),
+            **self._resources_config.to_dict(),
             **self._inference_logger.to_dict(),
             **self._inference_batcher.to_dict(),
         }
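A sketch of how the new validation surfaces to callers (values assumed from constants.PREDICTOR, e.g. MODEL_SERVER_PYTHON = "PYTHON"):

from hsml.predictor_config import PredictorConfig

PredictorConfig(model_server="PYTHON")        # ok; serving tool defaults to KFSERVING
PredictorConfig(model_server="NOT_A_SERVER")  # ValueError listing the valid model servers
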
22 changes: 17 additions & 5 deletions python/hsml/predictor_state.py
@@ -44,7 +44,7 @@ def __init__(
         self._external_ip = external_ip
         self._external_port = external_port
         self._revision = revision
-        self._deployed = deployed if deployed is not None else False
+        self._deployed = deployed or False
         self._conditions = conditions
         self._status = status
 
@@ -58,14 +58,26 @@ def from_response_json(cls, json_dict):
 
     @classmethod
     def extract_fields_from_json(cls, json_decamelized):
-        ai = json_decamelized.pop("available_instances")
+        ai = (
+            json_decamelized.pop("available_instances")
+            if "available_instances" in json_decamelized
+            else None
+        )
         ati = (
             json_decamelized.pop("available_transformer_instances")
             if "available_transformer_instances" in json_decamelized
             else None
         )
-        ii = json_decamelized.pop("internal_ips")
-        iph = json_decamelized.pop("internal_path")
+        ii = (
+            json_decamelized.pop("internal_ips")
+            if "internal_ips" in json_decamelized
+            else None
+        )
+        iph = (
+            json_decamelized.pop("internal_path")
+            if "internal_path" in json_decamelized
+            else None
+        )
         ipt = (
             json_decamelized.pop("internal_port")
             if "internal_port" in json_decamelized
@@ -88,7 +100,7 @@ def extract_fields_from_json(cls, json_decamelized):
             if "conditions" in json_decamelized
             else None
         )
-        s = json_decamelized.pop("status")
+        s = json_decamelized.pop("status") if "status" in json_decamelized else None
 
         return ai, ati, ii, iph, ipt, ei, ep, r, d, c, s
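As an aside, the guarded pops above have a behavior-equivalent shorthand in dict.pop's default argument, e.g.:

ai = json_decamelized.pop("available_instances", None)
s = json_decamelized.pop("status", None)
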
14 changes: 9 additions & 5 deletions python/hsml/python/predictor_config.py
@@ -13,21 +13,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from typing import Union, Optional
 
 from hsml.constants import PREDICTOR
 from hsml.predictor_config import PredictorConfig
+from hsml.resources_config import PredictorResourcesConfig
+from hsml.inference_batcher_config import InferenceBatcherConfig
+from hsml.inference_logger_config import InferenceLoggerConfig
 
 
 class PredictorConfig(PredictorConfig):
     """Configuration for a predictor running a python model."""
 
     def __init__(
         self,
-        serving_tool=PREDICTOR.SERVING_TOOL_KFSERVING,
-        script_file=None,
-        resources_config=None,
-        inference_logger=None,
-        inference_batcher=None,
+        serving_tool: Optional[str] = PREDICTOR.SERVING_TOOL_KFSERVING,
+        script_file: Optional[str] = None,
+        resources_config: Optional[Union[PredictorResourcesConfig, dict]] = None,
+        inference_logger: Optional[Union[InferenceLoggerConfig, dict]] = None,
+        inference_batcher: Optional[Union[InferenceBatcherConfig, dict]] = None,
     ):
         super().__init__(
             model_server=PREDICTOR.MODEL_SERVER_PYTHON,