feat: add explanationSpec to TrainingPipeline-based custom jobs
PiperOrigin-RevId: 492054553
matthew29tang authored and Copybara-Service committed Dec 1, 2022
1 parent 43a2679 commit 957703f
Showing 5 changed files with 381 additions and 90 deletions.
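
This commit routes the user-facing `explanation_metadata` and `explanation_parameters` arguments through a single shared helper that validates them and folds them into one `ExplanationSpec` before any request is built. (The TrainingPipeline-side change referenced in the commit title is presumably in one of the changed files not shown on this page.) The call pattern repeated throughout `models.py` below is:

explanation_spec = _explanation_utils.create_and_validate_explanation_spec(
    explanation_metadata=explanation_metadata,
    explanation_parameters=explanation_parameters,
)

The resulting spec is then attached directly to the `DeployedModel` or `Model` message instead of being assembled inline at each call site.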
3 changes: 3 additions & 0 deletions google/cloud/aiplatform/explain/__init__.py
@@ -35,6 +35,8 @@
ExplanationParameters = explanation_compat.ExplanationParameters
FeatureNoiseSigma = explanation_compat.FeatureNoiseSigma

ExplanationSpec = explanation_compat.ExplanationSpec

# Classes used by ExplanationParameters
IntegratedGradientsAttribution = explanation_compat.IntegratedGradientsAttribution
SampledShapleyAttribution = explanation_compat.SampledShapleyAttribution
@@ -44,6 +46,7 @@

__all__ = (
"Encoding",
"ExplanationSpec",
"ExplanationMetadata",
"ExplanationParameters",
"FeatureNoiseSigma",
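`ExplanationSpec` is now re-exported from `google.cloud.aiplatform.explain` alongside the existing `ExplanationMetadata` and `ExplanationParameters` aliases, so callers can reference the combined type directly. A minimal construction sketch, assuming the Sampled Shapley attribution fields exposed by the underlying Vertex AI protos:

from google.cloud import aiplatform

# Sketch only: parameters must be specified to request explanations;
# metadata is optional and describes the model's inputs and outputs.
parameters = aiplatform.explain.ExplanationParameters(
    {"sampled_shapley_attribution": {"path_count": 10}}
)
spec = aiplatform.explain.ExplanationSpec(parameters=parameters)
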
120 changes: 33 additions & 87 deletions google/cloud/aiplatform/models.py
@@ -47,8 +47,8 @@
from google.cloud.aiplatform import models
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.utils import gcs_utils
from google.cloud.aiplatform.utils import _explanation_utils
from google.cloud.aiplatform import model_evaluation

from google.cloud.aiplatform.compat.services import endpoint_service_client

from google.cloud.aiplatform.compat.types import (
@@ -617,10 +617,6 @@ def _validate_deploy_args(
deployed_model_display_name: Optional[str],
traffic_split: Optional[Dict[str, int]],
traffic_percentage: Optional[int],
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
):
"""Helper method to validate deploy arguments.
@@ -663,20 +659,10 @@
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
Raises:
ValueError: if Min or Max replica is negative. Traffic percentage > 100 or
< 0. Or if traffic_split does not sum to 100.
ValueError: if explanation_metadata is specified while explanation_parameters
is not.
"""
if min_replica_count < 0:
raise ValueError("Min replica cannot be negative.")
@@ -697,11 +683,6 @@
"Sum of all traffic within traffic split needs to be 100."
)

if bool(explanation_metadata) and not bool(explanation_parameters):
raise ValueError(
"To get model explanation, `explanation_parameters` must be specified."
)

# Raises ValueError if invalid accelerator
if accelerator_type:
utils.validate_accelerator_type(accelerator_type)
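
The check removed above (metadata supplied without parameters) now lives in the new `_explanation_utils.create_and_validate_explanation_spec` helper, whose implementation is in one of the changed files not shown on this page (presumably google/cloud/aiplatform/utils/_explanation_utils.py, given the import added at the top of models.py). A hypothetical sketch of such a helper, inferred only from the call sites and the validation visible in this diff:

# Hypothetical sketch; not the actual implementation from this commit.
from typing import Optional

from google.cloud.aiplatform.compat.types import explanation as explanation_compat


def create_and_validate_explanation_spec(
    explanation_metadata: Optional[explanation_compat.ExplanationMetadata],
    explanation_parameters: Optional[explanation_compat.ExplanationParameters],
) -> Optional[explanation_compat.ExplanationSpec]:
    """Combine the two user-facing arguments into a single ExplanationSpec."""
    if explanation_metadata and not explanation_parameters:
        raise ValueError(
            "To get model explanation, `explanation_parameters` must be specified."
        )
    if explanation_parameters:
        spec = explanation_compat.ExplanationSpec(parameters=explanation_parameters)
        if explanation_metadata:
            spec.metadata = explanation_metadata
        return spec
    return None
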
@@ -817,6 +798,9 @@ def deploy(
deployed_model_display_name=deployed_model_display_name,
traffic_split=traffic_split,
traffic_percentage=traffic_percentage,
)

explanation_spec = _explanation_utils.create_and_validate_explanation_spec(
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
)
@@ -832,8 +816,7 @@
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
explanation_spec=explanation_spec,
metadata=metadata,
sync=sync,
deploy_request_timeout=deploy_request_timeout,
@@ -854,10 +837,7 @@ def _deploy(
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
explanation_spec: Optional[aiplatform.explain.ExplanationSpec] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
sync=True,
deploy_request_timeout: Optional[float] = None,
@@ -919,14 +899,8 @@ def _deploy(
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
explanation_spec (aiplatform.explain.ExplanationSpec):
Optional. Specification of Model explanation.
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
@@ -963,8 +937,7 @@ def _deploy(
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
explanation_spec=explanation_spec,
metadata=metadata,
deploy_request_timeout=deploy_request_timeout,
autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
@@ -992,10 +965,7 @@ def _deploy_call(
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
explanation_spec: Optional[aiplatform.explain.ExplanationSpec] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
deploy_request_timeout: Optional[float] = None,
autoscaling_target_cpu_utilization: Optional[int] = None,
@@ -1066,14 +1036,8 @@ def _deploy_call(
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
explanation_spec (aiplatform.explain.ExplanationSpec):
Optional. Specification of Model explanation.
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
@@ -1199,13 +1163,7 @@ def _deploy_call(
"See https://cloud.google.com/vertex-ai/docs/reference/rpc/google.cloud.aiplatform.v1#google.cloud.aiplatform.v1.Model.FIELDS.repeated.google.cloud.aiplatform.v1.Model.DeploymentResourcesType.google.cloud.aiplatform.v1.Model.supported_deployment_resources_types"
)

# Service will throw error if explanation_parameters is not provided
if explanation_parameters:
explanation_spec = gca_endpoint_compat.explanation.ExplanationSpec()
explanation_spec.parameters = explanation_parameters
if explanation_metadata:
explanation_spec.metadata = explanation_metadata
deployed_model.explanation_spec = explanation_spec
deployed_model.explanation_spec = explanation_spec

# Checking if traffic percentage is valid
# TODO(b/221059294) PrivateEndpoint should support traffic split
@@ -2332,6 +2290,9 @@ def deploy(
deployed_model_display_name=deployed_model_display_name,
traffic_split=None,
traffic_percentage=100,
)

explanation_spec = _explanation_utils.create_and_validate_explanation_spec(
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
)
@@ -2347,8 +2308,7 @@
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
explanation_spec=explanation_spec,
metadata=metadata,
sync=sync,
)
@@ -3004,11 +2964,6 @@ def upload(
if labels:
utils.validate_labels(labels)

if bool(explanation_metadata) and not bool(explanation_parameters):
raise ValueError(
"To get model explanation, `explanation_parameters` must be specified."
)

appended_user_agent = None
if local_model:
container_spec = local_model.get_serving_container_spec()
@@ -3109,13 +3064,12 @@ def upload(
if artifact_uri:
managed_model.artifact_uri = artifact_uri

# Override explanation_spec if required field is provided
if explanation_parameters:
explanation_spec = gca_endpoint_compat.explanation.ExplanationSpec()
explanation_spec.parameters = explanation_parameters
if explanation_metadata:
explanation_spec.metadata = explanation_metadata
managed_model.explanation_spec = explanation_spec
managed_model.explanation_spec = (
_explanation_utils.create_and_validate_explanation_spec(
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
)
)

request = gca_model_service_compat.UploadModelRequest(
parent=initializer.global_config.common_location_path(project, location),
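
From the caller's side, `upload()` keeps its signature; the two explanation arguments are simply converted by the helper before the `UploadModelRequest` above is assembled. A hedged usage sketch (display name, bucket, and serving image are placeholders):

from google.cloud import aiplatform

model = aiplatform.Model.upload(
    display_name="my-model",  # placeholder
    artifact_uri="gs://my-bucket/model/",  # placeholder
    serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest",  # placeholder
    explanation_parameters=aiplatform.explain.ExplanationParameters(
        {"sampled_shapley_attribution": {"path_count": 10}}
    ),
    # explanation_metadata is optional; passing it without
    # explanation_parameters raises ValueError (now from the shared helper).
)
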
@@ -3283,8 +3237,6 @@ def deploy(
deployed_model_display_name=deployed_model_display_name,
traffic_split=traffic_split,
traffic_percentage=traffic_percentage,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
)

if isinstance(endpoint, PrivateEndpoint):
@@ -3295,6 +3247,11 @@
"A maximum of one model can be deployed to each private Endpoint."
)

explanation_spec = _explanation_utils.create_and_validate_explanation_spec(
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
)

return self._deploy(
endpoint=endpoint,
deployed_model_display_name=deployed_model_display_name,
Expand All @@ -3306,8 +3263,7 @@ def deploy(
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
explanation_spec=explanation_spec,
metadata=metadata,
encryption_spec_key_name=encryption_spec_key_name
or initializer.global_config.encryption_spec_key_name,
@@ -3331,10 +3287,7 @@ def _deploy(
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
explanation_spec: Optional[aiplatform.explain.ExplanationSpec] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
encryption_spec_key_name: Optional[str] = None,
network: Optional[str] = None,
@@ -3398,14 +3351,8 @@ def _deploy(
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
explanation_spec (aiplatform.explain.ExplanationSpec):
Optional. Specification of Model explanation.
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
@@ -3483,8 +3430,7 @@ def _deploy(
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
explanation_spec=explanation_spec,
metadata=metadata,
deploy_request_timeout=deploy_request_timeout,
autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
(Diffs for the remaining 3 changed files were not loaded.)
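
End to end, `deploy()` behaves the same from the caller's perspective: it still accepts `explanation_metadata` and `explanation_parameters` and converts them internally. A hedged sketch continuing the upload example above (machine type and instance payload are placeholders):

# Deploy with explanations enabled, then request attributions.
endpoint = model.deploy(
    machine_type="n1-standard-4",  # placeholder
    explanation_parameters=aiplatform.explain.ExplanationParameters(
        {"sampled_shapley_attribution": {"path_count": 10}}
    ),
    # explanation_metadata may also be needed for your model; omitted for brevity.
)
response = endpoint.explain(instances=[{"feature_a": 1.0}])  # placeholder instance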