Skip to content

Commit

Permalink
feat: add dedicated_resources to DeployedIndex in aiplatform v1beta1 …
Browse files Browse the repository at this point in the history
…index_endpoint.proto feat: add Scaling to OnlineServingConfig in aiplatform v1beta1 featurestore.proto chore: sort imports (#991)

* feat: add dedicated_resources to DeployedIndex in aiplatform v1beta1 index_endpoint.proto feat: add Scaling to OnlineServingConfig in aiplatform v1beta1 featurestore.proto chore: sort imports

PiperOrigin-RevId: 425395202

Source-Link: googleapis/googleapis@e3bcc1e

Source-Link: googleapis/googleapis-gen@62beef7
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjJiZWVmNzg1NTljOGJhYjQ3ZWNkMzZlZWQxYWE2YjY3OGRiNjA4OCJ9

* 🦉 Updates from OwlBot

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
Co-authored-by: Yu-Han Liu <yuhanliu@google.com>
  • Loading branch information
3 people committed Jan 31, 2022
1 parent a814923 commit 7a7f0d4
Show file tree
Hide file tree
Showing 9 changed files with 53 additions and 25 deletions.
Expand Up @@ -255,9 +255,7 @@ async def create_study(
Returns:
google.cloud.aiplatform_v1beta1.types.Study:
LINT.IfChange
A message representing a Study.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
Expand Down Expand Up @@ -328,9 +326,7 @@ async def get_study(
Returns:
google.cloud.aiplatform_v1beta1.types.Study:
LINT.IfChange
A message representing a Study.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
Expand Down Expand Up @@ -548,9 +544,7 @@ async def lookup_study(
Returns:
google.cloud.aiplatform_v1beta1.types.Study:
LINT.IfChange
A message representing a Study.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
Expand Down
Expand Up @@ -478,9 +478,7 @@ def create_study(
Returns:
google.cloud.aiplatform_v1beta1.types.Study:
LINT.IfChange
A message representing a Study.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
Expand Down Expand Up @@ -551,9 +549,7 @@ def get_study(
Returns:
google.cloud.aiplatform_v1beta1.types.Study:
LINT.IfChange
A message representing a Study.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
Expand Down Expand Up @@ -771,9 +767,7 @@ def lookup_study(
Returns:
google.cloud.aiplatform_v1beta1.types.Study:
LINT.IfChange
A message representing a Study.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
Expand Down
1 change: 1 addition & 0 deletions google/cloud/aiplatform_v1beta1/types/custom_job.py
Expand Up @@ -120,6 +120,7 @@ class CustomJob(proto.Message):

class CustomJobSpec(proto.Message):
r"""Represents the spec of a CustomJob.
Next Id: 14
Attributes:
worker_pool_specs (Sequence[google.cloud.aiplatform_v1beta1.types.WorkerPoolSpec]):
Expand Down
34 changes: 30 additions & 4 deletions google/cloud/aiplatform_v1beta1/types/featurestore.py
Expand Up @@ -81,13 +81,39 @@ class OnlineServingConfig(proto.Message):
Attributes:
fixed_node_count (int):
The number of nodes for each cluster. The
number of nodes will not scale automatically but
can be scaled manually by providing different
values when updating.
The number of nodes for each cluster. The number of nodes
will not scale automatically but can be scaled manually by
providing different values when updating. Only one of
``fixed_node_count`` and ``scaling`` can be set. Setting one
will reset the other.
scaling (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig.Scaling):
Online serving scaling configuration. Only one of
``fixed_node_count`` and ``scaling`` can be set. Setting one
will reset the other.
"""

class Scaling(proto.Message):
    r"""Online serving scaling configuration. If min_node_count and
    max_node_count are set to the same value, the cluster will be
    configured with the fixed number of nodes (no auto-scaling).

    Attributes:
        min_node_count (int):
            Required. The minimum number of nodes to
            scale down to. Must be greater than or equal to
            1.
        max_node_count (int):
            The maximum number of nodes to scale up to. Must be greater
            or equal to min_node_count.
    """

    # Field numbers mirror the Scaling message in the upstream
    # aiplatform v1beta1 featurestore.proto definition.
    min_node_count = proto.Field(proto.INT32, number=1,)
    max_node_count = proto.Field(proto.INT32, number=2,)

fixed_node_count = proto.Field(proto.INT32, number=2,)
scaling = proto.Field(
proto.MESSAGE, number=4, message="Featurestore.OnlineServingConfig.Scaling",
)

name = proto.Field(proto.STRING, number=1,)
create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
Expand Down
12 changes: 12 additions & 0 deletions google/cloud/aiplatform_v1beta1/types/index_endpoint.py
Expand Up @@ -171,6 +171,15 @@ class DeployedIndex(proto.Message):
don't provide SLA when min_replica_count=1). If
max_replica_count is not set, the default value is
min_replica_count. The max allowed replica count is 1000.
dedicated_resources (google.cloud.aiplatform_v1beta1.types.DedicatedResources):
Optional. A description of resources that are dedicated to
the DeployedIndex, and that need a higher degree of manual
configuration. If min_replica_count is not set, the default
value is 2 (we don't provide SLA when min_replica_count=1).
If max_replica_count is not set, the default value is
min_replica_count. The max allowed replica count is 1000.
Available machine types: n1-standard-16 n1-standard-32
enable_access_logging (bool):
Optional. If true, private endpoint's access
logs are sent to StackDriver Logging.
Expand Down Expand Up @@ -227,6 +236,9 @@ class DeployedIndex(proto.Message):
automatic_resources = proto.Field(
proto.MESSAGE, number=7, message=machine_resources.AutomaticResources,
)
dedicated_resources = proto.Field(
proto.MESSAGE, number=16, message=machine_resources.DedicatedResources,
)
enable_access_logging = proto.Field(proto.BOOL, number=8,)
deployed_index_auth_config = proto.Field(
proto.MESSAGE, number=9, message="DeployedIndexAuthConfig",
Expand Down
10 changes: 5 additions & 5 deletions google/cloud/aiplatform_v1beta1/types/model.py
Expand Up @@ -397,7 +397,7 @@ class ModelContainerSpec(proto.Message):
r"""Specification of a container for serving predictions. Some fields in
this message correspond to fields in the `Kubernetes Container v1
core
specification <https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core>`__.
specification <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core>`__.
Attributes:
image_uri (str):
Expand Down Expand Up @@ -463,7 +463,7 @@ class ModelContainerSpec(proto.Message):
this syntax with ``$$``; for example: $$(VARIABLE_NAME) This
field corresponds to the ``command`` field of the Kubernetes
Containers `v1 core
API <https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core>`__.
API <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core>`__.
args (Sequence[str]):
Immutable. Specifies arguments for the command that runs
when the container starts. This overrides the container's
Expand Down Expand Up @@ -502,7 +502,7 @@ class ModelContainerSpec(proto.Message):
this syntax with ``$$``; for example: $$(VARIABLE_NAME) This
field corresponds to the ``args`` field of the Kubernetes
Containers `v1 core
API <https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core>`__.
API <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core>`__.
env (Sequence[google.cloud.aiplatform_v1beta1.types.EnvVar]):
Immutable. List of environment variables to set in the
container. After the container starts running, code running
Expand Down Expand Up @@ -535,7 +535,7 @@ class ModelContainerSpec(proto.Message):
This field corresponds to the ``env`` field of the
Kubernetes Containers `v1 core
API <https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core>`__.
API <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core>`__.
ports (Sequence[google.cloud.aiplatform_v1beta1.types.Port]):
Immutable. List of ports to expose from the container.
Vertex AI sends any prediction requests that it receives to
Expand All @@ -558,7 +558,7 @@ class ModelContainerSpec(proto.Message):
Vertex AI does not use ports other than the first one
listed. This field corresponds to the ``ports`` field of the
Kubernetes Containers `v1 core
API <https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core>`__.
API <https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core>`__.
predict_route (str):
Immutable. HTTP path on the container to send prediction
requests to. Vertex AI forwards requests sent using
Expand Down
Expand Up @@ -284,9 +284,10 @@ class ModelDeploymentMonitoringScheduleConfig(proto.Message):
Attributes:
monitor_interval (google.protobuf.duration_pb2.Duration):
Required. The model monitoring job running
Required. The model monitoring job scheduling
interval. It will be rounded up to next full
hour.
hour. This defines how often the monitoring jobs
are triggered.
"""

monitor_interval = proto.Field(
Expand Down
3 changes: 1 addition & 2 deletions google/cloud/aiplatform_v1beta1/types/study.py
Expand Up @@ -27,8 +27,7 @@


class Study(proto.Message):
r"""LINT.IfChange
A message representing a Study.
r"""A message representing a Study.
Attributes:
name (str):
Expand Down
Expand Up @@ -43,6 +43,7 @@
)
from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers
from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import transports
from google.cloud.aiplatform_v1beta1.types import accelerator_type
from google.cloud.aiplatform_v1beta1.types import index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
Expand Down

0 comments on commit 7a7f0d4

Please sign in to comment.