feat: add Autoscaling API #475

Merged
merged 7 commits on Jan 12, 2022
8 changes: 8 additions & 0 deletions google/cloud/bigtable_admin_v2/__init__.py
@@ -36,6 +36,8 @@
from .types.bigtable_instance_admin import ListClustersResponse
from .types.bigtable_instance_admin import ListInstancesRequest
from .types.bigtable_instance_admin import ListInstancesResponse
from .types.bigtable_instance_admin import PartialUpdateClusterMetadata
from .types.bigtable_instance_admin import PartialUpdateClusterRequest
from .types.bigtable_instance_admin import PartialUpdateInstanceRequest
from .types.bigtable_instance_admin import UpdateAppProfileMetadata
from .types.bigtable_instance_admin import UpdateAppProfileRequest
@@ -73,6 +75,8 @@
from .types.common import OperationProgress
from .types.common import StorageType
from .types.instance import AppProfile
from .types.instance import AutoscalingLimits
from .types.instance import AutoscalingTargets
from .types.instance import Cluster
from .types.instance import Instance
from .types.table import Backup
@@ -89,6 +93,8 @@
"BigtableInstanceAdminAsyncClient",
"BigtableTableAdminAsyncClient",
"AppProfile",
"AutoscalingLimits",
"AutoscalingTargets",
"Backup",
"BackupInfo",
"BigtableInstanceAdminClient",
@@ -140,6 +146,8 @@
"ModifyColumnFamiliesRequest",
"OperationProgress",
"OptimizeRestoredTableMetadata",
"PartialUpdateClusterMetadata",
"PartialUpdateClusterRequest",
"PartialUpdateInstanceRequest",
"RestoreInfo",
"RestoreSourceType",
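The new exports above (AutoscalingLimits, AutoscalingTargets, and the PartialUpdateCluster request/metadata types) are plain proto-plus messages. As a rough sketch of how they compose, assuming the field names from the underlying instance.proto (min_serve_nodes, max_serve_nodes, cpu_utilization_percent) and the nested Cluster.ClusterConfig / Cluster.ClusterAutoscalingConfig messages, with placeholder resource names:

```python
# Hedged sketch (not part of this PR): composing the newly exported autoscaling types.
# Field names are assumed from the underlying instance.proto definitions.
from google.cloud import bigtable_admin_v2

autoscaling_config = bigtable_admin_v2.Cluster.ClusterConfig(
    cluster_autoscaling_config=bigtable_admin_v2.Cluster.ClusterAutoscalingConfig(
        autoscaling_limits=bigtable_admin_v2.AutoscalingLimits(
            min_serve_nodes=1,
            max_serve_nodes=5,
        ),
        autoscaling_targets=bigtable_admin_v2.AutoscalingTargets(
            cpu_utilization_percent=60,
        ),
    )
)

# A cluster that autoscales instead of carrying a fixed serve_nodes count.
cluster = bigtable_admin_v2.Cluster(
    location="projects/my-project/locations/us-central1-b",
    cluster_config=autoscaling_config,
)
```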
10 changes: 10 additions & 0 deletions google/cloud/bigtable_admin_v2/gapic_metadata.json
@@ -75,6 +75,11 @@
"list_instances"
]
},
"PartialUpdateCluster": {
"methods": [
"partial_update_cluster"
]
},
"PartialUpdateInstance": {
"methods": [
"partial_update_instance"
@@ -175,6 +180,11 @@
"list_instances"
]
},
"PartialUpdateCluster": {
"methods": [
"partial_update_cluster"
]
},
"PartialUpdateInstance": {
"methods": [
"partial_update_instance"
@@ -206,6 +206,12 @@ async def create_instance(
) -> operation_async.AsyncOperation:
r"""Create an instance within a project.

Note that exactly one of Cluster.serve_nodes and
Cluster.cluster_config.cluster_autoscaling_config can be set. If
serve_nodes is set to non-zero, then the cluster is manually
scaled. If cluster_config.cluster_autoscaling_config is
non-empty, then autoscaling is enabled.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]):
The request object. Request message for
@@ -738,6 +744,12 @@ async def create_cluster(
) -> operation_async.AsyncOperation:
r"""Creates a cluster within an instance.

Note that exactly one of Cluster.serve_nodes and
Cluster.cluster_config.cluster_autoscaling_config can be set. If
serve_nodes is set to non-zero, then the cluster is manually
scaled. If cluster_config.cluster_autoscaling_config is
non-empty, then autoscaling is enabled.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]):
The request object. Request message for
@@ -1009,6 +1021,10 @@ async def update_cluster(
) -> operation_async.AsyncOperation:
r"""Updates a cluster within an instance.

Note that UpdateCluster does not support updating
cluster_config.cluster_autoscaling_config. In order to update
it, you must use PartialUpdateCluster.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]):
The request object. A resizable group of nodes in a
@@ -1072,6 +1088,126 @@ async def update_cluster(
# Done; return the response.
return response

async def partial_update_cluster(
self,
request: Union[
bigtable_instance_admin.PartialUpdateClusterRequest, dict
] = None,
*,
cluster: instance.Cluster = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Partially updates a cluster within a project. This method is the
preferred way to update a Cluster.

To enable and update autoscaling, set
cluster_config.cluster_autoscaling_config. When autoscaling is
enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning
that updates to it are ignored. Note that an update cannot
simultaneously set serve_nodes to non-zero and
cluster_config.cluster_autoscaling_config to non-empty, and also
specify both in the update_mask.

To disable autoscaling, clear
cluster_config.cluster_autoscaling_config, and explicitly set a
serve_node count via the update_mask.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.PartialUpdateCluster.
cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`):
Required. The Cluster which contains the partial updates
to be applied, subject to the update_mask.

This corresponds to the ``cluster`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The subset of Cluster
fields which should be replaced.

This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.

Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.

The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable
of serving all
[Tables][google.bigtable.admin.v2.Table] in the
parent [Instance][google.bigtable.admin.v2.Instance].

"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([cluster, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)

request = bigtable_instance_admin.PartialUpdateClusterRequest(request)

# If we have keyword arguments corresponding to fields on the
# request, apply these.
if cluster is not None:
request.cluster = cluster
if update_mask is not None:
request.update_mask = update_mask

# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.partial_update_cluster,
default_retry=retries.Retry(
initial=1.0,
maximum=60.0,
multiplier=2,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)

# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("cluster.name", request.cluster.name),)
),
)

# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
instance.Cluster,
metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata,
)

# Done; return the response.
return response

async def delete_cluster(
self,
request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None,
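For context, a minimal sketch of calling the new asynchronous partial_update_cluster shown above to enable autoscaling on an existing cluster. The resource names are placeholders, and the FieldMask path follows the cluster_config.cluster_autoscaling_config field referenced in the docstring:

```python
# Hedged sketch: enabling autoscaling through the async client added above.
import asyncio

from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2


async def enable_autoscaling() -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    # Placeholder resource name; only the fields named in update_mask are applied.
    cluster = bigtable_admin_v2.Cluster(
        name="projects/my-project/instances/my-instance/clusters/my-cluster",
        cluster_config=bigtable_admin_v2.Cluster.ClusterConfig(
            cluster_autoscaling_config=bigtable_admin_v2.Cluster.ClusterAutoscalingConfig(
                autoscaling_limits=bigtable_admin_v2.AutoscalingLimits(
                    min_serve_nodes=1, max_serve_nodes=5
                ),
                autoscaling_targets=bigtable_admin_v2.AutoscalingTargets(
                    cpu_utilization_percent=60
                ),
            )
        ),
    )
    update_mask = field_mask_pb2.FieldMask(
        paths=["cluster_config.cluster_autoscaling_config"]
    )

    # Long-running operation; awaiting result() yields the updated Cluster.
    operation = await client.partial_update_cluster(
        cluster=cluster, update_mask=update_mask
    )
    updated = await operation.result()
    print(updated.name)


asyncio.run(enable_autoscaling())
```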
@@ -437,6 +437,12 @@ def create_instance(
) -> operation.Operation:
r"""Create an instance within a project.

Note that exactly one of Cluster.serve_nodes and
Cluster.cluster_config.cluster_autoscaling_config can be set. If
serve_nodes is set to non-zero, then the cluster is manually
scaled. If cluster_config.cluster_autoscaling_config is
non-empty, then autoscaling is enabled.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]):
The request object. Request message for
@@ -931,6 +937,12 @@ def create_cluster(
) -> operation.Operation:
r"""Creates a cluster within an instance.

Note that exactly one of Cluster.serve_nodes and
Cluster.cluster_config.cluster_autoscaling_config can be set. If
serve_nodes is set to non-zero, then the cluster is manually
scaled. If cluster_config.cluster_autoscaling_config is
non-empty, then autoscaling is enabled.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]):
The request object. Request message for
@@ -1182,6 +1194,10 @@ def update_cluster(
) -> operation.Operation:
r"""Updates a cluster within an instance.

Note that UpdateCluster does not support updating
cluster_config.cluster_autoscaling_config. In order to update
it, you must use PartialUpdateCluster.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]):
The request object. A resizable group of nodes in a
@@ -1236,6 +1252,116 @@ def update_cluster(
# Done; return the response.
return response

def partial_update_cluster(
self,
request: Union[
bigtable_instance_admin.PartialUpdateClusterRequest, dict
] = None,
*,
cluster: instance.Cluster = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Partially updates a cluster within a project. This method is the
preferred way to update a Cluster.

To enable and update autoscaling, set
cluster_config.cluster_autoscaling_config. When autoscaling is
enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning
that updates to it are ignored. Note that an update cannot
simultaneously set serve_nodes to non-zero and
cluster_config.cluster_autoscaling_config to non-empty, and also
specify both in the update_mask.

To disable autoscaling, clear
cluster_config.cluster_autoscaling_config, and explicitly set a
serve_node count via the update_mask.

Args:
request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]):
The request object. Request message for
BigtableInstanceAdmin.PartialUpdateCluster.
cluster (google.cloud.bigtable_admin_v2.types.Cluster):
Required. The Cluster which contains the partial updates
to be applied, subject to the update_mask.

This corresponds to the ``cluster`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The subset of Cluster
fields which should be replaced.

This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.

Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.

The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable
of serving all
[Tables][google.bigtable.admin.v2.Table] in the
parent [Instance][google.bigtable.admin.v2.Instance].

"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([cluster, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)

# Minor optimization to avoid making a copy if the user passes
# in a bigtable_instance_admin.PartialUpdateClusterRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest):
request = bigtable_instance_admin.PartialUpdateClusterRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if cluster is not None:
request.cluster = cluster
if update_mask is not None:
request.update_mask = update_mask

# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.partial_update_cluster]

# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("cluster.name", request.cluster.name),)
),
)

# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
instance.Cluster,
metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata,
)

# Done; return the response.
return response

def delete_cluster(
self,
request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None,
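And a corresponding sketch for the synchronous client above, going the other direction: disabling autoscaling by clearing cluster_config.cluster_autoscaling_config and setting an explicit serve_nodes count via the update_mask, as the docstring describes. Again, the resource names are placeholders:

```python
# Hedged sketch: switching a cluster back to manual scaling via the sync client.
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableInstanceAdminClient()

# Leaving cluster_config unset while naming it in the mask clears the
# autoscaling config; serve_nodes supplies the fixed node count.
cluster = bigtable_admin_v2.Cluster(
    name="projects/my-project/instances/my-instance/clusters/my-cluster",
    serve_nodes=3,
)
update_mask = field_mask_pb2.FieldMask(
    paths=["serve_nodes", "cluster_config.cluster_autoscaling_config"]
)

operation = client.partial_update_cluster(cluster=cluster, update_mask=update_mask)
updated = operation.result(timeout=300)  # blocks until the LRO completes
print(updated.serve_nodes)
```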