feat: new parameters in FlexTemplateRuntimeEnvironment (#69)
- [ ] Regenerate this pull request now.

chore: use gapic-generator-python 0.62.1

fix: resolve DuplicateCredentialArgs error when using credentials_file

committer: parthea
PiperOrigin-RevId: 425964861

Source-Link: googleapis/googleapis@84b1a5a

Source-Link: googleapis/googleapis-gen@4fb761b
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNGZiNzYxYmJkODUwNmFjMTU2ZjQ5YmFjNWYxODMwNmFhOGViM2FhOCJ9
gcf-owl-bot[bot] committed Feb 3, 2022
1 parent 888664b commit f8bd373
Showing 27 changed files with 568 additions and 56 deletions.
@@ -159,8 +159,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
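This same three-line change repeats in every generated gRPC transport below (one sync and one async transport per service). The reason it matters: google-api-core's grpc_helpers.create_channel treats credentials and credentials_file as mutually exclusive, and by this point the transport has already resolved the file into a credentials object, so forwarding both raised DuplicateCredentialArgs. A minimal sketch of the failure mode and the fix — the endpoint and "service-account.json" path are illustrative placeholders, not values from this commit:

    # Sketch only: illustrates the DuplicateCredentialArgs fix above.
    # The endpoint and "service-account.json" are placeholder values.
    import google.auth
    from google.api_core import exceptions, grpc_helpers

    credentials, _ = google.auth.load_credentials_from_file("service-account.json")

    try:
        # Pre-fix behavior: the already-loaded credentials AND the file
        # were both forwarded, which google-api-core rejects.
        grpc_helpers.create_channel(
            "dataflow.googleapis.com:443",
            credentials=credentials,
            credentials_file="service-account.json",
        )
    except exceptions.DuplicateCredentialArgs:
        pass  # "'credentials_file' and 'credentials' are mutually exclusive."

    # Post-fix behavior: forward only the saved credentials.
    channel = grpc_helpers.create_channel(
        "dataflow.googleapis.com:443",
        credentials=credentials,
        credentials_file=None,
    )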
@@ -204,8 +204,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -161,8 +161,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -206,8 +206,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -159,8 +159,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -204,8 +204,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -239,8 +239,8 @@ async def get_job_metrics(
                 in the job.
                 This resource captures only the most
-                recent values of each metric; time-
-                series data can be queried for them
+                recent values of each metric;
+                time-series data can be queried for them
                 (under the same metric names) from Cloud
                 Monitoring.
@@ -422,8 +422,8 @@ def get_job_metrics(
                 in the job.
                 This resource captures only the most
-                recent values of each metric; time-
-                series data can be queried for them
+                recent values of each metric;
+                time-series data can be queried for them
                 (under the same metric names) from Cloud
                 Monitoring.
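For context, the docstring being rewrapped above describes the payload returned by MetricsV1Beta3Client.get_job_metrics. A hedged usage sketch — the project, region, and job ID below are placeholders, not values from this commit:

    # Sketch only: project_id, location, and job_id are placeholder values.
    from google.cloud import dataflow_v1beta3

    client = dataflow_v1beta3.MetricsV1Beta3Client()
    job_metrics = client.get_job_metrics(
        request=dataflow_v1beta3.GetJobMetricsRequest(
            project_id="my-project",
            location="us-central1",
            job_id="2022-02-03_12_00_00-1234567890123456789",
        )
    )
    # Each MetricUpdate carries only the most recent value; historical
    # time-series data lives in Cloud Monitoring, as the docstring notes.
    for update in job_metrics.metrics:
        print(update.name.name, update.scalar)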
@@ -159,8 +159,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -204,8 +204,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -159,8 +159,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -204,8 +204,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -160,8 +160,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
@@ -205,8 +205,11 @@ def __init__(
         if not self._grpc_channel:
             self._grpc_channel = type(self).create_channel(
                 self._host,
+                # use the credentials which are saved
                 credentials=self._credentials,
-                credentials_file=credentials_file,
+                # Set ``credentials_file`` to ``None`` here as
+                # the credentials that we saved earlier should be used.
+                credentials_file=None,
                 scopes=self._scopes,
                 ssl_credentials=self._ssl_channel_credentials,
                 quota_project_id=quota_project_id,
7 changes: 3 additions & 4 deletions google/cloud/dataflow_v1beta3/types/environment.py
@@ -280,8 +280,8 @@ class Disk(proto.Message):
             default.
             For example, the standard persistent disk type
-            is a resource name typically ending in "pd-
-            standard". If SSD persistent disks are
+            is a resource name typically ending in
+            "pd-standard". If SSD persistent disks are
             available, the resource name typically ends with
             "pd-ssd". The actual valid values are defined
             the Google Compute Engine API, not by the Cloud
@@ -294,8 +294,7 @@ class Disk(proto.Message):
             the resource name will typically look something
             like this:
-            compute.googleapis.com/projects/project-
-            id/zones/zone/diskTypes/pd-standard
+            compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
         mount_point (str):
             Directory in a VM where disk is mounted.
         """
8 changes: 4 additions & 4 deletions google/cloud/dataflow_v1beta3/types/jobs.py
@@ -331,7 +331,7 @@ class FileIODetails(proto.Message):


 class BigTableIODetails(proto.Message):
-    r"""Metadata for a Cloud BigTable connector used by the job.
+    r"""Metadata for a Cloud Bigtable connector used by the job.
     Attributes:
         project_id (str):
@@ -424,11 +424,11 @@ class JobMetadata(proto.Message):
             Identification of a BigQuery source used in
             the Dataflow job.
         big_table_details (Sequence[google.cloud.dataflow_v1beta3.types.BigTableIODetails]):
-            Identification of a Cloud BigTable source
+            Identification of a Cloud Bigtable source
             used in the Dataflow job.
         pubsub_details (Sequence[google.cloud.dataflow_v1beta3.types.PubSubIODetails]):
-            Identification of a PubSub source used in the
-            Dataflow job.
+            Identification of a Pub/Sub source used in
+            the Dataflow job.
         file_details (Sequence[google.cloud.dataflow_v1beta3.types.FileIODetails]):
             Identification of a File source used in the
             Dataflow job.
5 changes: 2 additions & 3 deletions google/cloud/dataflow_v1beta3/types/messages.py
@@ -118,9 +118,8 @@ class AutoscalingEvent(proto.Message):
             The time this event was emitted to indicate a new target or
             current num_workers value.
         worker_pool (str):
-            A short and friendly name for the worker pool this event
-            refers to, populated from the value of
-            PoolStageRelation::user_pool_name.
+            A short and friendly name for the worker pool
+            this event refers to.
         """

     class AutoscalingEventType(proto.Enum):
8 changes: 4 additions & 4 deletions google/cloud/dataflow_v1beta3/types/metrics.py
@@ -86,8 +86,8 @@ class MetricUpdate(proto.Message):
             Metric aggregation kind. The possible metric
             aggregation kinds are "Sum", "Max", "Min",
             "Mean", "Set", "And", "Or", and "Distribution".
-            The specified aggregation kind is case-
-            insensitive.
+            The specified aggregation kind is
+            case-insensitive.
             If omitted, this is not an aggregated value but
             instead a single metric sample value.
         cumulative (bool):
@@ -177,8 +177,8 @@ class GetJobMetricsRequest(proto.Message):

 class JobMetrics(proto.Message):
     r"""JobMetrics contains a collection of metrics describing the
-    detailed progress of a Dataflow job. Metrics correspond to user-
-    defined and system-defined metrics in the job.
+    detailed progress of a Dataflow job. Metrics correspond to
+    user-defined and system-defined metrics in the job.
     This resource captures only the most recent values of each
     metric; time-series data can be queried for them (under the same
2 changes: 1 addition & 1 deletion google/cloud/dataflow_v1beta3/types/snapshots.py
@@ -79,7 +79,7 @@ class Snapshot(proto.Message):
         state (google.cloud.dataflow_v1beta3.types.SnapshotState):
             State of the snapshot.
         pubsub_metadata (Sequence[google.cloud.dataflow_v1beta3.types.PubsubSnapshotMetadata]):
-            PubSub snapshot metadata.
+            Pub/Sub snapshot metadata.
         description (str):
             User specified description of the snapshot.
             Maybe empty.
10 changes: 4 additions & 6 deletions google/cloud/dataflow_v1beta3/types/streaming.py
@@ -78,12 +78,10 @@ class PubsubLocation(proto.Message):
     Attributes:
         topic (str):
             A pubsub topic, in the form of
-            "pubsub.googleapis.com/topics/<project-
-            id>/<topic-name>".
+            "pubsub.googleapis.com/topics/<project-id>/<topic-name>".
         subscription (str):
             A pubsub subscription, in the form of
-            "pubsub.googleapis.com/subscriptions/<project-
-            id>/<subscription-name>".
+            "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>".
         timestamp_label (str):
             If set, contains a pubsub label from which to
             extract record timestamps. If left empty, record
@@ -93,8 +91,8 @@ class PubsubLocation(proto.Message):
             extract record ids. If left empty, record
             deduplication will be strictly best effort.
         drop_late_data (bool):
-            Indicates whether the pipeline allows late-
-            rriving data.
+            Indicates whether the pipeline allows
+            late-arriving data.
         tracking_subscription (str):
             If set, specifies the pubsub subscription
             that will be used for tracking custom time
28 changes: 27 additions & 1 deletion google/cloud/dataflow_v1beta3/types/templates.py
@@ -239,6 +239,24 @@ class FlexTemplateRuntimeEnvironment(proto.Message):
             to use for the 'worker harness. Default is the
             container for the version of the SDK. Note this
             field is only valid for portable pipelines.
+        disk_size_gb (int):
+            Worker disk size, in gigabytes.
+        autoscaling_algorithm (google.cloud.dataflow_v1beta3.types.AutoscalingAlgorithm):
+            The algorithm to use for autoscaling
+        dump_heap_on_oom (bool):
+            If true, save a heap dump before killing a
+            thread or process which is GC thrashing or out
+            of memory. The location of the heap file will
+            either be echoed back to the user, or the user
+            will be given the opportunity to download the
+            heap file.
+        save_heap_dumps_to_gcs_path (str):
+            Cloud Storage bucket (directory) to upload heap dumps to the
+            given location. Enabling this implies that heap dumps should
+            be generated on OOM (dump_heap_on_oom is set to true).
+        launcher_machine_type (str):
+            The machine type to use for launching the
+            job. The default is n1-standard-1.
         """

     num_workers = proto.Field(proto.INT32, number=1,)
@@ -263,6 +281,13 @@ class FlexTemplateRuntimeEnvironment(proto.Message):
     )
     staging_location = proto.Field(proto.STRING, number=17,)
     sdk_container_image = proto.Field(proto.STRING, number=18,)
+    disk_size_gb = proto.Field(proto.INT32, number=20,)
+    autoscaling_algorithm = proto.Field(
+        proto.ENUM, number=21, enum=gd_environment.AutoscalingAlgorithm,
+    )
+    dump_heap_on_oom = proto.Field(proto.BOOL, number=22,)
+    save_heap_dumps_to_gcs_path = proto.Field(proto.STRING, number=23,)
+    launcher_machine_type = proto.Field(proto.STRING, number=24,)


 class LaunchFlexTemplateRequest(proto.Message):
@@ -321,7 +346,8 @@ class RuntimeEnvironment(proto.Message):
             The machine type to use for the job. Defaults
             to the value from the template if not specified.
         additional_experiments (Sequence[str]):
-            Additional experiment flags for the job.
+            Additional experiment flags for the job, specified with the
+            ``--experiments`` option.
         network (str):
             Network to which VMs will be assigned. If
             empty or unspecified, the service will use the
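Taken together, the new FlexTemplateRuntimeEnvironment fields added above can be exercised when launching a Flex Template. A hedged sketch — the project, region, bucket, job name, and template-spec path are all placeholder values, not anything defined by this commit:

    # Sketch only: all names, paths, and the region are placeholder values.
    from google.cloud import dataflow_v1beta3

    client = dataflow_v1beta3.FlexTemplatesServiceClient()

    environment = dataflow_v1beta3.FlexTemplateRuntimeEnvironment(
        num_workers=2,
        # Fields added in this release:
        disk_size_gb=50,
        autoscaling_algorithm=(
            dataflow_v1beta3.AutoscalingAlgorithm.AUTOSCALING_ALGORITHM_BASIC
        ),
        dump_heap_on_oom=True,
        save_heap_dumps_to_gcs_path="gs://my-bucket/heap-dumps",
        launcher_machine_type="n1-standard-2",
    )

    response = client.launch_flex_template(
        request=dataflow_v1beta3.LaunchFlexTemplateRequest(
            project_id="my-project",
            location="us-central1",
            launch_parameter=dataflow_v1beta3.LaunchFlexTemplateParameter(
                job_name="example-flex-job",
                container_spec_gcs_path="gs://my-bucket/templates/spec.json",
                environment=environment,
            ),
        )
    )
    print(response.job.id)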
