diff --git a/google/cloud/dataproc/__init__.py b/google/cloud/dataproc/__init__.py index 5ea01691..34ff7fb0 100644 --- a/google/cloud/dataproc/__init__.py +++ b/google/cloud/dataproc/__init__.py @@ -83,6 +83,7 @@ from google.cloud.dataproc_v1.types.batches import SparkSqlBatch from google.cloud.dataproc_v1.types.clusters import AcceleratorConfig from google.cloud.dataproc_v1.types.clusters import AutoscalingConfig +from google.cloud.dataproc_v1.types.clusters import AuxiliaryServicesConfig from google.cloud.dataproc_v1.types.clusters import Cluster from google.cloud.dataproc_v1.types.clusters import ClusterConfig from google.cloud.dataproc_v1.types.clusters import ClusterMetrics @@ -97,7 +98,6 @@ from google.cloud.dataproc_v1.types.clusters import EndpointConfig from google.cloud.dataproc_v1.types.clusters import GceClusterConfig from google.cloud.dataproc_v1.types.clusters import GetClusterRequest -from google.cloud.dataproc_v1.types.clusters import GkeClusterConfig from google.cloud.dataproc_v1.types.clusters import IdentityConfig from google.cloud.dataproc_v1.types.clusters import InstanceGroupConfig from google.cloud.dataproc_v1.types.clusters import KerberosConfig @@ -115,6 +115,7 @@ from google.cloud.dataproc_v1.types.clusters import StartClusterRequest from google.cloud.dataproc_v1.types.clusters import StopClusterRequest from google.cloud.dataproc_v1.types.clusters import UpdateClusterRequest +from google.cloud.dataproc_v1.types.clusters import VirtualClusterConfig from google.cloud.dataproc_v1.types.jobs import CancelJobRequest from google.cloud.dataproc_v1.types.jobs import DeleteJobRequest from google.cloud.dataproc_v1.types.jobs import GetJobRequest @@ -144,6 +145,11 @@ from google.cloud.dataproc_v1.types.operations import ClusterOperationStatus from google.cloud.dataproc_v1.types.shared import EnvironmentConfig from google.cloud.dataproc_v1.types.shared import ExecutionConfig +from google.cloud.dataproc_v1.types.shared import GkeClusterConfig +from google.cloud.dataproc_v1.types.shared import GkeNodePoolConfig +from google.cloud.dataproc_v1.types.shared import GkeNodePoolTarget +from google.cloud.dataproc_v1.types.shared import KubernetesClusterConfig +from google.cloud.dataproc_v1.types.shared import KubernetesSoftwareConfig from google.cloud.dataproc_v1.types.shared import PeripheralsConfig from google.cloud.dataproc_v1.types.shared import RuntimeConfig from google.cloud.dataproc_v1.types.shared import RuntimeInfo @@ -219,6 +225,7 @@ "SparkSqlBatch", "AcceleratorConfig", "AutoscalingConfig", + "AuxiliaryServicesConfig", "Cluster", "ClusterConfig", "ClusterMetrics", @@ -233,7 +240,6 @@ "EndpointConfig", "GceClusterConfig", "GetClusterRequest", - "GkeClusterConfig", "IdentityConfig", "InstanceGroupConfig", "KerberosConfig", @@ -251,6 +257,7 @@ "StartClusterRequest", "StopClusterRequest", "UpdateClusterRequest", + "VirtualClusterConfig", "CancelJobRequest", "DeleteJobRequest", "GetJobRequest", @@ -280,6 +287,11 @@ "ClusterOperationStatus", "EnvironmentConfig", "ExecutionConfig", + "GkeClusterConfig", + "GkeNodePoolConfig", + "GkeNodePoolTarget", + "KubernetesClusterConfig", + "KubernetesSoftwareConfig", "PeripheralsConfig", "RuntimeConfig", "RuntimeInfo", diff --git a/google/cloud/dataproc_v1/__init__.py b/google/cloud/dataproc_v1/__init__.py index 2c9ea7da..7f4c4ef5 100644 --- a/google/cloud/dataproc_v1/__init__.py +++ b/google/cloud/dataproc_v1/__init__.py @@ -47,6 +47,7 @@ from .types.batches import SparkSqlBatch from .types.clusters import AcceleratorConfig from 
.types.clusters import AutoscalingConfig +from .types.clusters import AuxiliaryServicesConfig from .types.clusters import Cluster from .types.clusters import ClusterConfig from .types.clusters import ClusterMetrics @@ -61,7 +62,6 @@ from .types.clusters import EndpointConfig from .types.clusters import GceClusterConfig from .types.clusters import GetClusterRequest -from .types.clusters import GkeClusterConfig from .types.clusters import IdentityConfig from .types.clusters import InstanceGroupConfig from .types.clusters import KerberosConfig @@ -79,6 +79,7 @@ from .types.clusters import StartClusterRequest from .types.clusters import StopClusterRequest from .types.clusters import UpdateClusterRequest +from .types.clusters import VirtualClusterConfig from .types.jobs import CancelJobRequest from .types.jobs import DeleteJobRequest from .types.jobs import GetJobRequest @@ -108,6 +109,11 @@ from .types.operations import ClusterOperationStatus from .types.shared import EnvironmentConfig from .types.shared import ExecutionConfig +from .types.shared import GkeClusterConfig +from .types.shared import GkeNodePoolConfig +from .types.shared import GkeNodePoolTarget +from .types.shared import KubernetesClusterConfig +from .types.shared import KubernetesSoftwareConfig from .types.shared import PeripheralsConfig from .types.shared import RuntimeConfig from .types.shared import RuntimeInfo @@ -146,6 +152,7 @@ "AutoscalingConfig", "AutoscalingPolicy", "AutoscalingPolicyServiceClient", + "AuxiliaryServicesConfig", "BasicAutoscalingAlgorithm", "BasicYarnAutoscalingConfig", "Batch", @@ -187,6 +194,8 @@ "GetJobRequest", "GetWorkflowTemplateRequest", "GkeClusterConfig", + "GkeNodePoolConfig", + "GkeNodePoolTarget", "HadoopJob", "HiveJob", "IdentityConfig", @@ -202,6 +211,8 @@ "JobScheduling", "JobStatus", "KerberosConfig", + "KubernetesClusterConfig", + "KubernetesSoftwareConfig", "LifecycleConfig", "ListAutoscalingPoliciesRequest", "ListAutoscalingPoliciesResponse", @@ -250,6 +261,7 @@ "UpdateJobRequest", "UpdateWorkflowTemplateRequest", "ValueValidation", + "VirtualClusterConfig", "WorkflowGraph", "WorkflowMetadata", "WorkflowNode", diff --git a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py index 7c375f3c..fac915db 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py @@ -53,8 +53,6 @@ class ClusterControllerAsyncClient: DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT - cluster_path = staticmethod(ClusterControllerClient.cluster_path) - parse_cluster_path = staticmethod(ClusterControllerClient.parse_cluster_path) service_path = staticmethod(ClusterControllerClient.service_path) parse_service_path = staticmethod(ClusterControllerClient.parse_service_path) common_billing_account_path = staticmethod( diff --git a/google/cloud/dataproc_v1/services/cluster_controller/client.py b/google/cloud/dataproc_v1/services/cluster_controller/client.py index e4e3fc43..f4a73a6f 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/client.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/client.py @@ -168,22 +168,6 @@ def transport(self) -> ClusterControllerTransport: """ return self._transport - @staticmethod - def cluster_path(project: str, location: str, cluster: str,) -> str: - """Returns a fully-qualified cluster string.""" 
- return "projects/{project}/locations/{location}/clusters/{cluster}".format( - project=project, location=location, cluster=cluster, - ) - - @staticmethod - def parse_cluster_path(path: str) -> Dict[str, str]: - """Parses a cluster path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - @staticmethod def service_path(project: str, location: str, service: str,) -> str: """Returns a fully-qualified service string.""" diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py index 1efc7e29..96cc0f10 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py @@ -53,8 +53,6 @@ class WorkflowTemplateServiceAsyncClient: DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT - cluster_path = staticmethod(WorkflowTemplateServiceClient.cluster_path) - parse_cluster_path = staticmethod(WorkflowTemplateServiceClient.parse_cluster_path) service_path = staticmethod(WorkflowTemplateServiceClient.service_path) parse_service_path = staticmethod(WorkflowTemplateServiceClient.parse_service_path) workflow_template_path = staticmethod( diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/client.py b/google/cloud/dataproc_v1/services/workflow_template_service/client.py index 6ed26046..81761d80 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/client.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/client.py @@ -168,22 +168,6 @@ def transport(self) -> WorkflowTemplateServiceTransport: """ return self._transport - @staticmethod - def cluster_path(project: str, location: str, cluster: str,) -> str: - """Returns a fully-qualified cluster string.""" - return "projects/{project}/locations/{location}/clusters/{cluster}".format( - project=project, location=location, cluster=cluster, - ) - - @staticmethod - def parse_cluster_path(path: str) -> Dict[str, str]: - """Parses a cluster path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/clusters/(?P.+?)$", - path, - ) - return m.groupdict() if m else {} - @staticmethod def service_path(project: str, location: str, service: str,) -> str: """Returns a fully-qualified service string.""" diff --git a/google/cloud/dataproc_v1/types/__init__.py b/google/cloud/dataproc_v1/types/__init__.py index 2ac23800..418e53d3 100644 --- a/google/cloud/dataproc_v1/types/__init__.py +++ b/google/cloud/dataproc_v1/types/__init__.py @@ -40,6 +40,7 @@ from .clusters import ( AcceleratorConfig, AutoscalingConfig, + AuxiliaryServicesConfig, Cluster, ClusterConfig, ClusterMetrics, @@ -54,7 +55,6 @@ EndpointConfig, GceClusterConfig, GetClusterRequest, - GkeClusterConfig, IdentityConfig, InstanceGroupConfig, KerberosConfig, @@ -72,6 +72,7 @@ StartClusterRequest, StopClusterRequest, UpdateClusterRequest, + VirtualClusterConfig, ) from .jobs import ( CancelJobRequest, @@ -107,6 +108,11 @@ from .shared import ( EnvironmentConfig, ExecutionConfig, + GkeClusterConfig, + GkeNodePoolConfig, + GkeNodePoolTarget, + KubernetesClusterConfig, + KubernetesSoftwareConfig, PeripheralsConfig, RuntimeConfig, RuntimeInfo, @@ -161,6 +167,7 @@ "SparkSqlBatch", "AcceleratorConfig", "AutoscalingConfig", + 
"AuxiliaryServicesConfig", "Cluster", "ClusterConfig", "ClusterMetrics", @@ -175,7 +182,6 @@ "EndpointConfig", "GceClusterConfig", "GetClusterRequest", - "GkeClusterConfig", "IdentityConfig", "InstanceGroupConfig", "KerberosConfig", @@ -193,6 +199,7 @@ "StartClusterRequest", "StopClusterRequest", "UpdateClusterRequest", + "VirtualClusterConfig", "CancelJobRequest", "DeleteJobRequest", "GetJobRequest", @@ -222,6 +229,11 @@ "ClusterOperationStatus", "EnvironmentConfig", "ExecutionConfig", + "GkeClusterConfig", + "GkeNodePoolConfig", + "GkeNodePoolTarget", + "KubernetesClusterConfig", + "KubernetesSoftwareConfig", "PeripheralsConfig", "RuntimeConfig", "RuntimeInfo", diff --git a/google/cloud/dataproc_v1/types/clusters.py b/google/cloud/dataproc_v1/types/clusters.py index 00125c16..7ec310a3 100644 --- a/google/cloud/dataproc_v1/types/clusters.py +++ b/google/cloud/dataproc_v1/types/clusters.py @@ -26,7 +26,8 @@ manifest={ "Cluster", "ClusterConfig", - "GkeClusterConfig", + "VirtualClusterConfig", + "AuxiliaryServicesConfig", "EndpointConfig", "AutoscalingConfig", "EncryptionConfig", @@ -79,6 +80,15 @@ class Cluster(proto.Message): Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. + virtual_cluster_config (google.cloud.dataproc_v1.types.VirtualClusterConfig): + Optional. The virtual cluster config, used when creating a + Dataproc cluster that does not directly control the + underlying compute resources, for example, when creating a + `Dataproc-on-GKE + cluster `__. + Note that Dataproc may set default values, and values may + change when clusters are updated. Exactly one of config or + virtualClusterConfig must be specified. labels (Sequence[google.cloud.dataproc_v1.types.Cluster.LabelsEntry]): Optional. The labels to associate with this cluster. Label **keys** must contain 1 to 63 characters, and must conform @@ -106,6 +116,9 @@ class Cluster(proto.Message): project_id = proto.Field(proto.STRING, number=1,) cluster_name = proto.Field(proto.STRING, number=2,) config = proto.Field(proto.MESSAGE, number=3, message="ClusterConfig",) + virtual_cluster_config = proto.Field( + proto.MESSAGE, number=10, message="VirtualClusterConfig", + ) labels = proto.MapField(proto.STRING, proto.STRING, number=8,) status = proto.Field(proto.MESSAGE, number=4, message="ClusterStatus",) status_history = proto.RepeatedField( @@ -192,13 +205,6 @@ class ClusterConfig(proto.Message): this cluster metastore_config (google.cloud.dataproc_v1.types.MetastoreConfig): Optional. Metastore configuration. - gke_cluster_config (google.cloud.dataproc_v1.types.GkeClusterConfig): - Optional. BETA. The Kubernetes Engine config for Dataproc - clusters deployed to Kubernetes. Setting this is considered - mutually exclusive with Compute Engine-based options such as - ``gce_cluster_config``, ``master_config``, - ``worker_config``, ``secondary_worker_config``, and - ``autoscaling_config``. """ config_bucket = proto.Field(proto.STRING, number=1,) @@ -227,37 +233,81 @@ class ClusterConfig(proto.Message): lifecycle_config = proto.Field(proto.MESSAGE, number=17, message="LifecycleConfig",) endpoint_config = proto.Field(proto.MESSAGE, number=19, message="EndpointConfig",) metastore_config = proto.Field(proto.MESSAGE, number=20, message="MetastoreConfig",) - gke_cluster_config = proto.Field( - proto.MESSAGE, number=21, message="GkeClusterConfig", - ) -class GkeClusterConfig(proto.Message): - r"""The GKE config for this cluster. 
+class VirtualClusterConfig(proto.Message): + r"""Dataproc cluster config for a cluster that does not directly control + the underlying compute resources, such as a `Dataproc-on-GKE + cluster `__. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: - namespaced_gke_deployment_target (google.cloud.dataproc_v1.types.GkeClusterConfig.NamespacedGkeDeploymentTarget): - Optional. A target for the deployment. + staging_bucket (str): + Optional. A Storage bucket used to stage job dependencies, + config files, and job driver console output. If you do not + specify a staging bucket, Cloud Dataproc will determine a + Cloud Storage location (US, ASIA, or EU) for your cluster's + staging bucket according to the Compute Engine zone where + your cluster is deployed, and then create and manage this + project-level, per-location bucket (see `Dataproc staging + and temp + buckets `__). + **This field requires a Cloud Storage bucket name, not a + ``gs://...`` URI to a Cloud Storage bucket.** + temp_bucket (str): + Optional. A Cloud Storage bucket used to store ephemeral + cluster and jobs data, such as Spark and MapReduce history + files. If you do not specify a temp bucket, Dataproc will + determine a Cloud Storage location (US, ASIA, or EU) for + your cluster's temp bucket according to the Compute Engine + zone where your cluster is deployed, and then create and + manage this project-level, per-location bucket. The default + bucket has a TTL of 90 days, but you can use any TTL (or + none) if you specify a bucket (see `Dataproc staging and + temp + buckets `__). + **This field requires a Cloud Storage bucket name, not a + ``gs://...`` URI to a Cloud Storage bucket.** + kubernetes_cluster_config (google.cloud.dataproc_v1.types.KubernetesClusterConfig): + Required. The configuration for running the + Dataproc cluster on Kubernetes. + + This field is a member of `oneof`_ ``infrastructure_config``. + auxiliary_services_config (google.cloud.dataproc_v1.types.AuxiliaryServicesConfig): + Optional. Configuration of auxiliary services + used by this cluster. """ - class NamespacedGkeDeploymentTarget(proto.Message): - r"""A full, namespace-isolated deployment target for an existing - GKE cluster. - - Attributes: - target_gke_cluster (str): - Optional. The target GKE cluster to deploy to. Format: - 'projects/{project}/locations/{location}/clusters/{cluster_id}' - cluster_namespace (str): - Optional. A namespace within the GKE cluster - to deploy into. - """ + staging_bucket = proto.Field(proto.STRING, number=1,) + temp_bucket = proto.Field(proto.STRING, number=2,) + kubernetes_cluster_config = proto.Field( + proto.MESSAGE, + number=6, + oneof="infrastructure_config", + message=shared.KubernetesClusterConfig, + ) + auxiliary_services_config = proto.Field( + proto.MESSAGE, number=7, message="AuxiliaryServicesConfig", + ) + + +class AuxiliaryServicesConfig(proto.Message): + r"""Auxiliary services configuration for a Cluster. - target_gke_cluster = proto.Field(proto.STRING, number=1,) - cluster_namespace = proto.Field(proto.STRING, number=2,) + Attributes: + metastore_config (google.cloud.dataproc_v1.types.MetastoreConfig): + Optional. The Hive Metastore configuration + for this workload. + spark_history_server_config (google.cloud.dataproc_v1.types.SparkHistoryServerConfig): + Optional. The Spark History Server + configuration for the workload. 
+ """ - namespaced_gke_deployment_target = proto.Field( - proto.MESSAGE, number=1, message=NamespacedGkeDeploymentTarget, + metastore_config = proto.Field(proto.MESSAGE, number=1, message="MetastoreConfig",) + spark_history_server_config = proto.Field( + proto.MESSAGE, number=2, message=shared.SparkHistoryServerConfig, ) @@ -686,8 +736,8 @@ class DiskConfig(proto.Message): local_ssd_interface (str): Optional. Interface type of local SSDs (default is "scsi"). Valid values: "scsi" (Small Computer System Interface), - "nvme" (Non-Volatile Memory Express). See `SSD Interface - types `__. + "nvme" (Non-Volatile Memory Express). See `local SSD + performance `__. """ boot_disk_type = proto.Field(proto.STRING, number=3,) diff --git a/google/cloud/dataproc_v1/types/shared.py b/google/cloud/dataproc_v1/types/shared.py index 990bef54..bd7d2b80 100644 --- a/google/cloud/dataproc_v1/types/shared.py +++ b/google/cloud/dataproc_v1/types/shared.py @@ -27,6 +27,11 @@ "SparkHistoryServerConfig", "PeripheralsConfig", "RuntimeInfo", + "GkeClusterConfig", + "KubernetesClusterConfig", + "KubernetesSoftwareConfig", + "GkeNodePoolTarget", + "GkeNodePoolConfig", }, ) @@ -192,4 +197,237 @@ class RuntimeInfo(proto.Message): diagnostic_output_uri = proto.Field(proto.STRING, number=3,) +class GkeClusterConfig(proto.Message): + r"""The cluster's GKE config. + + Attributes: + gke_cluster_target (str): + Optional. A target GKE cluster to deploy to. It must be in + the same project and region as the Dataproc cluster (the GKE + cluster can be zonal or regional). Format: + 'projects/{project}/locations/{location}/clusters/{cluster_id}' + node_pool_target (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolTarget]): + Optional. GKE NodePools where workloads will + be scheduled. At least one node pool must be + assigned the 'default' role. Each role can be + given to only a single NodePoolTarget. All + NodePools must have the same location settings. + If a nodePoolTarget is not specified, Dataproc + constructs a default nodePoolTarget. + """ + + gke_cluster_target = proto.Field(proto.STRING, number=2,) + node_pool_target = proto.RepeatedField( + proto.MESSAGE, number=3, message="GkeNodePoolTarget", + ) + + +class KubernetesClusterConfig(proto.Message): + r"""The configuration for running the Dataproc cluster on + Kubernetes. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + kubernetes_namespace (str): + Optional. A namespace within the Kubernetes + cluster to deploy into. If this namespace does + not exist, it is created. If it exists, Dataproc + verifies that another Dataproc VirtualCluster is + not installed into it. If not specified, the + name of the Dataproc Cluster is used. + gke_cluster_config (google.cloud.dataproc_v1.types.GkeClusterConfig): + Required. The configuration for running the + Dataproc cluster on GKE. + + This field is a member of `oneof`_ ``config``. + kubernetes_software_config (google.cloud.dataproc_v1.types.KubernetesSoftwareConfig): + Optional. The software configuration for this + Dataproc cluster running on Kubernetes. 
+ """ + + kubernetes_namespace = proto.Field(proto.STRING, number=1,) + gke_cluster_config = proto.Field( + proto.MESSAGE, number=2, oneof="config", message="GkeClusterConfig", + ) + kubernetes_software_config = proto.Field( + proto.MESSAGE, number=3, message="KubernetesSoftwareConfig", + ) + + +class KubernetesSoftwareConfig(proto.Message): + r"""The software configuration for this Dataproc cluster running + on Kubernetes. + + Attributes: + component_version (Sequence[google.cloud.dataproc_v1.types.KubernetesSoftwareConfig.ComponentVersionEntry]): + The components that should be installed in + this Dataproc cluster. The key must be a string + from the KubernetesComponent enumeration. The + value is the version of the software to be + installed. + At least one entry must be specified. + properties (Sequence[google.cloud.dataproc_v1.types.KubernetesSoftwareConfig.PropertiesEntry]): + The properties to set on daemon config files. + + Property keys are specified in ``prefix:property`` format, + for example ``spark:spark.kubernetes.container.image``. The + following are supported prefixes and their mappings: + + - spark: ``spark-defaults.conf`` + + For more information, see `Cluster + properties `__. + """ + + component_version = proto.MapField(proto.STRING, proto.STRING, number=1,) + properties = proto.MapField(proto.STRING, proto.STRING, number=2,) + + +class GkeNodePoolTarget(proto.Message): + r"""GKE NodePools that Dataproc workloads run on. + + Attributes: + node_pool (str): + Required. The target GKE NodePool. Format: + 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}' + roles (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolTarget.Role]): + Required. The types of role for a GKE + NodePool + node_pool_config (google.cloud.dataproc_v1.types.GkeNodePoolConfig): + Optional. The configuration for the GKE + NodePool. + If specified, Dataproc attempts to create a + NodePool with the specified shape. If one with + the same name already exists, it is verified + against all specified fields. If a field + differs, the virtual cluster creation will fail. + + If omitted, any NodePool with the specified name + is used. If a NodePool with the specified name + does not exist, Dataproc create a NodePool with + default values. + """ + + class Role(proto.Enum): + r"""``Role`` specifies whose tasks will run on the NodePool. The roles + can be specific to workloads. Exactly one GkeNodePoolTarget within + the VirtualCluster must have 'default' role, which is used to run + all workloads that are not associated with a NodePool. + """ + ROLE_UNSPECIFIED = 0 + DEFAULT = 1 + CONTROLLER = 2 + SPARK_DRIVER = 3 + SPARK_EXECUTOR = 4 + + node_pool = proto.Field(proto.STRING, number=1,) + roles = proto.RepeatedField(proto.ENUM, number=2, enum=Role,) + node_pool_config = proto.Field( + proto.MESSAGE, number=3, message="GkeNodePoolConfig", + ) + + +class GkeNodePoolConfig(proto.Message): + r"""The configuration of a GKE NodePool used by a `Dataproc-on-GKE + cluster `__. + + Attributes: + config (google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodeConfig): + Optional. The node pool configuration. + locations (Sequence[str]): + Optional. The list of Compute Engine + `zones `__ + where NodePool's nodes will be located. + + **Note:** Currently, only one zone may be specified. + + If a location is not specified during NodePool creation, + Dataproc will choose a location. + autoscaling (google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig): + Optional. 
The autoscaler configuration for + this NodePool. The autoscaler is enabled only + when a valid configuration is present. + """ + + class GkeNodeConfig(proto.Message): + r"""Parameters that describe cluster nodes. + + Attributes: + machine_type (str): + Optional. The name of a Compute Engine `machine + type `__. + preemptible (bool): + Optional. Whether the nodes are created as `preemptible VM + instances `__. + local_ssd_count (int): + Optional. The number of local SSD disks to attach to the + node, which is limited by the maximum number of disks + allowable per zone (see `Adding Local + SSDs `__). + accelerators (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig]): + Optional. A list of `hardware + accelerators `__ + to attach to each node. + min_cpu_platform (str): + Optional. `Minimum CPU + platform `__ + to be used by this instance. The instance may be scheduled + on the specified or a newer CPU platform. Specify the + friendly names of CPU platforms, such as "Intel Haswell"\` + or Intel Sandy Bridge". + """ + + machine_type = proto.Field(proto.STRING, number=1,) + preemptible = proto.Field(proto.BOOL, number=10,) + local_ssd_count = proto.Field(proto.INT32, number=7,) + accelerators = proto.RepeatedField( + proto.MESSAGE, + number=11, + message="GkeNodePoolConfig.GkeNodePoolAcceleratorConfig", + ) + min_cpu_platform = proto.Field(proto.STRING, number=13,) + + class GkeNodePoolAcceleratorConfig(proto.Message): + r"""A GkeNodeConfigAcceleratorConfig represents a Hardware + Accelerator request for a NodePool. + + Attributes: + accelerator_count (int): + The number of accelerator cards exposed to an + instance. + accelerator_type (str): + The accelerator type resource namename (see + GPUs on Compute Engine). + """ + + accelerator_count = proto.Field(proto.INT64, number=1,) + accelerator_type = proto.Field(proto.STRING, number=2,) + + class GkeNodePoolAutoscalingConfig(proto.Message): + r"""GkeNodePoolAutoscaling contains information the cluster + autoscaler needs to adjust the size of the node pool to the + current cluster usage. + + Attributes: + min_node_count (int): + The minimum number of nodes in the NodePool. Must be >= 0 + and <= max_node_count. + max_node_count (int): + The maximum number of nodes in the NodePool. Must be >= + min_node_count. **Note:** Quota must be sufficient to scale + up the cluster. 
+ """ + + min_node_count = proto.Field(proto.INT32, number=2,) + max_node_count = proto.Field(proto.INT32, number=3,) + + config = proto.Field(proto.MESSAGE, number=2, message=GkeNodeConfig,) + locations = proto.RepeatedField(proto.STRING, number=13,) + autoscaling = proto.Field( + proto.MESSAGE, number=4, message=GkeNodePoolAutoscalingConfig, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py index 7b5a7332..01baa92f 100644 --- a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py +++ b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py @@ -2442,34 +2442,10 @@ def test_cluster_controller_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_cluster_path(): +def test_service_path(): project = "squid" location = "clam" - cluster = "whelk" - expected = "projects/{project}/locations/{location}/clusters/{cluster}".format( - project=project, location=location, cluster=cluster, - ) - actual = ClusterControllerClient.cluster_path(project, location, cluster) - assert expected == actual - - -def test_parse_cluster_path(): - expected = { - "project": "octopus", - "location": "oyster", - "cluster": "nudibranch", - } - path = ClusterControllerClient.cluster_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterControllerClient.parse_cluster_path(path) - assert expected == actual - - -def test_service_path(): - project = "cuttlefish" - location = "mussel" - service = "winkle" + service = "whelk" expected = "projects/{project}/locations/{location}/services/{service}".format( project=project, location=location, service=service, ) @@ -2479,9 +2455,9 @@ def test_service_path(): def test_parse_service_path(): expected = { - "project": "nautilus", - "location": "scallop", - "service": "abalone", + "project": "octopus", + "location": "oyster", + "service": "nudibranch", } path = ClusterControllerClient.service_path(**expected) @@ -2491,7 +2467,7 @@ def test_parse_service_path(): def test_common_billing_account_path(): - billing_account = "squid" + billing_account = "cuttlefish" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -2501,7 +2477,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "clam", + "billing_account": "mussel", } path = ClusterControllerClient.common_billing_account_path(**expected) @@ -2511,7 +2487,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "whelk" + folder = "winkle" expected = "folders/{folder}".format(folder=folder,) actual = ClusterControllerClient.common_folder_path(folder) assert expected == actual @@ -2519,7 +2495,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "octopus", + "folder": "nautilus", } path = ClusterControllerClient.common_folder_path(**expected) @@ -2529,7 +2505,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "oyster" + organization = "scallop" expected = "organizations/{organization}".format(organization=organization,) actual = ClusterControllerClient.common_organization_path(organization) assert expected == actual @@ -2537,7 +2513,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "nudibranch", + "organization": "abalone", } 
path = ClusterControllerClient.common_organization_path(**expected) @@ -2547,7 +2523,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "cuttlefish" + project = "squid" expected = "projects/{project}".format(project=project,) actual = ClusterControllerClient.common_project_path(project) assert expected == actual @@ -2555,7 +2531,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "mussel", + "project": "clam", } path = ClusterControllerClient.common_project_path(**expected) @@ -2565,8 +2541,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "winkle" - location = "nautilus" + project = "whelk" + location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -2576,8 +2552,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "scallop", - "location": "abalone", + "project": "oyster", + "location": "nudibranch", } path = ClusterControllerClient.common_location_path(**expected) diff --git a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py index 68c881de..bc319bed 100644 --- a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py +++ b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py @@ -2944,34 +2944,10 @@ def test_workflow_template_service_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_cluster_path(): +def test_service_path(): project = "squid" location = "clam" - cluster = "whelk" - expected = "projects/{project}/locations/{location}/clusters/{cluster}".format( - project=project, location=location, cluster=cluster, - ) - actual = WorkflowTemplateServiceClient.cluster_path(project, location, cluster) - assert expected == actual - - -def test_parse_cluster_path(): - expected = { - "project": "octopus", - "location": "oyster", - "cluster": "nudibranch", - } - path = WorkflowTemplateServiceClient.cluster_path(**expected) - - # Check that the path construction is reversible. 
- actual = WorkflowTemplateServiceClient.parse_cluster_path(path) - assert expected == actual - - -def test_service_path(): - project = "cuttlefish" - location = "mussel" - service = "winkle" + service = "whelk" expected = "projects/{project}/locations/{location}/services/{service}".format( project=project, location=location, service=service, ) @@ -2981,9 +2957,9 @@ def test_service_path(): def test_parse_service_path(): expected = { - "project": "nautilus", - "location": "scallop", - "service": "abalone", + "project": "octopus", + "location": "oyster", + "service": "nudibranch", } path = WorkflowTemplateServiceClient.service_path(**expected) @@ -2993,9 +2969,9 @@ def test_parse_service_path(): def test_workflow_template_path(): - project = "squid" - region = "clam" - workflow_template = "whelk" + project = "cuttlefish" + region = "mussel" + workflow_template = "winkle" expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format( project=project, region=region, workflow_template=workflow_template, ) @@ -3007,9 +2983,9 @@ def test_workflow_template_path(): def test_parse_workflow_template_path(): expected = { - "project": "octopus", - "region": "oyster", - "workflow_template": "nudibranch", + "project": "nautilus", + "region": "scallop", + "workflow_template": "abalone", } path = WorkflowTemplateServiceClient.workflow_template_path(**expected) @@ -3019,7 +2995,7 @@ def test_parse_workflow_template_path(): def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -3029,7 +3005,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "clam", } path = WorkflowTemplateServiceClient.common_billing_account_path(**expected) @@ -3039,7 +3015,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = WorkflowTemplateServiceClient.common_folder_path(folder) assert expected == actual @@ -3047,7 +3023,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "octopus", } path = WorkflowTemplateServiceClient.common_folder_path(**expected) @@ -3057,7 +3033,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = WorkflowTemplateServiceClient.common_organization_path(organization) assert expected == actual @@ -3065,7 +3041,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "nudibranch", } path = WorkflowTemplateServiceClient.common_organization_path(**expected) @@ -3075,7 +3051,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = WorkflowTemplateServiceClient.common_project_path(project) assert expected == actual @@ -3083,7 +3059,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "mussel", } path = WorkflowTemplateServiceClient.common_project_path(**expected) @@ -3093,8 +3069,8 @@ def 
test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -3104,8 +3080,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "scallop", + "location": "abalone", } path = WorkflowTemplateServiceClient.common_location_path(**expected)
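For reviewers, here is a minimal sketch of how the new virtual-cluster surface added in this diff fits together: `Cluster.virtual_cluster_config` replaces the removed `ClusterConfig.gke_cluster_config`, and is composed from the new `VirtualClusterConfig`, `KubernetesClusterConfig`, `GkeClusterConfig`, `GkeNodePoolTarget`, and `KubernetesSoftwareConfig` types. The project, region, bucket, GKE resource names, and component/property values are hypothetical placeholders; this is an illustrative sketch, not a tested sample from this repository.

```python
# Sketch: creating a Dataproc-on-GKE virtual cluster with the types added
# in this change. All concrete names/values below are placeholders.
from google.cloud import dataproc_v1

project_id = "my-project"   # placeholder
region = "us-central1"      # placeholder
gke_cluster = f"projects/{project_id}/locations/{region}/clusters/my-gke-cluster"
node_pool = f"{gke_cluster}/nodePools/dataproc-default-pool"

# The cluster controller must be pointed at the regional endpoint.
client = dataproc_v1.ClusterControllerClient(
    client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
)

virtual_cluster = dataproc_v1.Cluster(
    project_id=project_id,
    cluster_name="my-virtual-cluster",
    # Exactly one of config or virtual_cluster_config may be set.
    virtual_cluster_config=dataproc_v1.VirtualClusterConfig(
        staging_bucket="my-staging-bucket",  # bucket name, not a gs:// URI
        kubernetes_cluster_config=dataproc_v1.KubernetesClusterConfig(
            kubernetes_namespace="dataproc",  # created if it does not exist
            gke_cluster_config=dataproc_v1.GkeClusterConfig(
                gke_cluster_target=gke_cluster,
                node_pool_target=[
                    dataproc_v1.GkeNodePoolTarget(
                        node_pool=node_pool,
                        # At least one target must carry the DEFAULT role.
                        roles=[dataproc_v1.GkeNodePoolTarget.Role.DEFAULT],
                    )
                ],
            ),
            kubernetes_software_config=dataproc_v1.KubernetesSoftwareConfig(
                # Key names a Kubernetes component; value is its version
                # (placeholder version string below).
                component_version={"SPARK": "3.1-dataproc-7"},
                properties={
                    "spark:spark.kubernetes.container.image": "example-image",
                },
            ),
        ),
    ),
)

operation = client.create_cluster(
    project_id=project_id, region=region, cluster=virtual_cluster
)
print(operation.result())
```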
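The new `GkeNodePoolTarget.node_pool_config` field also lets Dataproc create or validate the GKE node pool itself. A small hedged sketch, again with placeholder machine type, zone, and node counts (the nested `GkeNodeConfig` and `GkeNodePoolAutoscalingConfig` classes are the ones introduced in `shared.py` above):

```python
# Sketch: a node-pool target that asks Dataproc to create (or validate) the
# node pool with a specific shape and autoscaling bounds. Values are placeholders.
from google.cloud import dataproc_v1

spark_executor_pool = dataproc_v1.GkeNodePoolTarget(
    node_pool=(
        "projects/my-project/locations/us-central1/clusters/my-gke-cluster"
        "/nodePools/spark-executor-pool"
    ),
    roles=[dataproc_v1.GkeNodePoolTarget.Role.SPARK_EXECUTOR],
    node_pool_config=dataproc_v1.GkeNodePoolConfig(
        config=dataproc_v1.GkeNodePoolConfig.GkeNodeConfig(
            machine_type="n1-standard-4",
            preemptible=False,
        ),
        # Currently only one zone may be listed.
        locations=["us-central1-a"],
        autoscaling=dataproc_v1.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig(
            min_node_count=1,
            max_node_count=5,
        ),
    ),
)
```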
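Note that this change also removes the generated `cluster_path` / `parse_cluster_path` helpers from `ClusterControllerClient` and `WorkflowTemplateServiceClient` (and their tests). Callers that depended on those helpers can keep a local stand-in; the sketch below mirrors the format string used by the removed implementation, but the helper itself is ours, not part of the library.

```python
import re
from typing import Dict

# Local replacement for the removed ClusterControllerClient.cluster_path /
# parse_cluster_path static methods.
def cluster_path(project: str, location: str, cluster: str) -> str:
    """Returns a fully-qualified cluster resource name."""
    return f"projects/{project}/locations/{location}/clusters/{cluster}"

def parse_cluster_path(path: str) -> Dict[str, str]:
    """Parses a cluster path into its component segments."""
    m = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)"
        r"/clusters/(?P<cluster>.+?)$",
        path,
    )
    return m.groupdict() if m else {}
```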