Dataproc: copy lintified proto files (via synth). #7465

Merged · 1 commit · Mar 1, 2019
91 changes: 54 additions & 37 deletions dataproc/google/cloud/dataproc_v1/proto/clusters.proto
@@ -29,28 +29,30 @@ option java_multiple_files = true;
option java_outer_classname = "ClustersProto";
option java_package = "com.google.cloud.dataproc.v1";


// The ClusterControllerService provides methods to manage clusters
// of Compute Engine instances.
service ClusterController {
// Creates a cluster in a project.
rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
rpc CreateCluster(CreateClusterRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/projects/{project_id}/regions/{region}/clusters"
body: "cluster"
};
}

// Updates a cluster in a project.
rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) {
rpc UpdateCluster(UpdateClusterRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
body: "cluster"
};
}

// Deletes a cluster in a project.
rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) {
rpc DeleteCluster(DeleteClusterRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
};
@@ -73,7 +75,8 @@ service ClusterController {
// Gets cluster diagnostic information.
// After the operation completes, the Operation.response field
// contains `DiagnoseClusterOutputLocation`.
rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) {
rpc DiagnoseCluster(DiagnoseClusterRequest)
returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose"
body: "*"
@@ -99,8 +102,9 @@ message Cluster {
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// Label **values** may be empty, but, if present, must contain 1 to 63
// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// No more than 32 labels can be associated with a cluster.
// characters, and must conform to [RFC
// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
// associated with a cluster.
map<string, string> labels = 8;

// Output only. Cluster status.
@@ -115,8 +119,8 @@ message Cluster {

// Contains cluster daemon metrics such as HDFS and YARN stats.
//
// **Beta Feature**: This report is available for testing purposes only. It may
// be changed before final release.
// **Beta Feature**: This report is available for testing purposes only. It
// may be changed before final release.
ClusterMetrics metrics = 9;
}

@@ -152,9 +156,11 @@ message ClusterConfig {
// Optional. Commands to execute on each node after config is
// completed. By default, executables are run on master and all worker nodes.
// You can test a node's `role` metadata to run an executable on
// a master or worker node, as shown below using `curl` (you can also use `wget`):
// a master or worker node, as shown below using `curl` (you can also use
// `wget`):
//
// ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
// ROLE=$(curl -H Metadata-Flavor:Google
// http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
// if [[ "${ROLE}" == 'Master' ]]; then
// ... master specific actions ...
// else
@@ -213,11 +219,11 @@ message GceClusterConfig {
string subnetwork_uri = 6;

// Optional. If true, all instances in the cluster will only have internal IP
// addresses. By default, clusters are not restricted to internal IP addresses,
// and will have ephemeral external IP addresses assigned to each instance.
// This `internal_ip_only` restriction can only be enabled for subnetwork
// enabled networks, and all off-cluster dependencies must be configured to be
// accessible without external IP addresses.
// addresses. By default, clusters are not restricted to internal IP
// addresses, and will have ephemeral external IP addresses assigned to each
// instance. This `internal_ip_only` restriction can only be enabled for
// subnetwork enabled networks, and all off-cluster dependencies must be
// configured to be accessible without external IP addresses.
bool internal_ip_only = 7;

// Optional. The service account of the instances. Defaults to the default
@@ -227,7 +233,8 @@ message GceClusterConfig {
// * roles/logging.logWriter
// * roles/storage.objectAdmin
//
// (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
// (see
// https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
// for more information).
// Example: `[account_id]@[project_id].iam.gserviceaccount.com`
string service_account = 8;
@@ -253,7 +260,8 @@ message GceClusterConfig {
repeated string tags = 4;

// The Compute Engine metadata entries to add to all instances (see
// [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
// [Project and instance
// metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
map<string, string> metadata = 5;
}
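For context on the GceClusterConfig fields documented above (subnetwork_uri, internal_ip_only, service_account, tags, metadata), here is a minimal sketch of building the message with the generated clusters_pb2 module; the project, subnetwork, and service-account names are placeholders.

```python
from google.cloud.dataproc_v1.proto import clusters_pb2

# Illustrative values only; field names follow the GceClusterConfig message above.
gce_config = clusters_pb2.GceClusterConfig(
    subnetwork_uri="projects/my-project/regions/us-central1/subnetworks/my-subnet",
    internal_ip_only=True,  # only valid on subnetwork-enabled networks (see above)
    service_account="my-sa@my-project.iam.gserviceaccount.com",
    service_account_scopes=["https://www.googleapis.com/auth/cloud-platform"],
    tags=["dataproc-node"],
    metadata={"custom-key": "custom-value"},
)
```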

@@ -282,15 +290,17 @@ message InstanceGroupConfig {
// * `n1-standard-2`
//
// **Auto Zone Exception**: If you are using the Cloud Dataproc
// [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
// [Auto Zone
// Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
// feature, you must use the short name of the machine type
// resource, for example, `n1-standard-2`.
string machine_type_uri = 4;

// Optional. Disk option config settings.
DiskConfig disk_config = 5;

// Optional. Specifies that this instance group contains preemptible instances.
// Optional. Specifies that this instance group contains preemptible
// instances.
bool is_preemptible = 6;

// Output only. The config for Compute Engine Instance Group
@@ -321,7 +331,8 @@ message ManagedGroupConfig {
message AcceleratorConfig {
// Full URL, partial URI, or short name of the accelerator type resource to
// expose to this instance. See
// [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
// [Compute Engine
// AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
//
// Examples:
//
@@ -330,7 +341,8 @@ message AcceleratorConfig {
// * `nvidia-tesla-k80`
//
// **Auto Zone Exception**: If you are using the Cloud Dataproc
// [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
// [Auto Zone
// Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
// feature, you must use the short name of the accelerator type
// resource, for example, `nvidia-tesla-k80`.
string accelerator_type_uri = 1;
@@ -429,10 +441,12 @@ message ClusterStatus {

// Specifies the selection and config of software inside the cluster.
message SoftwareConfig {
// Optional. The version of software inside the cluster. It must be one of the supported
// [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
// Optional. The version of software inside the cluster. It must be one of the
// supported [Cloud Dataproc
// Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
// such as "1.2" (including a subminor version, such as "1.2.29"), or the
// ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
// ["preview"
// version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
// If unspecified, it defaults to the latest version.
string image_version = 1;

@@ -482,10 +496,11 @@ message CreateClusterRequest {
Cluster cluster = 2;

// Optional. A unique id used to identify the request. If the server
// receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests with the same
// id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
// is returned.
// receives two
// [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
// requests with the same id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -570,10 +585,11 @@ message UpdateClusterRequest {
google.protobuf.FieldMask update_mask = 4;

// Optional. A unique id used to identify the request. If the server
// receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests with the same
// id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
// backend is returned.
// receives two
// [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
// requests with the same id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -600,10 +616,11 @@ message DeleteClusterRequest {
string cluster_uuid = 4;

// Optional. A unique id used to identify the request. If the server
// receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests with the same
// id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
// backend is returned.
// receives two
// [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
// requests with the same id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
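For reference, a minimal sketch of driving the ClusterController RPCs above through the Python client generated from this proto, assuming the flattened GAPIC surface of this release (project_id, region, cluster arguments plus an optional request_id); the project, zone, and cluster names are placeholders.

```python
import uuid

from google.cloud import dataproc_v1

client = dataproc_v1.ClusterControllerClient()
cluster = {
    "project_id": "my-project",
    "cluster_name": "example-cluster",
    "config": {"gce_cluster_config": {"zone_uri": "us-central1-a"}},
}
# request_id follows the UUID recommendation in CreateClusterRequest above.
operation = client.create_cluster(
    "my-project", "global", cluster, request_id=str(uuid.uuid4())
)
result = operation.result()  # blocks until the long-running operation finishes
```

Per the request_id documentation above, retrying with the same id should return the operation stored for the first request rather than creating a second cluster.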
8 changes: 4 additions & 4 deletions dataproc/google/cloud/dataproc_v1/proto/clusters_pb2.py

Some generated files are not rendered by default.

45 changes: 25 additions & 20 deletions dataproc/google/cloud/dataproc_v1/proto/jobs.proto
@@ -27,7 +27,6 @@ option java_multiple_files = true;
option java_outer_classname = "JobsProto";
option java_package = "com.google.cloud.dataproc.v1";


// The JobController provides methods to manage jobs.
service JobController {
// Submits a job to a cluster.
@@ -62,7 +61,8 @@ service JobController {

// Starts a job cancellation request. To access the job resource
// after cancellation, call
// [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
// [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
// or
// [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
rpc CancelJob(CancelJobRequest) returns (Job) {
option (google.api.http) = {
@@ -122,8 +122,10 @@ message LoggingConfig {
}

// A Cloud Dataproc job for running
// [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
// jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
// [Apache Hadoop
// MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
// jobs on [Apache Hadoop
// YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
message HadoopJob {
// Required. Indicates the location of the driver's main class. Specify
// either the jar file that contains the main class or the main class name.
@@ -143,8 +145,8 @@ message HadoopJob {
}

// Optional. The arguments to pass to the driver. Do not
// include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
// properties, since a collision may occur that causes an incorrect job
// include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
// job properties, since a collision may occur that causes an incorrect job
// submission.
repeated string args = 3;

@@ -178,7 +180,8 @@ message SparkJob {
// Required. The specification of the main method to call to drive the job.
// Specify either the jar file that contains the main class or the main class
// name. To pass both a main jar and a main class in that jar, add the jar to
// `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
// `CommonJob.jar_file_uris`, and then specify the main class name in
// `main_class`.
oneof driver {
// The HCFS URI of the jar file that contains the main class.
string main_jar_file_uri = 1;
@@ -217,7 +220,8 @@ message SparkJob {
}

// A Cloud Dataproc job for running
// [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
// [Apache
// PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
// applications on YARN.
message PySparkJob {
// Required. The HCFS URI of the main Python file to use as the driver. Must
@@ -288,8 +292,8 @@ message HiveJob {
}

// Optional. Whether to continue executing queries if a query fails.
// The default value is `false`. Setting to `true` can be useful when executing
// independent parallel queries.
// The default value is `false`. Setting to `true` can be useful when
// executing independent parallel queries.
bool continue_on_failure = 3;

// Optional. Mapping of query variable names to values (equivalent to the
@@ -308,8 +312,8 @@ message HiveJob {
repeated string jar_file_uris = 6;
}
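A small sketch of a HiveJob built with the generated jobs_pb2 module, exercising the fields documented above; the bucket paths and variable values are placeholders.

```python
from google.cloud.dataproc_v1.proto import jobs_pb2

# Illustrative values only; field names follow the HiveJob message above.
hive_job = jobs_pb2.HiveJob(
    query_file_uri="gs://my-bucket/queries.hql",
    continue_on_failure=True,             # keep executing remaining queries on failure
    script_variables={"env": "staging"},  # equivalent to `SET env="staging";`
    jar_file_uris=["gs://my-bucket/hive-udfs.jar"],
)
```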

// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
// queries.
// A Cloud Dataproc job for running [Apache Spark
// SQL](http://spark.apache.org/sql/) queries.
message SparkSqlJob {
// Required. The sequence of Spark SQL queries to execute, specified as
// either an HCFS file URI or as a list of queries.
@@ -351,8 +355,8 @@ message PigJob {
}

// Optional. Whether to continue executing queries if a query fails.
// The default value is `false`. Setting to `true` can be useful when executing
// independent parallel queries.
// The default value is `false`. Setting to `true` can be useful when
// executing independent parallel queries.
bool continue_on_failure = 3;

// Optional. Mapping of query variable names to values (equivalent to the Pig
@@ -573,8 +577,8 @@ message Job {

// Output only. The collection of YARN applications spun up by this job.
//
// **Beta** Feature: This report is available for testing purposes only. It may
// be changed before final release.
// **Beta** Feature: This report is available for testing purposes only. It
// may be changed before final release.
repeated YarnApplication yarn_applications = 9;

// Output only. A URI pointing to the location of the stdout of the job's
@@ -590,8 +594,9 @@ message Job {
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// Label **values** may be empty, but, if present, must contain 1 to 63
// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// No more than 32 labels can be associated with a job.
// characters, and must conform to [RFC
// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
// associated with a job.
map<string, string> labels = 18;

// Optional. Job scheduling configuration.
@@ -629,8 +634,8 @@ message SubmitJobRequest {
Job job = 2;

// Optional. A unique id used to identify the request. If the server
// receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests with the same
// id, then the second request will be ignored and the
// receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
// requests with the same id, then the second request will be ignored and the
// first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
// is returned.
//
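And a matching sketch for the JobController side, again assuming the flattened GAPIC signature (project_id, region, job); the cluster and bucket names are placeholders.

```python
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient()
job = {
    "placement": {"cluster_name": "example-cluster"},
    "pyspark_job": {"main_python_file_uri": "gs://my-bucket/word_count.py"},
}
submitted = client.submit_job("my-project", "global", job)
print(submitted.reference.job_id, submitted.status.state)
```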
3 changes: 2 additions & 1 deletion dataproc/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py
@@ -84,7 +84,8 @@ def UpdateJob(self, request, context):
def CancelJob(self, request, context):
"""Starts a job cancellation request. To access the job resource
after cancellation, call
[regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
[regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
or
[regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
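The generated servicer methods above raise UNIMPLEMENTED by default; a hypothetical in-process fake for tests would subclass the servicer and override the method, for example:

```python
from google.cloud.dataproc_v1.proto import jobs_pb2, jobs_pb2_grpc

class FakeJobController(jobs_pb2_grpc.JobControllerServicer):
    """Test-only fake; overrides the generated default, which returns UNIMPLEMENTED."""

    def CancelJob(self, request, context):
        # Echo back a minimal Job so callers can follow up with jobs.get/jobs.list.
        return jobs_pb2.Job(
            reference=jobs_pb2.JobReference(
                project_id=request.project_id, job_id=request.job_id
            )
        )
```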
1 change: 0 additions & 1 deletion dataproc/google/cloud/dataproc_v1/proto/operations.proto
@@ -25,7 +25,6 @@ option java_multiple_files = true;
option java_outer_classname = "OperationsProto";
option java_package = "com.google.cloud.dataproc.v1";


// The status of the operation.
message ClusterOperationStatus {
// The operation state.