Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

docs: improve documentation for write client #1560

Merged
merged 4 commits into from Mar 3, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
Expand Up @@ -294,6 +294,12 @@ public final UnaryCallable<CreateWriteStreamRequest, WriteStream> createWriteStr
* `BatchCommitWriteStreams` rpc.
* </ul>
*
* <p>Note: For users coding against the gRPC api directly, it may be necessary to supply the
* x-goog-request-params system parameter with `write_stream=&lt;full_write_stream_name&gt;`.
*
* <p>More information about system parameters:
* https://cloud.google.com/apis/docs/system-parameters
*
* <p>Sample code:
*
* <pre>{@code
Expand Down Expand Up @@ -523,7 +529,36 @@ public final FinalizeWriteStreamResponse finalizeWriteStream(FinalizeWriteStream
*
* <pre>{@code
* try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
* String parent = "parent-995424086";
* TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
* BatchCommitWriteStreamsResponse response =
* bigQueryWriteClient.batchCommitWriteStreams(parent);
* }
* }</pre>
*
* @param parent Required. Parent table that all the streams should belong to, in the form of
* `projects/{project}/datasets/{dataset}/tables/{table}`.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
public final BatchCommitWriteStreamsResponse batchCommitWriteStreams(TableName parent) {
  // Render the typed resource name to its string form; a null parent is passed
  // through unchanged so the request-level validation handles it.
  String parentName = (parent == null) ? null : parent.toString();
  BatchCommitWriteStreamsRequest request =
      BatchCommitWriteStreamsRequest.newBuilder().setParent(parentName).build();
  // Delegate to the request-object overload, which performs the RPC.
  return batchCommitWriteStreams(request);
}

// AUTO-GENERATED DOCUMENTATION AND METHOD.
/**
* Atomically commits a group of `PENDING` streams that belong to the same `parent` table.
*
* <p>Streams must be finalized before commit and cannot be committed multiple times. Once a
* stream is committed, data in the stream becomes available for read operations.
*
* <p>Sample code:
*
* <pre>{@code
* try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
* String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
* BatchCommitWriteStreamsResponse response =
* bigQueryWriteClient.batchCommitWriteStreams(parent);
* }
Expand Down Expand Up @@ -552,7 +587,7 @@ public final BatchCommitWriteStreamsResponse batchCommitWriteStreams(String pare
* try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
* BatchCommitWriteStreamsRequest request =
* BatchCommitWriteStreamsRequest.newBuilder()
* .setParent("parent-995424086")
* .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
* .addAllWriteStreams(new ArrayList<String>())
* .build();
* BatchCommitWriteStreamsResponse response =
Expand Down Expand Up @@ -581,7 +616,7 @@ public final BatchCommitWriteStreamsResponse batchCommitWriteStreams(
* try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
* BatchCommitWriteStreamsRequest request =
* BatchCommitWriteStreamsRequest.newBuilder()
* .setParent("parent-995424086")
* .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
* .addAllWriteStreams(new ArrayList<String>())
* .build();
* ApiFuture<BatchCommitWriteStreamsResponse> future =
Expand Down
Expand Up @@ -32,7 +32,7 @@
"methods": ["appendRowsCallable"]
},
"BatchCommitWriteStreams": {
"methods": ["batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreamsCallable"]
"methods": ["batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreamsCallable"]
},
"CreateWriteStream": {
"methods": ["createWriteStream", "createWriteStream", "createWriteStream", "createWriteStreamCallable"]
Expand Down
Expand Up @@ -214,6 +214,11 @@ public static class Builder extends StubSettings.Builder<BigQueryReadStubSetting
definitions.put(
"retry_policy_1_codes",
ImmutableSet.copyOf(Lists.<StatusCode.Code>newArrayList(StatusCode.Code.UNAVAILABLE)));
definitions.put(
"retry_policy_2_codes",
ImmutableSet.copyOf(
Lists.<StatusCode.Code>newArrayList(
StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE)));
RETRYABLE_CODE_DEFINITIONS = definitions.build();
}

Expand Down Expand Up @@ -244,6 +249,17 @@ public static class Builder extends StubSettings.Builder<BigQueryReadStubSetting
.setTotalTimeout(Duration.ofMillis(86400000L))
.build();
definitions.put("retry_policy_1_params", settings);
settings =
RetrySettings.newBuilder()
.setInitialRetryDelay(Duration.ofMillis(100L))
.setRetryDelayMultiplier(1.3)
.setMaxRetryDelay(Duration.ofMillis(60000L))
.setInitialRpcTimeout(Duration.ofMillis(600000L))
.setRpcTimeoutMultiplier(1.0)
.setMaxRpcTimeout(Duration.ofMillis(600000L))
.setTotalTimeout(Duration.ofMillis(600000L))
.build();
definitions.put("retry_policy_2_params", settings);
RETRY_PARAM_DEFINITIONS = definitions.build();
}

Expand Down Expand Up @@ -302,8 +318,8 @@ private static Builder initDefaults(Builder builder) {

builder
.splitReadStreamSettings()
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params"));
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params"));

return builder;
}
Expand Down
Expand Up @@ -242,7 +242,7 @@ public static class Builder extends StubSettings.Builder<BigQueryWriteStubSettin
ImmutableMap.Builder<String, ImmutableSet<StatusCode.Code>> definitions =
ImmutableMap.builder();
definitions.put(
"retry_policy_2_codes",
"retry_policy_4_codes",
ImmutableSet.copyOf(
Lists.<StatusCode.Code>newArrayList(
StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE)));
Expand All @@ -267,7 +267,7 @@ public static class Builder extends StubSettings.Builder<BigQueryWriteStubSettin
.setMaxRpcTimeout(Duration.ofMillis(600000L))
.setTotalTimeout(Duration.ofMillis(600000L))
.build();
definitions.put("retry_policy_2_params", settings);
definitions.put("retry_policy_4_params", settings);
settings =
RetrySettings.newBuilder()
.setInitialRetryDelay(Duration.ofMillis(100L))
Expand Down Expand Up @@ -341,28 +341,28 @@ private static Builder createDefault() {
private static Builder initDefaults(Builder builder) {
builder
.createWriteStreamSettings()
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params"));
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params"));

builder
.getWriteStreamSettings()
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params"));
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params"));

builder
.finalizeWriteStreamSettings()
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params"));
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params"));

builder
.batchCommitWriteStreamsSettings()
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params"));
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params"));

builder
.flushRowsSettings()
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params"));
.setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_4_codes"))
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_4_params"));

return builder;
}
Expand Down
Expand Up @@ -393,6 +393,46 @@ public void batchCommitWriteStreamsTest() throws Exception {
.build();
mockBigQueryWrite.addResponse(expectedResponse);

TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");

BatchCommitWriteStreamsResponse actualResponse = client.batchCommitWriteStreams(parent);
Assert.assertEquals(expectedResponse, actualResponse);

List<AbstractMessage> actualRequests = mockBigQueryWrite.getRequests();
Assert.assertEquals(1, actualRequests.size());
BatchCommitWriteStreamsRequest actualRequest =
((BatchCommitWriteStreamsRequest) actualRequests.get(0));

Assert.assertEquals(parent.toString(), actualRequest.getParent());
Assert.assertTrue(
channelProvider.isHeaderSent(
ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
}

@Test
public void batchCommitWriteStreamsExceptionTest() throws Exception {
  // Arrange: make the mock server fail the next call with INVALID_ARGUMENT.
  mockBigQueryWrite.addException(
      new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT));

  TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
  try {
    client.batchCommitWriteStreams(parent);
    Assert.fail("No exception raised");
  } catch (InvalidArgumentException expected) {
    // Expected: the gRPC status surfaces as an InvalidArgumentException.
  }
}

@Test
public void batchCommitWriteStreamsTest2() throws Exception {
BatchCommitWriteStreamsResponse expectedResponse =
BatchCommitWriteStreamsResponse.newBuilder()
.setCommitTime(Timestamp.newBuilder().build())
.addAllStreamErrors(new ArrayList<StorageError>())
.build();
mockBigQueryWrite.addResponse(expectedResponse);

String parent = "parent-995424086";

BatchCommitWriteStreamsResponse actualResponse = client.batchCommitWriteStreams(parent);
Expand All @@ -411,7 +451,7 @@ public void batchCommitWriteStreamsTest() throws Exception {
}

@Test
public void batchCommitWriteStreamsExceptionTest() throws Exception {
public void batchCommitWriteStreamsExceptionTest2() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
mockBigQueryWrite.addException(exception);

Expand Down
Expand Up @@ -424,6 +424,11 @@ public void createWriteStream(
* * For PENDING streams, data is not made visible until the stream itself is
* finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
* committed via the `BatchCommitWriteStreams` rpc.
* Note: For users coding against the gRPC api directly, it may be
* necessary to supply the x-goog-request-params system parameter
* with `write_stream=&lt;full_write_stream_name&gt;`.
* More information about system parameters:
* https://cloud.google.com/apis/docs/system-parameters
* </pre>
*/
public io.grpc.stub.StreamObserver<com.google.cloud.bigquery.storage.v1.AppendRowsRequest>
Expand Down Expand Up @@ -625,6 +630,11 @@ public void createWriteStream(
* * For PENDING streams, data is not made visible until the stream itself is
* finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly
* committed via the `BatchCommitWriteStreams` rpc.
* Note: For users coding against the gRPC api directly, it may be
* necessary to supply the x-goog-request-params system parameter
* with `write_stream=&lt;full_write_stream_name&gt;`.
* More information about system parameters:
* https://cloud.google.com/apis/docs/system-parameters
* </pre>
*/
public io.grpc.stub.StreamObserver<com.google.cloud.bigquery.storage.v1.AppendRowsRequest>
Expand Down
Expand Up @@ -1387,9 +1387,9 @@ public RowsCase getRowsCase() {
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand Down Expand Up @@ -1419,9 +1419,9 @@ public java.lang.String getWriteStream() {
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand Down Expand Up @@ -2048,9 +2048,9 @@ public Builder clearRows() {
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand Down Expand Up @@ -2079,9 +2079,9 @@ public java.lang.String getWriteStream() {
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand Down Expand Up @@ -2110,9 +2110,9 @@ public com.google.protobuf.ByteString getWriteStreamBytes() {
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand Down Expand Up @@ -2140,9 +2140,9 @@ public Builder setWriteStream(java.lang.String value) {
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand All @@ -2166,9 +2166,9 @@ public Builder clearWriteStream() {
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand Down
Expand Up @@ -32,9 +32,9 @@ public interface AppendRowsRequestOrBuilder
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand All @@ -53,9 +53,9 @@ public interface AppendRowsRequestOrBuilder
* If provided for subsequent requests, it must match the value of the first
* request.
* For explicitly created write streams, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}`
* For the special default stream, the format is:
* `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
* * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
* </pre>
*
* <code>
Expand Down