docs: clarify size limitations for AppendRowsRequest (#1714)

chore: add preferred_min_stream_count to CreateReadSessionRequest
chore: add write_stream to AppendRowsResponse

PiperOrigin-RevId: 463602530

Source-Link: googleapis/googleapis@d33b3fa

Source-Link: googleapis/googleapis-gen@90995f6
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTA5OTVmNjQzM2QwZWNkMjkwZjE4NjE2OGNlOTU3ZDZhMGRiOWM2OCJ9
gcf-owl-bot[bot] committed Jul 27, 2022
1 parent 5171898 commit ed3fe1f
Showing 9 changed files with 522 additions and 150 deletions.
@@ -291,6 +291,7 @@ public final ReadSession createReadSession(
* .setParent(ProjectName.of("[PROJECT]").toString())
* .setReadSession(ReadSession.newBuilder().build())
* .setMaxStreamCount(940837515)
* .setPreferredMinStreamCount(-1905507237)
* .build();
* ReadSession response = baseBigQueryReadClient.createReadSession(request);
* }
@@ -333,6 +334,7 @@ public final ReadSession createReadSession(CreateReadSessionRequest request) {
* .setParent(ProjectName.of("[PROJECT]").toString())
* .setReadSession(ReadSession.newBuilder().build())
* .setMaxStreamCount(940837515)
* .setPreferredMinStreamCount(-1905507237)
* .build();
* ApiFuture<ReadSession> future =
* baseBigQueryReadClient.createReadSessionCallable().futureCall(request);
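For orientation, here is a minimal sketch of how a caller might exercise the new preferred_min_stream_count field when creating a read session. It is an illustration of the builder API shown above, not code from this commit: the project, dataset, and table names are placeholders, the stream counts are arbitrary, and the snippet assumes application-default credentials are available.

import com.google.cloud.bigquery.storage.v1.BaseBigQueryReadClient;
import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
import com.google.cloud.bigquery.storage.v1.DataFormat;
import com.google.cloud.bigquery.storage.v1.ProjectName;
import com.google.cloud.bigquery.storage.v1.ReadSession;

public class CreateReadSessionExample {
  public static void main(String[] args) throws Exception {
    try (BaseBigQueryReadClient client = BaseBigQueryReadClient.create()) {
      ReadSession session =
          ReadSession.newBuilder()
              // Placeholder table in "projects/{project}/datasets/{dataset}/tables/{table}" form.
              .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
              .setDataFormat(DataFormat.AVRO)
              .build();
      CreateReadSessionRequest request =
          CreateReadSessionRequest.newBuilder()
              .setParent(ProjectName.of("my-project").toString())
              .setReadSession(session)
              .setMaxStreamCount(10)
              // New field in this change: a preferred lower bound on the number of
              // streams; the service makes a best effort and may return fewer.
              .setPreferredMinStreamCount(2)
              .build();
      ReadSession response = client.createReadSession(request);
      System.out.println("Created read session: " + response.getName());
    }
  }
}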
@@ -177,6 +177,7 @@ public void appendRowsTest() throws Exception {
AppendRowsResponse.newBuilder()
.setUpdatedSchema(TableSchema.newBuilder().build())
.addAllRowErrors(new ArrayList<RowError>())
.setWriteStream("writeStream1412231231")
.build();
mockBigQueryWrite.addResponse(expectedResponse);
AppendRowsRequest request =
@@ -26,6 +26,8 @@
* Due to the nature of AppendRows being a bidirectional streaming RPC, certain
* parts of the AppendRowsRequest need only be specified for the first request
* sent each time the gRPC network connection is opened/reopened.
 * A single AppendRowsRequest must be less than 10 MB in size.
* Requests larger than this return an error, typically `INVALID_ARGUMENT`.
* </pre>
*
* Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest}
@@ -1827,6 +1829,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
* Due to the nature of AppendRows being a bidirectional streaming RPC, certain
* parts of the AppendRowsRequest need only be specified for the first request
* sent each time the gRPC network connection is opened/reopened.
 * A single AppendRowsRequest must be less than 10 MB in size.
* Requests larger than this return an error, typically `INVALID_ARGUMENT`.
* </pre>
*
* Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest}
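Because the new comment documents a hard per-request ceiling, a small client-side guard like the following might be useful before an append is sent. This is only a sketch: the class and method names are invented for illustration, and treating 10 MB as 10 * 1024 * 1024 serialized bytes is an assumption, not something stated by this commit.

import com.google.cloud.bigquery.storage.v1.AppendRowsRequest;

final class AppendRequestSizeCheck {
  // Assumed interpretation of the documented limit: a single AppendRowsRequest
  // must stay under 10 MB once serialized.
  private static final int MAX_REQUEST_BYTES = 10 * 1024 * 1024;

  // Returns true when the serialized request fits under the limit; a caller whose
  // request does not fit would split its rows into smaller batches before appending.
  static boolean fitsInSingleRequest(AppendRowsRequest request) {
    return request.getSerializedSize() < MAX_REQUEST_BYTES;
  }
}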
@@ -39,6 +39,7 @@ private AppendRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder<?> bui

private AppendRowsResponse() {
rowErrors_ = java.util.Collections.emptyList();
writeStream_ = "";
}

@java.lang.Override
@@ -136,6 +137,13 @@ private AppendRowsResponse(
com.google.cloud.bigquery.storage.v1.RowError.parser(), extensionRegistry));
break;
}
case 42:
{
java.lang.String s = input.readStringRequireUtf8();

writeStream_ = s;
break;
}
default:
{
if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
@@ -1282,6 +1290,57 @@ public com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuil
return rowErrors_.get(index);
}

public static final int WRITE_STREAM_FIELD_NUMBER = 5;
private volatile java.lang.Object writeStream_;
/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @return The writeStream.
*/
@java.lang.Override
public java.lang.String getWriteStream() {
java.lang.Object ref = writeStream_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
writeStream_ = s;
return s;
}
}
/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @return The bytes for writeStream.
*/
@java.lang.Override
public com.google.protobuf.ByteString getWriteStreamBytes() {
java.lang.Object ref = writeStream_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
writeStream_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}

private byte memoizedIsInitialized = -1;

@java.lang.Override
@@ -1309,6 +1368,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
for (int i = 0; i < rowErrors_.size(); i++) {
output.writeMessage(4, rowErrors_.get(i));
}
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 5, writeStream_);
}
unknownFields.writeTo(output);
}

@@ -1334,6 +1396,9 @@ public int getSerializedSize() {
for (int i = 0; i < rowErrors_.size(); i++) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, rowErrors_.get(i));
}
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, writeStream_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -1355,6 +1420,7 @@ public boolean equals(final java.lang.Object obj) {
if (!getUpdatedSchema().equals(other.getUpdatedSchema())) return false;
}
if (!getRowErrorsList().equals(other.getRowErrorsList())) return false;
if (!getWriteStream().equals(other.getWriteStream())) return false;
if (!getResponseCase().equals(other.getResponseCase())) return false;
switch (responseCase_) {
case 1:
@@ -1385,6 +1451,8 @@ public int hashCode() {
hash = (37 * hash) + ROW_ERRORS_FIELD_NUMBER;
hash = (53 * hash) + getRowErrorsList().hashCode();
}
hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER;
hash = (53 * hash) + getWriteStream().hashCode();
switch (responseCase_) {
case 1:
hash = (37 * hash) + APPEND_RESULT_FIELD_NUMBER;
@@ -1557,6 +1625,8 @@ public Builder clear() {
} else {
rowErrorsBuilder_.clear();
}
writeStream_ = "";

responseCase_ = 0;
response_ = null;
return this;
@@ -1615,6 +1685,7 @@ public com.google.cloud.bigquery.storage.v1.AppendRowsResponse buildPartial() {
} else {
result.rowErrors_ = rowErrorsBuilder_.build();
}
result.writeStream_ = writeStream_;
result.responseCase_ = responseCase_;
onBuilt();
return result;
@@ -1696,6 +1767,10 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AppendRowsResponse
}
}
}
if (!other.getWriteStream().isEmpty()) {
writeStream_ = other.writeStream_;
onChanged();
}
switch (other.getResponseCase()) {
case APPEND_RESULT:
{
@@ -2899,6 +2974,117 @@ public com.google.cloud.bigquery.storage.v1.RowError.Builder addRowErrorsBuilder
return rowErrorsBuilder_;
}

private java.lang.Object writeStream_ = "";
/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @return The writeStream.
*/
public java.lang.String getWriteStream() {
java.lang.Object ref = writeStream_;
if (!(ref instanceof java.lang.String)) {
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
writeStream_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @return The bytes for writeStream.
*/
public com.google.protobuf.ByteString getWriteStreamBytes() {
java.lang.Object ref = writeStream_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
writeStream_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @param value The writeStream to set.
* @return This builder for chaining.
*/
public Builder setWriteStream(java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}

writeStream_ = value;
onChanged();
return this;
}
/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @return This builder for chaining.
*/
public Builder clearWriteStream() {

writeStream_ = getDefaultInstance().getWriteStream();
onChanged();
return this;
}
/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @param value The bytes for writeStream to set.
* @return This builder for chaining.
*/
public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
checkByteStringIsUtf8(value);

writeStream_ = value;
onChanged();
return this;
}

@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
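To show where the new write_stream field on AppendRowsResponse could be useful, here is a hypothetical response handler that attributes each response to the stream it targets. The class and method names are illustrative, and the handler is assumed to be wired into whatever observer receives responses on the AppendRows connection; none of this is part of the commit itself.

import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
import com.google.cloud.bigquery.storage.v1.RowError;

final class AppendResponseLogger {
  // Hypothetical callback invoked for each response received on the AppendRows stream.
  static void onResponse(AppendRowsResponse response) {
    // write_stream (field 5, added in this change) matches the write_stream of the
    // corresponding request, so responses can be attributed to the right target when
    // a single connection carries appends for more than one stream.
    String target = response.getWriteStream();
    for (RowError error : response.getRowErrorsList()) {
      System.err.printf("%s: row %d failed: %s%n", target, error.getIndex(), error.getMessage());
    }
  }
}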
@@ -239,5 +239,32 @@ public interface AppendRowsResponseOrBuilder
*/
com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuilder(int index);

/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @return The writeStream.
*/
java.lang.String getWriteStream();
/**
*
*
* <pre>
* The target of the append operation. Matches the write_stream in the
* corresponding request.
* </pre>
*
* <code>string write_stream = 5;</code>
*
* @return The bytes for writeStream.
*/
com.google.protobuf.ByteString getWriteStreamBytes();

public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.ResponseCase getResponseCase();
}
