feat: Add ZSTD compression as an option for Arrow proto changes

* feat: Add ZSTD compression as an option for Arrow.

Committer: @emkornfield
PiperOrigin-RevId: 374220891

Source-Author: Google APIs <noreply@google.com>
Source-Date: Mon May 17 10:03:14 2021 -0700
Source-Repo: googleapis/googleapis
Source-Sha: 23efea9fc7bedfe53b24295ed84b5f873606edcb
Source-Link: googleapis/googleapis@23efea9

* chore: release gapic-generator-java v1.0.5

Committer: @miraleung
PiperOrigin-RevId: 374252908

Source-Author: Google APIs <noreply@google.com>
Source-Date: Mon May 17 12:23:32 2021 -0700
Source-Repo: googleapis/googleapis
Source-Sha: 131ae3e375c05856d7d77cd146dc2af92650eb38
Source-Link: googleapis/googleapis@131ae3e
yoshi-automation committed May 18, 2021
1 parent 1fcc01d commit d910a89
Showing 10 changed files with 275 additions and 132 deletions.
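
The substantive change is a new ZSTD value in the ArrowSerializationOptions.CompressionCodec enum of the v1 Arrow proto; the other hunks are regenerated Javadoc samples and unit tests picked up alongside the gapic-generator-java 1.0.5 release noted above. As a minimal sketch of how the new codec could be selected (assuming the v1 package layout and the standard protobuf-generated setBufferCompression/getBufferCompression accessors for the buffer_compression field, neither of which appears in this diff):

import com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions;
import com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec;

public class ZstdCompressionSketch {
  public static void main(String[] args) {
    // Request ZSTD compression for serialized Arrow record-batch buffers.
    ArrowSerializationOptions options =
        ArrowSerializationOptions.newBuilder()
            .setBufferCompression(CompressionCodec.ZSTD)
            .build();

    System.out.println(options.getBufferCompression());             // ZSTD
    System.out.println(options.getBufferCompression().getNumber()); // 2
  }
}
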
@@ -263,8 +263,13 @@ public final Storage.ReadSession createReadSession(
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.CreateReadSessionRequest request =
* Storage.CreateReadSessionRequest.newBuilder()
* .setTableReference(TableReferenceProto.TableReference.newBuilder().build())
* .setParent(ProjectName.of("[PROJECT]").toString())
* .setTableModifiers(TableReferenceProto.TableModifiers.newBuilder().build())
* .setRequestedStreams(1017221410)
* .setReadOptions(ReadOptions.TableReadOptions.newBuilder().build())
* .setFormat(Storage.DataFormat.forNumber(0))
* .setShardingStrategy(Storage.ShardingStrategy.forNumber(0))
* .build();
* Storage.ReadSession response = baseBigQueryStorageClient.createReadSession(request);
* }
@@ -296,8 +301,13 @@ public final Storage.ReadSession createReadSession(Storage.CreateReadSessionRequ
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.CreateReadSessionRequest request =
* Storage.CreateReadSessionRequest.newBuilder()
* .setTableReference(TableReferenceProto.TableReference.newBuilder().build())
* .setParent(ProjectName.of("[PROJECT]").toString())
* .setTableModifiers(TableReferenceProto.TableModifiers.newBuilder().build())
* .setRequestedStreams(1017221410)
* .setReadOptions(ReadOptions.TableReadOptions.newBuilder().build())
* .setFormat(Storage.DataFormat.forNumber(0))
* .setShardingStrategy(Storage.ShardingStrategy.forNumber(0))
* .build();
* ApiFuture<Storage.ReadSession> future =
* baseBigQueryStorageClient.createReadSessionCallable().futureCall(request);
@@ -326,7 +336,10 @@ public final Storage.ReadSession createReadSession(Storage.CreateReadSessionRequ
*
* <pre>{@code
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.ReadRowsRequest request = Storage.ReadRowsRequest.newBuilder().build();
* Storage.ReadRowsRequest request =
* Storage.ReadRowsRequest.newBuilder()
* .setReadPosition(Storage.StreamPosition.newBuilder().build())
* .build();
* ServerStream<Storage.ReadRowsResponse> stream =
* baseBigQueryStorageClient.readRowsCallable().call(request);
* for (Storage.ReadRowsResponse response : stream) {
@@ -383,6 +396,7 @@ public final Storage.BatchCreateReadSessionStreamsResponse batchCreateReadSessio
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.BatchCreateReadSessionStreamsRequest request =
* Storage.BatchCreateReadSessionStreamsRequest.newBuilder()
* .setSession(Storage.ReadSession.newBuilder().build())
* .setRequestedStreams(1017221410)
* .build();
* Storage.BatchCreateReadSessionStreamsResponse response =
@@ -409,6 +423,7 @@ public final Storage.BatchCreateReadSessionStreamsResponse batchCreateReadSessio
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.BatchCreateReadSessionStreamsRequest request =
* Storage.BatchCreateReadSessionStreamsRequest.newBuilder()
* .setSession(Storage.ReadSession.newBuilder().build())
* .setRequestedStreams(1017221410)
* .build();
* ApiFuture<Storage.BatchCreateReadSessionStreamsResponse> future =
@@ -475,7 +490,10 @@ public final void finalizeStream(Storage.Stream stream) {
*
* <pre>{@code
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.FinalizeStreamRequest request = Storage.FinalizeStreamRequest.newBuilder().build();
* Storage.FinalizeStreamRequest request =
* Storage.FinalizeStreamRequest.newBuilder()
* .setStream(Storage.Stream.newBuilder().build())
* .build();
* baseBigQueryStorageClient.finalizeStream(request);
* }
* }</pre>
@@ -505,7 +523,10 @@ public final void finalizeStream(Storage.FinalizeStreamRequest request) {
*
* <pre>{@code
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.FinalizeStreamRequest request = Storage.FinalizeStreamRequest.newBuilder().build();
* Storage.FinalizeStreamRequest request =
* Storage.FinalizeStreamRequest.newBuilder()
* .setStream(Storage.Stream.newBuilder().build())
* .build();
* ApiFuture<Empty> future =
* baseBigQueryStorageClient.finalizeStreamCallable().futureCall(request);
* // Do something.
@@ -569,7 +590,10 @@ public final Storage.SplitReadStreamResponse splitReadStream(Storage.Stream orig
* <pre>{@code
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.SplitReadStreamRequest request =
* Storage.SplitReadStreamRequest.newBuilder().setFraction(-1653751294).build();
* Storage.SplitReadStreamRequest.newBuilder()
* .setOriginalStream(Storage.Stream.newBuilder().build())
* .setFraction(-1653751294)
* .build();
* Storage.SplitReadStreamResponse response = baseBigQueryStorageClient.splitReadStream(request);
* }
* }</pre>
@@ -601,7 +625,10 @@ public final Storage.SplitReadStreamResponse splitReadStream(
* <pre>{@code
* try (BaseBigQueryStorageClient baseBigQueryStorageClient = BaseBigQueryStorageClient.create()) {
* Storage.SplitReadStreamRequest request =
* Storage.SplitReadStreamRequest.newBuilder().setFraction(-1653751294).build();
* Storage.SplitReadStreamRequest.newBuilder()
* .setOriginalStream(Storage.Stream.newBuilder().build())
* .setFraction(-1653751294)
* .build();
* ApiFuture<Storage.SplitReadStreamResponse> future =
* baseBigQueryStorageClient.splitReadStreamCallable().futureCall(request);
* // Do something.
@@ -28,8 +28,10 @@
import com.google.api.gax.rpc.StatusCode;
import com.google.protobuf.AbstractMessage;
import com.google.protobuf.Empty;
import com.google.protobuf.Timestamp;
import io.grpc.StatusRuntimeException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
@@ -82,7 +84,15 @@ public void tearDown() throws Exception {

@Test
public void createReadSessionTest() throws Exception {
Storage.ReadSession expectedResponse = Storage.ReadSession.newBuilder().build();
Storage.ReadSession expectedResponse =
Storage.ReadSession.newBuilder()
.setName(ReadSessionName.of("[PROJECT]", "[LOCATION]", "[SESSION]").toString())
.setExpireTime(Timestamp.newBuilder().build())
.addAllStreams(new ArrayList<Storage.Stream>())
.setTableReference(TableReferenceProto.TableReference.newBuilder().build())
.setTableModifiers(TableReferenceProto.TableModifiers.newBuilder().build())
.setShardingStrategy(Storage.ShardingStrategy.forNumber(0))
.build();
mockBigQueryStorage.addResponse(expectedResponse);

TableReferenceProto.TableReference tableReference =
@@ -127,7 +137,15 @@ public void createReadSessionExceptionTest() throws Exception {

@Test
public void createReadSessionTest2() throws Exception {
Storage.ReadSession expectedResponse = Storage.ReadSession.newBuilder().build();
Storage.ReadSession expectedResponse =
Storage.ReadSession.newBuilder()
.setName(ReadSessionName.of("[PROJECT]", "[LOCATION]", "[SESSION]").toString())
.setExpireTime(Timestamp.newBuilder().build())
.addAllStreams(new ArrayList<Storage.Stream>())
.setTableReference(TableReferenceProto.TableReference.newBuilder().build())
.setTableModifiers(TableReferenceProto.TableModifiers.newBuilder().build())
.setShardingStrategy(Storage.ShardingStrategy.forNumber(0))
.build();
mockBigQueryStorage.addResponse(expectedResponse);

TableReferenceProto.TableReference tableReference =
@@ -172,9 +190,17 @@ public void createReadSessionExceptionTest2() throws Exception {

@Test
public void readRowsTest() throws Exception {
Storage.ReadRowsResponse expectedResponse = Storage.ReadRowsResponse.newBuilder().build();
Storage.ReadRowsResponse expectedResponse =
Storage.ReadRowsResponse.newBuilder()
.setRowCount(1340416618)
.setStatus(Storage.StreamStatus.newBuilder().build())
.setThrottleStatus(Storage.ThrottleStatus.newBuilder().build())
.build();
mockBigQueryStorage.addResponse(expectedResponse);
Storage.ReadRowsRequest request = Storage.ReadRowsRequest.newBuilder().build();
Storage.ReadRowsRequest request =
Storage.ReadRowsRequest.newBuilder()
.setReadPosition(Storage.StreamPosition.newBuilder().build())
.build();

MockStreamObserver<Storage.ReadRowsResponse> responseObserver = new MockStreamObserver<>();

@@ -191,7 +217,10 @@ public void readRowsTest() throws Exception {
public void readRowsExceptionTest() throws Exception {
StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT);
mockBigQueryStorage.addException(exception);
Storage.ReadRowsRequest request = Storage.ReadRowsRequest.newBuilder().build();
Storage.ReadRowsRequest request =
Storage.ReadRowsRequest.newBuilder()
.setReadPosition(Storage.StreamPosition.newBuilder().build())
.build();

MockStreamObserver<Storage.ReadRowsResponse> responseObserver = new MockStreamObserver<>();

@@ -212,7 +241,9 @@ public void readRowsExceptionTest() throws Exception {
@Test
public void batchCreateReadSessionStreamsTest() throws Exception {
Storage.BatchCreateReadSessionStreamsResponse expectedResponse =
Storage.BatchCreateReadSessionStreamsResponse.newBuilder().build();
Storage.BatchCreateReadSessionStreamsResponse.newBuilder()
.addAllStreams(new ArrayList<Storage.Stream>())
.build();
mockBigQueryStorage.addResponse(expectedResponse);

Storage.ReadSession session = Storage.ReadSession.newBuilder().build();
@@ -288,7 +319,10 @@ public void finalizeStreamExceptionTest() throws Exception {
@Test
public void splitReadStreamTest() throws Exception {
Storage.SplitReadStreamResponse expectedResponse =
Storage.SplitReadStreamResponse.newBuilder().build();
Storage.SplitReadStreamResponse.newBuilder()
.setPrimaryStream(Storage.Stream.newBuilder().build())
.setRemainderStream(Storage.Stream.newBuilder().build())
.build();
mockBigQueryStorage.addResponse(expectedResponse);

Storage.Stream originalStream = Storage.Stream.newBuilder().build();
@@ -52,17 +52,18 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ ".proto\022 google.cloud.bigquery.storage.v1"
+ "\"(\n\013ArrowSchema\022\031\n\021serialized_schema\030\001 \001"
+ "(\014\"F\n\020ArrowRecordBatch\022\037\n\027serialized_rec"
+ "ord_batch\030\001 \001(\014\022\021\n\trow_count\030\002 \001(\003\"\305\001\n\031A"
+ "ord_batch\030\001 \001(\014\022\021\n\trow_count\030\002 \001(\003\"\317\001\n\031A"
+ "rrowSerializationOptions\022h\n\022buffer_compr"
+ "ession\030\002 \001(\0162L.google.cloud.bigquery.sto"
+ "rage.v1.ArrowSerializationOptions.Compre"
+ "ssionCodec\">\n\020CompressionCodec\022\033\n\027COMPRE"
+ "SSION_UNSPECIFIED\020\000\022\r\n\tLZ4_FRAME\020\001B\303\001\n$c"
+ "om.google.cloud.bigquery.storage.v1B\nArr"
+ "owProtoP\001ZGgoogle.golang.org/genproto/go"
+ "ogleapis/cloud/bigquery/storage/v1;stora"
+ "ge\252\002 Google.Cloud.BigQuery.Storage.V1\312\002 "
+ "Google\\Cloud\\BigQuery\\Storage\\V1b\006proto3"
+ "ssionCodec\"H\n\020CompressionCodec\022\033\n\027COMPRE"
+ "SSION_UNSPECIFIED\020\000\022\r\n\tLZ4_FRAME\020\001\022\010\n\004ZS"
+ "TD\020\002B\303\001\n$com.google.cloud.bigquery.stora"
+ "ge.v1B\nArrowProtoP\001ZGgoogle.golang.org/g"
+ "enproto/googleapis/cloud/bigquery/storag"
+ "e/v1;storage\252\002 Google.Cloud.BigQuery.Sto"
+ "rage.V1\312\002 Google\\Cloud\\BigQuery\\Storage\\"
+ "V1b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -142,6 +142,16 @@ public enum CompressionCodec implements com.google.protobuf.ProtocolMessageEnum
* <code>LZ4_FRAME = 1;</code>
*/
LZ4_FRAME(1),
/**
*
*
* <pre>
* Zstandard compression.
* </pre>
*
* <code>ZSTD = 2;</code>
*/
ZSTD(2),
UNRECOGNIZED(-1),
;

@@ -165,6 +175,16 @@ public enum CompressionCodec implements com.google.protobuf.ProtocolMessageEnum
* <code>LZ4_FRAME = 1;</code>
*/
public static final int LZ4_FRAME_VALUE = 1;
/**
*
*
* <pre>
* Zstandard compression.
* </pre>
*
* <code>ZSTD = 2;</code>
*/
public static final int ZSTD_VALUE = 2;

public final int getNumber() {
if (this == UNRECOGNIZED) {
@@ -194,6 +214,8 @@ public static CompressionCodec forNumber(int value) {
return COMPRESSION_UNSPECIFIED;
case 1:
return LZ4_FRAME;
case 2:
return ZSTD;
default:
return null;
}
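
A small round-trip check of the new wire mapping, as a sketch that uses only members visible in the generated enum above (ZSTD, ZSTD_VALUE, getNumber, forNumber) and assumes the v1 package layout for the import:

import com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec;

public class CompressionCodecMappingSketch {
  public static void main(String[] args) {
    // forNumber(2) now resolves to the ZSTD constant added in this commit.
    CompressionCodec codec = CompressionCodec.forNumber(2);
    System.out.println(codec);                           // ZSTD
    System.out.println(codec.getNumber());               // 2
    System.out.println(CompressionCodec.ZSTD_VALUE);     // 2
    // Wire values with no matching constant still come back as null.
    System.out.println(CompressionCodec.forNumber(99));  // null
  }
}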