feat: add context manager support in client (#328)
- [ ] Regenerate this pull request now.

chore: fix docstring for first attribute of protos

committer: @busunkim96
PiperOrigin-RevId: 401271153

Source-Link: googleapis/googleapis@787f8c9

Source-Link: https://github.com/googleapis/googleapis-gen/commit/81decffe9fc72396a8153e756d1d67a6eecfd620
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiODFkZWNmZmU5ZmM3MjM5NmE4MTUzZTc1NmQxZDY3YTZlZWNmZDYyMCJ9
gcf-owl-bot[bot] committed Oct 8, 2021
1 parent 54de06e commit afcf3dcece980698d4b12545bf1a0d45289e41d5
Showing with 391 additions and 16 deletions.
  1. +6 −0 google/cloud/bigquery_storage_v1/services/big_query_read/async_client.py
  2. +14 −4 google/cloud/bigquery_storage_v1/services/big_query_read/client.py
  3. +9 −0 google/cloud/bigquery_storage_v1/services/big_query_read/transports/base.py
  4. +3 −0 google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc.py
  5. +3 −0 google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc_asyncio.py
  6. +6 −0 google/cloud/bigquery_storage_v1/services/big_query_write/async_client.py
  7. +14 −4 google/cloud/bigquery_storage_v1/services/big_query_write/client.py
  8. +9 −0 google/cloud/bigquery_storage_v1/services/big_query_write/transports/base.py
  9. +3 −0 google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc.py
  10. +3 −0 google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc_asyncio.py
  11. +2 −0 google/cloud/bigquery_storage_v1/types/avro.py
  12. +1 −0 google/cloud/bigquery_storage_v1/types/protobuf.py
  13. +17 −0 google/cloud/bigquery_storage_v1/types/storage.py
  14. +3 −0 google/cloud/bigquery_storage_v1/types/stream.py
  15. +1 −0 google/cloud/bigquery_storage_v1/types/table.py
  16. +6 −0 google/cloud/bigquery_storage_v1beta2/services/big_query_read/async_client.py
  17. +14 −4 google/cloud/bigquery_storage_v1beta2/services/big_query_read/client.py
  18. +9 −0 google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/base.py
  19. +3 −0 google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc.py
  20. +3 −0 google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py
  21. +6 −0 google/cloud/bigquery_storage_v1beta2/services/big_query_write/async_client.py
  22. +14 −4 google/cloud/bigquery_storage_v1beta2/services/big_query_write/client.py
  23. +9 −0 google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/base.py
  24. +3 −0 google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc.py
  25. +3 −0 google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc_asyncio.py
  26. +2 −0 google/cloud/bigquery_storage_v1beta2/types/avro.py
  27. +1 −0 google/cloud/bigquery_storage_v1beta2/types/protobuf.py
  28. +19 −0 google/cloud/bigquery_storage_v1beta2/types/storage.py
  29. +3 −0 google/cloud/bigquery_storage_v1beta2/types/stream.py
  30. +2 −0 google/cloud/bigquery_storage_v1beta2/types/table.py
  31. +50 −0 tests/unit/gapic/bigquery_storage_v1/test_big_query_read.py
  32. +50 −0 tests/unit/gapic/bigquery_storage_v1/test_big_query_write.py
  33. +50 −0 tests/unit/gapic/bigquery_storage_v1beta2/test_big_query_read.py
  34. +50 −0 tests/unit/gapic/bigquery_storage_v1beta2/test_big_query_write.py
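
Taken together, the client changes let callers tie a client's lifetime to a `with` block instead of leaving the underlying gRPC channel open for the life of the process. A minimal sketch of the new synchronous usage, assuming application default credentials; the project and table names and the request fields are placeholders, not part of this change:

    from google.cloud.bigquery_storage_v1 import types
    from google.cloud.bigquery_storage_v1.services.big_query_read import BigQueryReadClient

    # Leaving the block calls client.transport.close(), shutting down the gRPC channel.
    with BigQueryReadClient() as client:
        session = client.create_read_session(
            parent="projects/my-project",  # placeholder project
            read_session=types.ReadSession(
                table="projects/my-project/datasets/my_dataset/tables/my_table",
                data_format=types.DataFormat.AVRO,
            ),
            max_stream_count=1,
        )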
@@ -467,6 +467,12 @@ async def split_read_stream(
# Done; return the response.
return response

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
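
The async client gains the mirror-image hooks, so the same lifecycle can be expressed with `async with`. A sketch under the same assumptions (placeholder names, default credentials):

    import asyncio

    from google.cloud.bigquery_storage_v1.services.big_query_read import BigQueryReadAsyncClient

    async def main():
        # __aexit__ awaits transport.close(), closing the asyncio gRPC channel.
        async with BigQueryReadAsyncClient() as client:
            # Issue create_read_session / read_rows calls here, awaiting each one.
            ...

    asyncio.run(main())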
@@ -381,10 +381,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
always_use_jwt_access=True,
)

def create_read_session(
@@ -660,6 +657,19 @@ def split_read_stream(
# Done; return the response.
return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
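
The warning in `__exit__` is about transport sharing: a transport instance can be handed to more than one client, and only the client that owns it should be used as a context manager. A small sketch of the failure mode it guards against (hypothetical setup, using the public `transport` property):

    from google.cloud.bigquery_storage_v1.services.big_query_read import BigQueryReadClient

    client_a = BigQueryReadClient()
    client_b = BigQueryReadClient(transport=client_a.transport)  # both share one channel

    with client_a:
        ...
    # client_a.__exit__ closed the shared transport, so calls through client_b now fail.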
@@ -205,6 +205,15 @@ def _prep_wrapped_messages(self, client_info):
),
}

    def close(self):
        """Closes resources associated with the transport.
        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

@property
def create_read_session(
self,
@@ -346,5 +346,8 @@ def split_read_stream(
)
return self._stubs["split_read_stream"]

    def close(self):
        self.grpc_channel.close()


__all__ = ("BigQueryReadGrpcTransport",)
@@ -351,5 +351,8 @@ def split_read_stream(
)
return self._stubs["split_read_stream"]

    def close(self):
        return self.grpc_channel.close()


__all__ = ("BigQueryReadGrpcAsyncIOTransport",)
@@ -700,6 +700,12 @@ async def flush_rows(
# Done; return the response.
return response

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
@@ -365,10 +365,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
always_use_jwt_access=True,
)

def create_write_stream(
@@ -831,6 +828,19 @@ def flush_rows(
# Done; return the response.
return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
@@ -250,6 +250,15 @@ def _prep_wrapped_messages(self, client_info):
),
}

    def close(self):
        """Closes resources associated with the transport.
        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

@property
def create_write_stream(
self,
@@ -444,5 +444,8 @@ def flush_rows(
)
return self._stubs["flush_rows"]

    def close(self):
        self.grpc_channel.close()


__all__ = ("BigQueryWriteGrpcTransport",)
@@ -448,5 +448,8 @@ def flush_rows(
)
return self._stubs["flush_rows"]

    def close(self):
        return self.grpc_channel.close()


__all__ = ("BigQueryWriteGrpcAsyncIOTransport",)
@@ -23,6 +23,7 @@

class AvroSchema(proto.Message):
r"""Avro schema.
Attributes:
schema (str):
Json serialized schema, as described at
@@ -34,6 +35,7 @@ class AvroSchema(proto.Message):

class AvroRows(proto.Message):
r"""Avro rows.
Attributes:
serialized_binary_rows (bytes):
Binary serialized rows in a block.
@@ -49,6 +49,7 @@ class ProtoSchema(proto.Message):

class ProtoRows(proto.Message):
r"""
Attributes:
serialized_rows (Sequence[bytes]):
A sequence of rows serialized as a Protocol
@@ -52,6 +52,7 @@

class CreateReadSessionRequest(proto.Message):
r"""Request message for ``CreateReadSession``.
Attributes:
parent (str):
Required. The request project that owns the session, in the
@@ -79,6 +80,7 @@ class CreateReadSessionRequest(proto.Message):

class ReadRowsRequest(proto.Message):
r"""Request message for ``ReadRows``.
Attributes:
read_stream (str):
Required. Stream to read rows from.
@@ -95,6 +97,7 @@ class ReadRowsRequest(proto.Message):

class ThrottleState(proto.Message):
r"""Information on if the current connection is being throttled.
Attributes:
throttle_percent (int):
How much this connection is being throttled.
@@ -107,6 +110,7 @@ class ThrottleState(proto.Message):

class StreamStats(proto.Message):
r"""Estimated stream statistics for a given read Stream.
Attributes:
progress (google.cloud.bigquery_storage_v1.types.StreamStats.Progress):
Represents the progress of the current
@@ -115,6 +119,7 @@ class StreamStats(proto.Message):

class Progress(proto.Message):
r"""
Attributes:
at_response_start (float):
The fraction of rows assigned to the stream that have been
@@ -183,6 +188,7 @@ class ReadRowsResponse(proto.Message):

class SplitReadStreamRequest(proto.Message):
r"""Request message for ``SplitReadStream``.
Attributes:
name (str):
Required. Name of the stream to split.
@@ -207,6 +213,7 @@ class SplitReadStreamRequest(proto.Message):

class SplitReadStreamResponse(proto.Message):
r"""Response message for ``SplitReadStream``.
Attributes:
primary_stream (google.cloud.bigquery_storage_v1.types.ReadStream):
Primary stream, which contains the beginning portion of
@@ -224,6 +231,7 @@ class SplitReadStreamResponse(proto.Message):

class CreateWriteStreamRequest(proto.Message):
r"""Request message for ``CreateWriteStream``.
Attributes:
parent (str):
Required. Reference to the table to which the stream
@@ -303,6 +311,7 @@ class ProtoData(proto.Message):

class AppendRowsResponse(proto.Message):
r"""Response message for ``AppendRows``.
Attributes:
append_result (google.cloud.bigquery_storage_v1.types.AppendRowsResponse.AppendResult):
Result if the append is successful.
@@ -339,6 +348,7 @@ class AppendRowsResponse(proto.Message):

class AppendResult(proto.Message):
r"""AppendResult is returned for successful append requests.
Attributes:
offset (google.protobuf.wrappers_pb2.Int64Value):
The row offset at which the last append
@@ -359,6 +369,7 @@ class AppendResult(proto.Message):

class GetWriteStreamRequest(proto.Message):
r"""Request message for ``GetWriteStreamRequest``.
Attributes:
name (str):
Required. Name of the stream to get, in the form of
@@ -370,6 +381,7 @@ class GetWriteStreamRequest(proto.Message):

class BatchCommitWriteStreamsRequest(proto.Message):
r"""Request message for ``BatchCommitWriteStreams``.
Attributes:
parent (str):
Required. Parent table that all the streams should belong
@@ -386,6 +398,7 @@ class BatchCommitWriteStreamsRequest(proto.Message):

class BatchCommitWriteStreamsResponse(proto.Message):
r"""Response message for ``BatchCommitWriteStreams``.
Attributes:
commit_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which streams were committed in microseconds
@@ -409,6 +422,7 @@ class BatchCommitWriteStreamsResponse(proto.Message):

class FinalizeWriteStreamRequest(proto.Message):
r"""Request message for invoking ``FinalizeWriteStream``.
Attributes:
name (str):
Required. Name of the stream to finalize, in the form of
@@ -420,6 +434,7 @@ class FinalizeWriteStreamRequest(proto.Message):

class FinalizeWriteStreamResponse(proto.Message):
r"""Response message for ``FinalizeWriteStream``.
Attributes:
row_count (int):
Number of rows in the finalized stream.
@@ -430,6 +445,7 @@ class FinalizeWriteStreamResponse(proto.Message):

class FlushRowsRequest(proto.Message):
r"""Request message for ``FlushRows``.
Attributes:
write_stream (str):
Required. The stream that is the target of
@@ -446,6 +462,7 @@ class FlushRowsRequest(proto.Message):

class FlushRowsResponse(proto.Message):
r"""Respond message for ``FlushRows``.
Attributes:
offset (int):
The rows before this offset (including this
@@ -36,6 +36,7 @@ class DataFormat(proto.Enum):

class ReadSession(proto.Message):
r"""Information about the ReadSession.
Attributes:
name (str):
Output only. Unique identifier for the session, in the form
@@ -79,6 +80,7 @@ class ReadSession(proto.Message):

class TableModifiers(proto.Message):
r"""Additional attributes when reading a table.
Attributes:
snapshot_time (google.protobuf.timestamp_pb2.Timestamp):
The snapshot time of the table. If not set,
@@ -91,6 +93,7 @@ class TableModifiers(proto.Message):

class TableReadOptions(proto.Message):
r"""Options dictating how we read a table.
Attributes:
selected_fields (Sequence[str]):
Names of the fields in the table that should be read. If
@@ -24,6 +24,7 @@

class TableSchema(proto.Message):
r"""Schema of a table.
Attributes:
fields (Sequence[google.cloud.bigquery_storage_v1.types.TableFieldSchema]):
Describes the fields in a table.
@@ -469,6 +469,12 @@ async def split_read_stream(
# Done; return the response.
return response

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
@@ -383,10 +383,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
always_use_jwt_access=True,
)

def create_read_session(
@@ -662,6 +659,19 @@ def split_read_stream(
# Done; return the response.
return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()


try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
