diff --git a/.kokoro/continuous/bigtable.cfg b/.kokoro/continuous/bigtable.cfg deleted file mode 100644 index de67c4844de9..000000000000 --- a/.kokoro/continuous/bigtable.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "bigtable" -} diff --git a/.kokoro/continuous/firestore.cfg b/.kokoro/continuous/firestore.cfg deleted file mode 100644 index 9c5b0c53ab2d..000000000000 --- a/.kokoro/continuous/firestore.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "firestore" -} diff --git a/.kokoro/continuous/spanner.cfg b/.kokoro/continuous/spanner.cfg deleted file mode 100644 index d986f9d137f9..000000000000 --- a/.kokoro/continuous/spanner.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "spanner" -} diff --git a/.kokoro/continuous/storage.cfg b/.kokoro/continuous/storage.cfg deleted file mode 100644 index 6bd9371ff99d..000000000000 --- a/.kokoro/continuous/storage.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "storage" -} diff --git a/.kokoro/docs/bigtable.cfg b/.kokoro/docs/bigtable.cfg deleted file mode 100644 index de67c4844de9..000000000000 --- a/.kokoro/docs/bigtable.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "bigtable" -} diff --git a/.kokoro/docs/firestore.cfg b/.kokoro/docs/firestore.cfg deleted file mode 100644 index 9c5b0c53ab2d..000000000000 --- a/.kokoro/docs/firestore.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "firestore" -} diff --git a/.kokoro/docs/spanner.cfg b/.kokoro/docs/spanner.cfg deleted file mode 100644 index d986f9d137f9..000000000000 --- a/.kokoro/docs/spanner.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "spanner" -} diff --git a/.kokoro/docs/storage.cfg b/.kokoro/docs/storage.cfg deleted file mode 100644 index 6bd9371ff99d..000000000000 --- a/.kokoro/docs/storage.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "storage" -} diff --git a/.kokoro/presubmit/bigtable.cfg b/.kokoro/presubmit/bigtable.cfg deleted file mode 100644 index de67c4844de9..000000000000 --- a/.kokoro/presubmit/bigtable.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "bigtable" -} diff --git a/.kokoro/presubmit/firestore.cfg b/.kokoro/presubmit/firestore.cfg deleted file mode 100644 index 9c5b0c53ab2d..000000000000 --- a/.kokoro/presubmit/firestore.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. 
-env_vars: { - key: "PACKAGE" - value: "firestore" -} diff --git a/.kokoro/presubmit/spanner.cfg b/.kokoro/presubmit/spanner.cfg deleted file mode 100644 index d986f9d137f9..000000000000 --- a/.kokoro/presubmit/spanner.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "spanner" -} diff --git a/.kokoro/presubmit/storage.cfg b/.kokoro/presubmit/storage.cfg deleted file mode 100644 index 6bd9371ff99d..000000000000 --- a/.kokoro/presubmit/storage.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "storage" -} diff --git a/.kokoro/release/bigtable.cfg b/.kokoro/release/bigtable.cfg deleted file mode 100644 index de67c4844de9..000000000000 --- a/.kokoro/release/bigtable.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "bigtable" -} diff --git a/.kokoro/release/firestore.cfg b/.kokoro/release/firestore.cfg deleted file mode 100644 index 9c5b0c53ab2d..000000000000 --- a/.kokoro/release/firestore.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "firestore" -} diff --git a/.kokoro/release/spanner.cfg b/.kokoro/release/spanner.cfg deleted file mode 100644 index d986f9d137f9..000000000000 --- a/.kokoro/release/spanner.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "spanner" -} diff --git a/.kokoro/release/storage.cfg b/.kokoro/release/storage.cfg deleted file mode 100644 index 6bd9371ff99d..000000000000 --- a/.kokoro/release/storage.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Tell the trampoline which build file to use. -env_vars: { - key: "PACKAGE" - value: "storage" -} diff --git a/README.rst b/README.rst index 441a6b9d3e29..49098ecf3728 100644 --- a/README.rst +++ b/README.rst @@ -47,7 +47,7 @@ The following client libraries have **GA** support: .. _BigQuery Documentation: https://googleapis.dev/python/bigquery/latest .. _Google Cloud Bigtable: https://pypi.org/project/google-cloud-bigtable/ -.. _Bigtable README: https://github.com/googleapis/google-cloud-python/tree/master/bigtable +.. _Bigtable README: https://github.com/googleapis/python-bigtable .. _Bigtable Documentation: https://googleapis.dev/python/bigtable/latest .. _Google Cloud Datastore: https://pypi.org/project/google-cloud-datastore/ @@ -67,7 +67,7 @@ The following client libraries have **GA** support: .. _Pub/Sub Documentation: https://googleapis.dev/python/pubsub/latest .. _Google Cloud Spanner: https://pypi.org/project/google-cloud-spanner -.. _Spanner README: https://github.com/googleapis/google-cloud-python/tree/master/spanner +.. _Spanner README: https://github.com/googleapis/python-spanner .. _Spanner Documentation: https://googleapis.dev/python/spanner/latest .. _Google Cloud Speech to Text: https://pypi.org/project/google-cloud-speech/ @@ -75,7 +75,7 @@ The following client libraries have **GA** support: .. _Speech to Text Documentation: https://googleapis.dev/python/speech/latest .. 
_Google Cloud Storage: https://pypi.org/project/google-cloud-storage/ -.. _Storage README: https://github.com/googleapis/google-cloud-python/tree/master/storage +.. _Storage README: https://github.com/googleapis/python-storage .. _Storage Documentation: https://googleapis.dev/python/storage/latest .. _Google Cloud Tasks: https://pypi.org/project/google-cloud-tasks/ @@ -123,7 +123,7 @@ The following client libraries have **beta** support: .. _Data Catalog Documentation: https://googleapis.dev/python/datacatalog/latest .. _Google Cloud Firestore: https://pypi.org/project/google-cloud-firestore/ -.. _Firestore README: https://github.com/googleapis/google-cloud-python/tree/master/firestore +.. _Firestore README: https://github.com/googleapis/python-firestore .. _Firestore Documentation: https://googleapis.dev/python/firestore/latest .. _Google Cloud Video Intelligence: https://pypi.org/project/google-cloud-videointelligence diff --git a/bigtable/.coveragerc b/bigtable/.coveragerc deleted file mode 100644 index b178b094aa1d..000000000000 --- a/bigtable/.coveragerc +++ /dev/null @@ -1,19 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! -[run] -branch = True - -[report] -fail_under = 100 -show_missing = True -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore abstract methods - raise NotImplementedError -omit = - */gapic/*.py - */proto/*.py - */core/*.py - */site-packages/*.py \ No newline at end of file diff --git a/bigtable/.flake8 b/bigtable/.flake8 deleted file mode 100644 index 0268ecc9c55c..000000000000 --- a/bigtable/.flake8 +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! -[flake8] -ignore = E203, E266, E501, W503 -exclude = - # Exclude generated code. - **/proto/** - **/gapic/** - *_pb2.py - - # Standard linting exemptions. 
- __pycache__, - .git, - *.pyc, - conf.py diff --git a/bigtable/.repo-metadata.json b/bigtable/.repo-metadata.json deleted file mode 100644 index 956c74b53395..000000000000 --- a/bigtable/.repo-metadata.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name": "bigtable", - "name_pretty": "Cloud Bigtable", - "product_documentation": "https://cloud.google.com/bigtable", - "client_documentation": "https://googleapis.dev/python/bigtable/latest", - "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", - "release_level": "ga", - "language": "python", - "repo": "googleapis/google-cloud-python", - "distribution_name": "google-cloud-bigtable", - "api_id": "bigtable.googleapis.com", - "requires_billing": true -} \ No newline at end of file diff --git a/bigtable/CHANGELOG.md b/bigtable/CHANGELOG.md deleted file mode 100644 index a0ee45d5b5ff..000000000000 --- a/bigtable/CHANGELOG.md +++ /dev/null @@ -1,367 +0,0 @@ -# Changelog - -[PyPI History][1] - -[1]: https://pypi.org/project/google-cloud-bigtable/#history - -## 1.2.1 - -01-03-2020 10:05 PST - - -### Implementation Changes -- Add ability to use single-row transactions ([#10021](https://github.com/googleapis/google-cloud-python/pull/10021)) - -## 1.2.0 - -12-04-2019 12:21 PST - - -### New Features -- add table level IAM policy controls ([#9877](https://github.com/googleapis/google-cloud-python/pull/9877)) -- add 'client_options' / 'admin_client_options' to Client ([#9517](https://github.com/googleapis/google-cloud-python/pull/9517)) - -### Documentation -- change spacing in docs templates (via synth) ([#9739](https://github.com/googleapis/google-cloud-python/pull/9739)) -- add python 2 sunset banner to documentation ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036)) - -### Internal -- add trailing commas (via synth) ([#9557](https://github.com/googleapis/google-cloud-python/pull/9557)) - -## 1.1.0 - -10-15-2019 06:40 PDT - - -### New Features -- Add IAM Policy methods to table admin client (via synth). ([#9172](https://github.com/googleapis/google-cloud-python/pull/9172)) - -### Dependencies -- Pin 'google-cloud-core >= 1.0.3, < 2.0.0dev'. ([#9445](https://github.com/googleapis/google-cloud-python/pull/9445)) - -### Documentation -- Fix intersphinx reference to requests ([#9294](https://github.com/googleapis/google-cloud-python/pull/9294)) -- Fix misspelling in docs. ([#9184](https://github.com/googleapis/google-cloud-python/pull/9184)) - -## 1.0.0 - -08-28-2019 12:49 PDT - -### Implementation Changes -- Remove send/recv msg size limit (via synth). ([#8979](https://github.com/googleapis/google-cloud-python/pull/8979)) - -### Documentation -- Avoid creating table in 'list_tables' snippet; harden 'delete_instance' snippet. ([#8879](https://github.com/googleapis/google-cloud-python/pull/8879)) -- Add retry for DeadlineExceeded to 'test_bigtable_create_table' snippet. ([#8889](https://github.com/googleapis/google-cloud-python/pull/8889)) -- Remove compatability badges from READMEs. ([#9035](https://github.com/googleapis/google-cloud-python/pull/9035)) - -### Internal / Testing Changes -- Docs: Remove CI for gh-pages, use googleapis.dev for api_core refs. ([#9085](https://github.com/googleapis/google-cloud-python/pull/9085)) - -## 0.34.0 - -07-30-2019 10:05 PDT - - -### Implementation Changes -- Pick up changes to GAPIC client configuration (via synth). ([#8724](https://github.com/googleapis/google-cloud-python/pull/8724)) -- Add `Cell.__repr__`. 
([#8683](https://github.com/googleapis/google-cloud-python/pull/8683)) -- Increase timeout for app profile update operation. ([#8417](https://github.com/googleapis/google-cloud-python/pull/8417)) - -### New Features -- Add methods returning Separate row types to remove confusion around return types of `row.commit`. ([#8662](https://github.com/googleapis/google-cloud-python/pull/8662)) -- Add `options_` argument to clients' `get_iam_policy` (via synth). ([#8652](https://github.com/googleapis/google-cloud-python/pull/8652)) -- Add `client_options` support, update list method docstrings (via synth). ([#8500](https://github.com/googleapis/google-cloud-python/pull/8500)) - -### Dependencies -- Bump minimum version for google-api-core to 1.14.0. ([#8709](https://github.com/googleapis/google-cloud-python/pull/8709)) -- Update pin for `grpc-google-iam-v1` to 0.12.3+. ([#8647](https://github.com/googleapis/google-cloud-python/pull/8647)) -- Allow kwargs to be passed to `create_channel` (via synth). ([#8458](https://github.com/googleapis/google-cloud-python/pull/8458)) -- Add `PartialRowsData.cancel`. ([#8176](https://github.com/googleapis/google-cloud-python/pull/8176)) - -### Documentation -- Update intersphinx mapping for requests. ([#8805](https://github.com/googleapis/google-cloud-python/pull/8805)) -- Link to googleapis.dev documentation in READMEs. ([#8705](https://github.com/googleapis/google-cloud-python/pull/8705)) -- Add compatibility check badges to READMEs. ([#8288](https://github.com/googleapis/google-cloud-python/pull/8288)) -- Add snppets illustrating use of application profiles. ([#7033](https://github.com/googleapis/google-cloud-python/pull/7033)) - -### Internal / Testing Changes -- Add nox session `docs` to remaining manual clients. ([#8478](https://github.com/googleapis/google-cloud-python/pull/8478)) -- All: Add docs job to publish to googleapis.dev. ([#8464](https://github.com/googleapis/google-cloud-python/pull/8464)) -- Force timeout for table creation to 90 seconds (in systests). ([#8450](https://github.com/googleapis/google-cloud-python/pull/8450)) -- Plug systest / snippet instance leaks. ([#8416](https://github.com/googleapis/google-cloud-python/pull/8416)) -- Declare encoding as utf-8 in pb2 files (via synth). ([#8346](https://github.com/googleapis/google-cloud-python/pull/8346)) -- Add disclaimer to auto-generated template files (via synth). ([#8308](https://github.com/googleapis/google-cloud-python/pull/8308)) -- Fix coverage in `types.py` (via synth). ([#8149](https://github.com/googleapis/google-cloud-python/pull/8149)) -- Integrate docstring / formatting tweaks (via synth). ([#8138](https://github.com/googleapis/google-cloud-python/pull/8138)) -- Use alabaster theme everwhere. ([#8021](https://github.com/googleapis/google-cloud-python/pull/8021)) - -## 0.33.0 - -05-16-2019 11:51 PDT - - -### Implementation Changes -- Fix typos in deprecation warnings. ([#7858](https://github.com/googleapis/google-cloud-python/pull/7858)) -- Add deprecation warnings for to-be-removed features. ([#7532](https://github.com/googleapis/google-cloud-python/pull/7532)) -- Remove classifier for Python 3.4 for end-of-life. ([#7535](https://github.com/googleapis/google-cloud-python/pull/7535)) -- Improve `Policy` interchange w/ JSON, gRPC payloads. ([#7378](https://github.com/googleapis/google-cloud-python/pull/7378)) - -### New Features -- Add support for passing `client_info` to client. 
([#7876](https://github.com/googleapis/google-cloud-python/pull/7876)) and ([#7898](https://github.com/googleapis/google-cloud-python/pull/7898)) -- Add `Table.mutation_timeout`, allowing override of config timeouts. ([#7424](https://github.com/googleapis/google-cloud-python/pull/7424)) - -### Dependencies -- Pin `google-cloud-core >= 1.0.0, < 2.0dev`. ([#7993](https://github.com/googleapis/google-cloud-python/pull/7993)) - -### Documentation -- Remove duplicate snippet tags for Delete cluster. ([#7860](https://github.com/googleapis/google-cloud-python/pull/7860)) -- Fix rendering of instance admin snippets. ([#7797](https://github.com/googleapis/google-cloud-python/pull/7797)) -- Avoid leaking instances from snippets. ([#7800](https://github.com/googleapis/google-cloud-python/pull/7800)) -- Fix enum reference in documentation. ([#7724](https://github.com/googleapis/google-cloud-python/pull/7724)) -- Remove duplicate snippets. ([#7528](https://github.com/googleapis/google-cloud-python/pull/7528)) -- Add snippeds for Batcher, RowData, Row Operations, AppendRow. ([#7019](https://github.com/googleapis/google-cloud-python/pull/7019)) -- Add column family snippets. ([#7014](https://github.com/googleapis/google-cloud-python/pull/7014)) -- Add Row Set snippets. ([#7016](https://github.com/googleapis/google-cloud-python/pull/7016)) -- Update client library documentation URLs. ([#7307](https://github.com/googleapis/google-cloud-python/pull/7307)) -- Fix typos in Table docstrings. ([#7261](https://github.com/googleapis/google-cloud-python/pull/7261)) -- Update copyright headers (via synth). ([#7139](https://github.com/googleapis/google-cloud-python/pull/7139)) -- Fix linked classes in generated docstrings (via synth). ([#7060](https://github.com/googleapis/google-cloud-python/pull/7060)) - -### Internal / Testing Changes -- Run `instance_admin` system tests on a separate instance from `table_admin` and `data` system tests. ([#6579](https://github.com/googleapis/google-cloud-python/pull/6579)) -- Re-blacken. ([#7462](https://github.com/googleapis/google-cloud-python/pull/7462)) -- Copy lintified proto files (via synth). ([#7445](https://github.com/googleapis/google-cloud-python/pull/7445)) -- Remove unused message exports (via synth). ([#7264](https://github.com/googleapis/google-cloud-python/pull/7264)) -- Compare 0 using '!=', rather than 'is not'. ([#7312](https://github.com/googleapis/google-cloud-python/pull/7312)) -- Add protos as an artifact to library ([#7205](https://github.com/googleapis/google-cloud-python/pull/7205)) -- Protoc-generated serialization update. ([#7077](https://github.com/googleapis/google-cloud-python/pull/7077)) -- Blacken snippets. ([#7048](https://github.com/googleapis/google-cloud-python/pull/7048)) -- Bigtable client snippets ([#7020](https://github.com/googleapis/google-cloud-python/pull/7020)) -- Pick up order-of-enum fix from GAPIC generator. ([#6879](https://github.com/googleapis/google-cloud-python/pull/6879)) -- Plug systest instance leaks ([#7004](https://github.com/googleapis/google-cloud-python/pull/7004)) - -## 0.32.1 - -12-17-2018 16:38 PST - - -### Documentation -- Document Python 2 deprecation ([#6910](https://github.com/googleapis/google-cloud-python/pull/6910)) -- Add snippets for table operations. ([#6484](https://github.com/googleapis/google-cloud-python/pull/6484)) - -## 0.32.0 - -12-10-2018 12:47 PST - - -### Implementation Changes -- Import `iam.policy` from `google.api_core`. 
([#6741](https://github.com/googleapis/google-cloud-python/pull/6741)) -- Remove `deepcopy` from `PartialRowData.cells` property. ([#6648](https://github.com/googleapis/google-cloud-python/pull/6648)) -- Pick up fixes to GAPIC generator. ([#6630](https://github.com/googleapis/google-cloud-python/pull/6630)) - -### Dependencies -- Update dependency to google-cloud-core ([#6835](https://github.com/googleapis/google-cloud-python/pull/6835)) - -### Internal / Testing Changes -- Blacken all gen'd libs ([#6792](https://github.com/googleapis/google-cloud-python/pull/6792)) -- Omit local deps ([#6701](https://github.com/googleapis/google-cloud-python/pull/6701)) -- Run black at end of synth.py ([#6698](https://github.com/googleapis/google-cloud-python/pull/6698)) -- Blackening Continued... ([#6667](https://github.com/googleapis/google-cloud-python/pull/6667)) -- Add templates for flake8, coveragerc, noxfile, and black. ([#6642](https://github.com/googleapis/google-cloud-python/pull/6642)) - -## 0.31.1 - -11-02-2018 08:13 PDT - -### Implementation Changes -- Fix anonymous usage under Bigtable emulator ([#6385](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6385)) -- Support `DirectRow` without a `Table` ([#6336](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6336)) -- Add retry parameter to `Table.read_rows()`. ([#6281](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6281)) -- Fix `ConditionalRow` interaction with `check_and_mutate_row` ([#6296](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6296)) -- Deprecate `channel` arg to `Client` ([#6279](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6279)) - -### Dependencies -- Update dependency: `google-api-core >= 1.4.1` ([#6391](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6391)) -- Update IAM version in dependencies ([#6362](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6362)) - -### Documentation -- Add `docs/snippets.py` and test ([#6012](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6012)) -- Normalize use of support level badges ([#6159](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6159)) - -### Internal / Testing Changes -- Fix client_info bug, update docstrings and timeouts. ([#6406)](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6406)) -- Remove now-spurious fixup from 'synth.py'. ([#6400](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6400)) -- Fix flaky systests / snippets ([#6367](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6367)) -- Add explicit coverage for `row_data._retry_read_rows_exception`. ([#6364](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6364)) -- Fix instance IAM test methods ([#6343](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6343)) -- Fix error from new flake8 version. ([#6309](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6309)) -- Use new Nox ([#6175](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6175)) - -## 0.31.0 - -### New Features -- Upgrade support level from `alpha` to `beta`. ([#6129](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6129)) - -### Implementation Changes -- Improve admin operation timeouts. ([#6010](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6010)) - -### Documentation -- Prepare docs for repo split. 
([#6014](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6014)) - -### Internal / Testing Changes -- Refactor `read_row` to call `read_rows` ([#6137](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6102)) -- Harden instance teardown against '429 Too Many Requests'. ([#6102](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6102)) -- Add `{RowSet,RowRange}.{__eq__,.__ne__}` ([#6025](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6025)) -- Regenerate low-level GAPIC code ([#6036](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6036)) - -## 0.30.2 - -### New Features -- Add iam policy implementation for an instance. (#5838) - -### Implementation Changes -- Fix smart retries for 'read_rows()' when reading the full table (#5966) - -### Documentation -- Replace links to `/stable/` with `/latest/`. (#5901) - -### Internal / Testing Changes -- Re-generate library using bigtable/synth.py (#5974) -- Refactor `read_rows` infrastructure (#5963) - -## 0.30.1 - -### Implementation changes - -- Fix non-admin access to table data. (#5875) -- Synth bigtable and bigtable admin GAPIC clients. (#5867) - -### Testing and internal changes - -- Nox: use in-place installs for local packages. (#5865) - -## 0.30.0 - -### New Features - -- Improve performance and capabilities of reads. `read_rows` now returns a generator; has automatic retries; and can read an arbitrary set of keys and ranges - - Consolidate read_rows and yield_rows (#5840) - - Implement row set for yield_rows (#5506) - - Improve read rows validation performance (#5390) - - Add retry for yield_rows (#4882) - - Require TimestampRanges to be milliseconds granularity (#5002) - - Provide better access to cell values (#4908) - - Add data app profile id (#5369) - -- Improve writes: Writes are usable in Beam - - Create MutationBatcher for bigtable (#5651) - - Allow DirectRow to be created without a table (#5567) - - Add data app profile id (#5369) - -- Improve table admin: Table creation now can also create families in a single RPC. Add an `exist()` method. Add `get_cluster_states` for information about replication - - Add 'Table.get_cluster_states' method (#5790) - - Optimize 'Table.exists' performance (#5749) - - Add column creation in 'Table.create()'. (#5576) - - Add 'Table.exists' method (#5545) - - Add split keys on create table - v2 (#5513) - - Avoid sharing table names across unrelated systests. (#5421) - - Add truncate table and drop by prefix on top of GAPIC integration (#5360) - -- Improve instance admin: Instance creation allows for the creation of multiple clusters. Instance label management is now enabled. - - Create app_profile_object (#5782) - - Add 'Instance.exists' method (#5802) - - Add 'InstanceAdminClient.list_clusters' method (#5715) - - Add 'Instance._state' property (#5736) - - Convert 'instance.labels' to return a dictionary (#5728) - - Reshape cluster.py, adding cluster() factory to instance.py (#5663) - - Convert 'Instance.update' to use 'instance.partial_instance_update' API (#5643) - - Refactor 'InstanceAdminClient.update_app_profile' to remove update_mask argument (#5684) - - Add the ability to create an instance with multiple clusters (#5622) - - Add 'instance_type', 'labels' to 'Instance' ctor (#5614) - - Add optional app profile to 'Instance.table' (#5605) - - Clean up Instance creation. (#5542) - - Make 'InstanceAdminClient.list_instances' return actual instance objects, not protos. 
(#5420) - - Add admin app profile methods on Instance (#5315) - -### Internal / Testing Changes -- Rename releases to changelog and include from CHANGELOG.md (#5191) -- Fix bad trove classifier -- Integrate new generated low-level client (#5178) -- Override gRPC max message lengths. (#5498) -- Use client properties rather than private attrs (#5398) -- Fix the broken Bigtable system test. (#5607) -- Fix Py3 breakage in new system test. (#5474) -- Modify system test for new GAPIC code (#5302) -- Add Test runs for Python 3.7 and remove 3.4 (#5295) -- Disable Bigtable system tests (#5381) -- Modify system tests to use prerelease versions of grpcio (#5304) -- Pass through 'session.posargs' when running Bigtable system tests. (#5418) -- Harden 'test_list_instances' against simultaneous test runs. (#5476) -- Shorten instance / cluster name to fix CI breakage. (#5641) -- Fix failing systest: 'test_create_instance_w_two_clusters'. (#5836) -- Add labels {'python-system': ISO-timestamp} to systest instances (#5729) -- Shorten cluster ID in system test (#5719) -- Harden 'test_list_instances' further. (#5696) -- Improve testing of create instance (#5544) - -## 0.29.0 - -### New features - -- Use `api_core.retry` for `mutate_row` (#4665, #4341) -- Added a row generator on a table. (#4679) - -### Implementation changes - -- Remove gax usage from BigTable (#4873) -- BigTable: Cell.from_pb() performance improvement (#4745) - -### Dependencies - -- Update dependency range for api-core to include v1.0.0 releases (#4944) - -### Documentation - -- Minor typo (#4758) -- Row filter end points documentation error (#4667) -- Removing "rename" from bigtable table.py comments (#4526) -- Small docs/hygiene tweaks after #4256. (#4333) - -### Testing and internal changes - -- Install local dependencies when running lint (#4936) -- Re-enable lint for tests, remove usage of pylint (#4921) -- Normalize all setup.py files (#4909) -- Timestamp system test fix (#4765) - -## 0.28.1 - -### Implementation Changes - -- Bugfix: Distinguish between an unset column qualifier and an empty string - column qualifier while parsing a `ReadRows` response (#4252) - -### Features added - -- Add a ``retry`` strategy that will be used for retry-able errors - in ``Table.mutate_rows``. This will be used for gRPC errors of type - ``ABORTED``, ``DEADLINE_EXCEEDED`` and ``SERVICE_UNAVAILABLE``. (#4256) - -PyPI: https://pypi.org/project/google-cloud-bigtable/0.28.1/ - -## 0.28.0 - -### Documentation - -- Fixed referenced types in `Table.row` docstring (#3934, h/t to - @MichaelTamm) -- Added link to "Python Development Environment Setup Guide" in - project README (#4187, h/t to @michaelawyu) - -### Dependencies - -- Upgrading to `google-cloud-core >= 0.28.0` and adding dependency - on `google-api-core` (#4221, #4280) - -PyPI: https://pypi.org/project/google-cloud-bigtable/0.28.0/ diff --git a/bigtable/LICENSE b/bigtable/LICENSE deleted file mode 100644 index d64569567334..000000000000 --- a/bigtable/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/bigtable/MANIFEST.in b/bigtable/MANIFEST.in deleted file mode 100644 index 9cbf175afe6b..000000000000 --- a/bigtable/MANIFEST.in +++ /dev/null @@ -1,5 +0,0 @@ -include README.rst LICENSE -recursive-include google *.json *.proto -recursive-include tests * -global-exclude *.py[co] -global-exclude __pycache__ diff --git a/bigtable/README.rst b/bigtable/README.rst deleted file mode 100644 index 5330d231688b..000000000000 --- a/bigtable/README.rst +++ /dev/null @@ -1,99 +0,0 @@ -Python Client for Google Cloud Bigtable -======================================= - -|GA| |pypi| |versions| - -`Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the -same database that powers many core Google services, including Search, -Analytics, Maps, and Gmail. - -- `Client Library Documentation`_ -- `Product Documentation`_ - -.. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg - :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#general-availability -.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg - :target: https://pypi.org/project/google-cloud-bigtable/ -.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg - :target: https://pypi.org/project/google-cloud-bigtable/ -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable -.. _Client Library Documentation: https://googleapis.dev/python/bigtable/latest -.. _Product Documentation: https://cloud.google.com/bigtable/docs - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. `Enable the Cloud Bigtable API.`_ -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Enable the Cloud Bigtable API.: https://cloud.google.com/bigtable -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Supported Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 - -Deprecated Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv - source /bin/activate - /bin/pip install google-cloud-bigtable - - -Windows -^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv - \Scripts\activate - \Scripts\pip.exe install google-cloud-bigtable - -Next Steps -~~~~~~~~~~ - -- Read the `Client Library Documentation`_ for Cloud Bigtable API - to see other available methods on the client. -- Read the `Product documentation`_ to learn - more about the product and see How-to Guides. 
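Once the library is installed and authentication is configured, basic reads and writes follow the pattern the Data API documentation later in this diff describes. The snippet below is a minimal sketch only: the project, instance, table, and column family IDs are placeholders, and the column family is assumed to already exist.

.. code-block:: python

    from google.cloud import bigtable

    # Placeholder identifiers -- substitute your own project, instance,
    # table, and column family. The column family must already exist.
    client = bigtable.Client(project="my-project")
    instance = client.instance("my-instance")
    table = instance.table("my-table")

    # Row keys and cell values are bytes; column family IDs are strings.
    row = table.row(b"greeting#0")
    row.set_cell("stats", b"message", b"hello world")
    row.commit()

    # Read the cell back.
    partial_row = table.read_row(b"greeting#0")
    print(partial_row.cells["stats"][b"message"][0].value)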
- -``google-cloud-happybase`` --------------------------- - -In addition to the core ``google-cloud-bigtable``, we provide a -`google-cloud-happybase -`__ library -with the same interface as the popular `HappyBase -`__ library. Unlike HappyBase, -``google-cloud-happybase`` uses ``google-cloud-bigtable`` under the covers, -rather than Apache HBase. diff --git a/bigtable/docs/README.rst b/bigtable/docs/README.rst deleted file mode 120000 index 89a0106941ff..000000000000 --- a/bigtable/docs/README.rst +++ /dev/null @@ -1 +0,0 @@ -../README.rst \ No newline at end of file diff --git a/bigtable/docs/_static/custom.css b/bigtable/docs/_static/custom.css deleted file mode 100644 index 0abaf229fce3..000000000000 --- a/bigtable/docs/_static/custom.css +++ /dev/null @@ -1,4 +0,0 @@ -div#python2-eol { - border-color: red; - border-width: medium; -} \ No newline at end of file diff --git a/bigtable/docs/_templates/layout.html b/bigtable/docs/_templates/layout.html deleted file mode 100644 index 228529efe2d2..000000000000 --- a/bigtable/docs/_templates/layout.html +++ /dev/null @@ -1,50 +0,0 @@ - -{% extends "!layout.html" %} -{%- block content %} -{%- if theme_fixed_sidebar|lower == 'true' %} -
- {{ sidebar() }} - {%- block document %} -
- {%- if render_sidebar %} -
- {%- endif %} - - {%- block relbar_top %} - {%- if theme_show_relbar_top|tobool %} - - {%- endif %} - {% endblock %} - -
- <div id="python2-eol">
- On January 1, 2020 this library will no longer support Python 2 on the latest released version. - Previously released library versions will continue to be available. For more information please - visit Python 2 support on Google Cloud. - </div>
- {% block body %} {% endblock %} -
- - {%- block relbar_bottom %} - {%- if theme_show_relbar_bottom|tobool %} - - {%- endif %} - {% endblock %} - - {%- if render_sidebar %} -
- {%- endif %} -
- {%- endblock %} -
-
-{%- else %} -{{ super() }} -{%- endif %} -{%- endblock %} diff --git a/bigtable/docs/changelog.md b/bigtable/docs/changelog.md deleted file mode 120000 index 04c99a55caae..000000000000 --- a/bigtable/docs/changelog.md +++ /dev/null @@ -1 +0,0 @@ -../CHANGELOG.md \ No newline at end of file diff --git a/bigtable/docs/client-intro.rst b/bigtable/docs/client-intro.rst deleted file mode 100644 index 6a38437790e2..000000000000 --- a/bigtable/docs/client-intro.rst +++ /dev/null @@ -1,90 +0,0 @@ -Base for Everything -=================== - -To use the API, the :class:`Client ` -class defines a high-level interface which handles authorization -and creating other objects: - -.. code:: python - - from google.cloud.bigtable.client import Client - client = Client() - -Long-lived Defaults -------------------- - -When creating a :class:`Client `, the -``user_agent`` argument has sensible a default -(:data:`DEFAULT_USER_AGENT `). -However, you may over-ride it and the value will be used throughout all API -requests made with the ``client`` you create. - -Configuration -------------- - -- For an overview of authentication in ``google-cloud-python``, - see `Authentication `_. - -- In addition to any authentication configuration, you can also set the - :envvar:`GOOGLE_CLOUD_PROJECT` environment variable for the Google Cloud Console - project you'd like to interact with. If your code is running in Google App - Engine or Google Compute Engine the project will be detected automatically. - (Setting this environment variable is not required, you may instead pass the - ``project`` explicitly when constructing a - :class:`Client `). - -- After configuring your environment, create a - :class:`Client ` - - .. code:: - - >>> from google.cloud import bigtable - >>> client = bigtable.Client() - - or pass in ``credentials`` and ``project`` explicitly - - .. code:: - - >>> from google.cloud import bigtable - >>> client = bigtable.Client(project='my-project', credentials=creds) - -.. tip:: - - Be sure to use the **Project ID**, not the **Project Number**. - -Admin API Access ----------------- - -If you'll be using your client to make `Instance Admin`_ and `Table Admin`_ -API requests, you'll need to pass the ``admin`` argument: - -.. code:: python - - client = bigtable.Client(admin=True) - -Read-Only Mode --------------- - -If, on the other hand, you only have (or want) read access to the data, -you can pass the ``read_only`` argument: - -.. code:: python - - client = bigtable.Client(read_only=True) - -This will ensure that the -:data:`READ_ONLY_SCOPE ` is used -for API requests (so any accidental requests that would modify data will -fail). - -Next Step ---------- - -After a :class:`Client `, the next highest-level -object is an :class:`Instance `. You'll need -one before you can interact with tables or data. - -Head next to learn about the :doc:`instance-api`. - -.. _Instance Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1 -.. _Table Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1 diff --git a/bigtable/docs/client.rst b/bigtable/docs/client.rst deleted file mode 100644 index c48595c8ac0b..000000000000 --- a/bigtable/docs/client.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client -~~~~~~ - -.. 
automodule:: google.cloud.bigtable.client - :members: - :show-inheritance: diff --git a/bigtable/docs/cluster.rst b/bigtable/docs/cluster.rst deleted file mode 100644 index ad33aae5e0b8..000000000000 --- a/bigtable/docs/cluster.rst +++ /dev/null @@ -1,6 +0,0 @@ -Cluster -~~~~~~~ - -.. automodule:: google.cloud.bigtable.cluster - :members: - :show-inheritance: diff --git a/bigtable/docs/column-family.rst b/bigtable/docs/column-family.rst deleted file mode 100644 index de6c1eb1f5df..000000000000 --- a/bigtable/docs/column-family.rst +++ /dev/null @@ -1,49 +0,0 @@ -Column Families -=============== - -When creating a -:class:`ColumnFamily `, it is -possible to set garbage collection rules for expired data. - -By setting a rule, cells in the table matching the rule will be deleted -during periodic garbage collection (which executes opportunistically in the -background). - -The types -:class:`MaxAgeGCRule `, -:class:`MaxVersionsGCRule `, -:class:`GarbageCollectionRuleUnion ` and -:class:`GarbageCollectionRuleIntersection ` -can all be used as the optional ``gc_rule`` argument in the -:class:`ColumnFamily ` -constructor. This value is then used in the -:meth:`create() ` and -:meth:`update() ` methods. - -These rules can be nested arbitrarily, with a -:class:`MaxAgeGCRule ` or -:class:`MaxVersionsGCRule ` -at the lowest level of the nesting: - -.. code:: python - - import datetime - - max_age = datetime.timedelta(days=3) - rule1 = MaxAgeGCRule(max_age) - rule2 = MaxVersionsGCRule(1) - - # Make a composite that matches anything older than 3 days **AND** - # with more than 1 version. - rule3 = GarbageCollectionIntersection(rules=[rule1, rule2]) - - # Make another composite that matches our previous intersection - # **OR** anything that has more than 3 versions. - rule4 = GarbageCollectionRule(max_num_versions=3) - rule5 = GarbageCollectionUnion(rules=[rule3, rule4]) - ----- - -.. automodule:: google.cloud.bigtable.column_family - :members: - :show-inheritance: diff --git a/bigtable/docs/conf.py b/bigtable/docs/conf.py deleted file mode 100644 index 97b890f1a8c9..000000000000 --- a/bigtable/docs/conf.py +++ /dev/null @@ -1,359 +0,0 @@ -# -*- coding: utf-8 -*- -# -# google-cloud-bigtable documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. 
-extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-bigtable" -copyright = u"2017, Google" -author = u"Google APIs" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. 
-htmlhelp_basename = "google-cloud-bigtable-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - #'preamble': '', - # Latex figure (float) alignment - #'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-bigtable.tex", - u"google-cloud-bigtable Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-bigtable", - u"google-cloud-bigtable Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-bigtable", - u"google-cloud-bigtable Documentation", - author, - "google-cloud-bigtable", - "GAPIC library for Bigtable", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("https://requests.kennethreitz.org/en/stable/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/bigtable/docs/data-api.rst b/bigtable/docs/data-api.rst deleted file mode 100644 index b50995be7368..000000000000 --- a/bigtable/docs/data-api.rst +++ /dev/null @@ -1,344 +0,0 @@ -Data API -======== - -After creating a :class:`Table ` and some -column families, you are ready to store and retrieve data. - -Cells vs. Columns vs. Column Families -+++++++++++++++++++++++++++++++++++++ - -* As explained in the :doc:`table overview `, tables can - have many column families. -* As described below, a table can also have many rows which are - specified by row keys. -* Within a row, data is stored in a cell. A cell simply has a value (as - bytes) and a timestamp. The number of cells in each row can be - different, depending on what was stored in each row. -* Each cell lies in a column (**not** a column family). A column is really - just a more **specific** modifier within a column family. A column - can be present in every column family, in only one or anywhere in between. -* Within a column family there can be many columns. For example, within - the column family ``foo`` we could have columns ``bar`` and ``baz``. - These would typically be represented as ``foo:bar`` and ``foo:baz``. - -Modifying Data -++++++++++++++ - -Since data is stored in cells, which are stored in rows, we -use the metaphor of a **row** in classes that are used to modify -(write, update, delete) data in a -:class:`Table `. - -Direct vs. Conditional vs. Append ---------------------------------- - -There are three ways to modify data in a table, described by the -`MutateRow`_, `CheckAndMutateRow`_ and `ReadModifyWriteRow`_ API -methods. - -* The **direct** way is via `MutateRow`_ which involves simply - adding, overwriting or deleting cells. The - :class:`DirectRow ` class - handles direct mutations. -* The **conditional** way is via `CheckAndMutateRow`_. This method - first checks if some filter is matched in a given row, then - applies one of two sets of mutations, depending on if a match - occurred or not. (These mutation sets are called the "true - mutations" and "false mutations".) The - :class:`ConditionalRow ` class - handles conditional mutations. -* The **append** way is via `ReadModifyWriteRow`_. This simply - appends (as bytes) or increments (as an integer) data in a presumed - existing cell in a row. The - :class:`AppendRow ` class - handles append mutations. - -Row Factory ------------ - -A single factory can be used to create any of the three row types. -To create a :class:`DirectRow `: - -.. code:: python - - row = table.row(row_key) - -Unlike the previous string values we've used before, the row key must -be ``bytes``. - -To create a :class:`ConditionalRow `, -first create a :class:`RowFilter ` and -then - -.. 
code:: python - - cond_row = table.row(row_key, filter_=filter_) - -To create an :class:`AppendRow ` - -.. code:: python - - append_row = table.row(row_key, append=True) - -Building Up Mutations ---------------------- - -In all three cases, a set of mutations (or two sets) are built up -on a row before they are sent off in a batch via - -.. code:: python - - row.commit() - -Direct Mutations ----------------- - -Direct mutations can be added via one of four methods - -* :meth:`set_cell() ` allows a - single value to be written to a column - - .. code:: python - - row.set_cell(column_family_id, column, value, - timestamp=timestamp) - - If the ``timestamp`` is omitted, the current time on the Google Cloud - Bigtable server will be used when the cell is stored. - - The value can either be bytes or an integer, which will be converted to - bytes as a signed 64-bit integer. - -* :meth:`delete_cell() ` deletes - all cells (i.e. for all timestamps) in a given column - - .. code:: python - - row.delete_cell(column_family_id, column) - - Remember, this only happens in the ``row`` we are using. - - If we only want to delete cells from a limited range of time, a - :class:`TimestampRange ` can - be used - - .. code:: python - - row.delete_cell(column_family_id, column, - time_range=time_range) - -* :meth:`delete_cells() ` does - the same thing as - :meth:`delete_cell() `, - but accepts a list of columns in a column family rather than a single one. - - .. code:: python - - row.delete_cells(column_family_id, [column1, column2], - time_range=time_range) - - In addition, if we want to delete cells from every column in a column family, - the special :attr:`ALL_COLUMNS ` - value can be used - - .. code:: python - - row.delete_cells(column_family_id, row.ALL_COLUMNS, - time_range=time_range) - -* :meth:`delete() ` will delete the - entire row - - .. code:: python - - row.delete() - -Conditional Mutations ---------------------- - -Making **conditional** modifications is essentially identical -to **direct** modifications: it uses the exact same methods -to accumulate mutations. - -However, each mutation added must specify a ``state``: will the mutation be -applied if the filter matches or if it fails to match. - -For example: - -.. code:: python - - cond_row.set_cell(column_family_id, column, value, - timestamp=timestamp, state=True) - -will add to the set of true mutations. - -Append Mutations ----------------- - -Append mutations can be added via one of two methods - -* :meth:`append_cell_value() ` - appends a bytes value to an existing cell: - - .. code:: python - - append_row.append_cell_value(column_family_id, column, bytes_value) - -* :meth:`increment_cell_value() ` - increments an integer value in an existing cell: - - .. code:: python - - append_row.increment_cell_value(column_family_id, column, int_value) - - Since only bytes are stored in a cell, the cell value is decoded as - a signed 64-bit integer before being incremented. (This happens on - the Google Cloud Bigtable server, not in the library.) - -Notice that no timestamp was specified. This is because **append** mutations -operate on the latest value of the specified column. - -If there are no cells in the specified column, then the empty string (bytes -case) or zero (integer case) are the assumed values. - -Starting Fresh --------------- - -If accumulated mutations need to be dropped, use - -.. 
code:: python - - row.clear() - -Reading Data -++++++++++++ - -Read Single Row from a Table ----------------------------- - -To make a `ReadRows`_ API request for a single row key, use -:meth:`Table.read_row() `: - -.. code:: python - - >>> row_data = table.read_row(row_key) - >>> row_data.cells - { - u'fam1': { - b'col1': [ - , - , - ], - b'col2': [ - , - ], - }, - u'fam2': { - b'col3': [ - , - , - , - ], - }, - } - >>> cell = row_data.cells[u'fam1'][b'col1'][0] - >>> cell - - >>> cell.value - b'val1' - >>> cell.timestamp - datetime.datetime(2016, 2, 27, 3, 41, 18, 122823, tzinfo=) - -Rather than returning a :class:`DirectRow ` -or similar class, this method returns a -:class:`PartialRowData ` -instance. This class is used for reading and parsing data rather than for -modifying data (as :class:`DirectRow ` is). - -A filter can also be applied to the results: - -.. code:: python - - row_data = table.read_row(row_key, filter_=filter_val) - -The allowable ``filter_`` values are the same as those used for a -:class:`ConditionalRow `. For -more information, see the -:meth:`Table.read_row() ` documentation. - -Stream Many Rows from a Table ------------------------------ - -To make a `ReadRows`_ API request for a stream of rows, use -:meth:`Table.read_rows() `: - -.. code:: python - - row_data = table.read_rows() - -Using gRPC over HTTP/2, a continual stream of responses will be delivered. -In particular - -* :meth:`consume_next() ` - pulls the next result from the stream, parses it and stores it on the - :class:`PartialRowsData ` instance -* :meth:`consume_all() ` - pulls results from the stream until there are no more -* :meth:`cancel() ` closes - the stream - -See the :class:`PartialRowsData ` -documentation for more information. - -As with -:meth:`Table.read_row() `, an optional -``filter_`` can be applied. In addition a ``start_key`` and / or ``end_key`` -can be supplied for the stream, a ``limit`` can be set and a boolean -``allow_row_interleaving`` can be specified to allow faster streamed results -at the potential cost of non-sequential reads. - -See the :meth:`Table.read_rows() ` -documentation for more information on the optional arguments. - -Sample Keys in a Table ----------------------- - -Make a `SampleRowKeys`_ API request with -:meth:`Table.sample_row_keys() `: - -.. code:: python - - keys_iterator = table.sample_row_keys() - -The returned row keys will delimit contiguous sections of the table of -approximately equal size, which can be used to break up the data for -distributed tasks like mapreduces. - -As with -:meth:`Table.read_rows() `, the -returned ``keys_iterator`` is connected to a cancellable HTTP/2 stream. - -The next key in the result can be accessed via - -.. code:: python - - next_key = keys_iterator.next() - -or all keys can be iterated over via - -.. code:: python - - for curr_key in keys_iterator: - do_something(curr_key) - -Just as with reading, the stream can be canceled: - -.. code:: python - - keys_iterator.cancel() - -.. _ReadRows: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L36-L38 -.. _SampleRowKeys: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L44-L46 -.. 
_MutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L50-L52 -.. _CheckAndMutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L62-L64 -.. _ReadModifyWriteRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L70-L72 diff --git a/bigtable/docs/index.rst b/bigtable/docs/index.rst deleted file mode 100644 index 8c76f79b80e1..000000000000 --- a/bigtable/docs/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. include:: README.rst - - -Using the API ------------- -.. toctree:: - :maxdepth: 2 - - usage - - -API Reference ------------- -.. toctree:: - :maxdepth: 2 - - instance-api - table-api - data-api - - -Changelog --------- - -For a list of all ``google-cloud-bigtable`` releases: - -.. toctree:: - :maxdepth: 2 - - changelog diff --git a/bigtable/docs/instance-api.rst b/bigtable/docs/instance-api.rst deleted file mode 100644 index bc338d7c7ca9..000000000000 --- a/bigtable/docs/instance-api.rst +++ /dev/null @@ -1,135 +0,0 @@ -Instance Admin API -================== - -After creating a :class:`Client `, you can -interact with individual instances for a project. - -List Instances -------------- - -If you want a comprehensive list of all existing instances, make a -`ListInstances`_ API request with -:meth:`Client.list_instances() `: - -.. code:: python - - instances = client.list_instances() - -Instance Factory ---------------- - -To create an :class:`Instance ` object: - -.. code:: python - - instance = client.instance(instance_id, location_id, - display_name=display_name) - -- ``location_id`` is the ID of the location in which the instance's cluster - will be hosted, e.g. ``'us-central1-c'``. ``location_id`` is required for - instances which do not already exist. - -- ``display_name`` is optional. When not provided, ``display_name`` defaults - to the ``instance_id`` value. - -You can also use :meth:`Client.instance` to create a local wrapper for -instances that have already been created with the API, or through the web -console: - -.. code:: python - - instance = client.instance(existing_instance_id) - instance.reload() - -Create a new Instance --------------------- - -After creating the instance object, make a `CreateInstance`_ API request -with :meth:`create() `: - -.. code:: python - - instance.display_name = 'My very own instance' - instance.create() - -Check on Current Operation ------------------------- - -.. note:: - - When modifying an instance (via a `CreateInstance`_ request), the Bigtable - API will return a `long-running operation`_ and a corresponding - :class:`Operation ` object - will be returned by - :meth:`create() `. - -You can check if a long-running operation (for a -:meth:`create() `) has finished -by making a `GetOperation`_ request with -:meth:`Operation.finished() `: - -.. code:: python - - >>> operation = instance.create() - >>> operation.finished() - True - -.. note:: - - Once an :class:`Operation ` object - has returned :data:`True` from - :meth:`finished() `, the - object should not be re-used. Subsequent calls to - :meth:`finished() ` - will result in a :class:`ValueError `. 
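A minimal polling sketch of the pattern above, assuming ``instance`` was built with the instance factory shown earlier; because ``finished()`` performs a GetOperation request on each call, the loop simply retries until the create operation completes:

.. code:: python

    import time

    operation = instance.create()

    # Retry until the long-running CreateInstance operation reports
    # completion. Once finished() has returned True, the operation
    # object should not be reused.
    while not operation.finished():
        time.sleep(5)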
- -Get metadata for an existing Instance ------------------------------------- - -After creating the instance object, make a `GetInstance`_ API request -with :meth:`reload() `: - -.. code:: python - - instance.reload() - -This will load ``display_name`` for the existing ``instance`` object. - -Update an existing Instance --------------------------- - -After creating the instance object, make an `UpdateInstance`_ API request -with :meth:`update() `: - -.. code:: python - - instance.display_name = 'New display_name' - instance.update() - -Delete an existing Instance --------------------------- - -Make a `DeleteInstance`_ API request with -:meth:`delete() `: - -.. code:: python - - instance.delete() - -Next Step ---------- - -Now we go down the hierarchy from -:class:`Instance ` to a -:class:`Table `. - -Head next to learn about the :doc:`table-api`. - -.. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance -.. _CreateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L66-L68 -.. _GetInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L38-L40 -.. _UpdateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L93-L95 -.. _DeleteInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L109-L111 -.. _ListInstances: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L44-L46 -.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 -.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/bigtable/docs/instance.rst b/bigtable/docs/instance.rst deleted file mode 100644 index f9be9672fc64..000000000000 --- a/bigtable/docs/instance.rst +++ /dev/null @@ -1,6 +0,0 @@ -Instance -~~~~~~~~ - -.. automodule:: google.cloud.bigtable.instance - :members: - :show-inheritance: diff --git a/bigtable/docs/row-data.rst b/bigtable/docs/row-data.rst deleted file mode 100644 index 503f9b1cbdfd..000000000000 --- a/bigtable/docs/row-data.rst +++ /dev/null @@ -1,6 +0,0 @@ -Row Data -~~~~~~~~ - -.. automodule:: google.cloud.bigtable.row_data - :members: - :show-inheritance: diff --git a/bigtable/docs/row-filters.rst b/bigtable/docs/row-filters.rst deleted file mode 100644 index 292ae9dfb6aa..000000000000 --- a/bigtable/docs/row-filters.rst +++ /dev/null @@ -1,67 +0,0 @@ -Bigtable Row Filters ==================== - -It is possible to use a -:class:`RowFilter ` -when adding mutations to a -:class:`ConditionalRow ` and when -reading row data with :meth:`read_row() ` -or :meth:`read_rows() `. 
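Any one of the basic filters listed below can be passed straight to a read; a minimal sketch, assuming ``table`` and ``row_key`` already exist as in the Data API examples:

.. code:: python

    from google.cloud.bigtable.row_filters import CellsColumnLimitFilter

    # Keep only the most recent cell in each column of this row.
    filter_ = CellsColumnLimitFilter(1)
    row_data = table.read_row(row_key, filter_=filter_)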
- -As laid out in the `RowFilter definition`_, the following basic filters -are provided: - -* :class:`SinkFilter <.row_filters.SinkFilter>` -* :class:`PassAllFilter <.row_filters.PassAllFilter>` -* :class:`BlockAllFilter <.row_filters.BlockAllFilter>` -* :class:`RowKeyRegexFilter <.row_filters.RowKeyRegexFilter>` -* :class:`RowSampleFilter <.row_filters.RowSampleFilter>` -* :class:`FamilyNameRegexFilter <.row_filters.FamilyNameRegexFilter>` -* :class:`ColumnQualifierRegexFilter <.row_filters.ColumnQualifierRegexFilter>` -* :class:`TimestampRangeFilter <.row_filters.TimestampRangeFilter>` -* :class:`ColumnRangeFilter <.row_filters.ColumnRangeFilter>` -* :class:`ValueRegexFilter <.row_filters.ValueRegexFilter>` -* :class:`ValueRangeFilter <.row_filters.ValueRangeFilter>` -* :class:`CellsRowOffsetFilter <.row_filters.CellsRowOffsetFilter>` -* :class:`CellsRowLimitFilter <.row_filters.CellsRowLimitFilter>` -* :class:`CellsColumnLimitFilter <.row_filters.CellsColumnLimitFilter>` -* :class:`StripValueTransformerFilter <.row_filters.StripValueTransformerFilter>` -* :class:`ApplyLabelFilter <.row_filters.ApplyLabelFilter>` - -In addition, these filters can be combined into composite filters with - -* :class:`RowFilterChain <.row_filters.RowFilterChain>` -* :class:`RowFilterUnion <.row_filters.RowFilterUnion>` -* :class:`ConditionalRowFilter <.row_filters.ConditionalRowFilter>` - -These rules can be nested arbitrarily, with a basic filter at the lowest -level. For example: - -.. code:: python - - # Filter in a specified column (matching any column family). - col1_filter = ColumnQualifierRegexFilter(b'columnbia') - - # Create a filter to label results. - label1 = u'label-red' - label1_filter = ApplyLabelFilter(label1) - - # Combine the filters to label all the cells in columnbia. - chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) - - # Create a similar filter to label cells blue. - col2_filter = ColumnQualifierRegexFilter(b'columnseeya') - label2 = u'label-blue' - label2_filter = ApplyLabelFilter(label2) - chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) - - # Bring our two labeled columns together. - row_filter = RowFilterUnion(filters=[chain1, chain2]) - ----- - -.. automodule:: google.cloud.bigtable.row_filters - :members: - :show-inheritance: - -.. _RowFilter definition: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/1ff247c2e3b7cd0a2dd49071b2d95beaf6563092/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_data.proto#L195 diff --git a/bigtable/docs/row.rst b/bigtable/docs/row.rst deleted file mode 100644 index 33686608b363..000000000000 --- a/bigtable/docs/row.rst +++ /dev/null @@ -1,7 +0,0 @@ -Bigtable Row -============ - -.. automodule:: google.cloud.bigtable.row - :members: - :show-inheritance: - :inherited-members: diff --git a/bigtable/docs/snippets.py b/bigtable/docs/snippets.py deleted file mode 100644 index 850362b4a42a..000000000000 --- a/bigtable/docs/snippets.py +++ /dev/null @@ -1,770 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018, Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Testable usage examples for Google Cloud Bigtable API wrapper - -Each example function takes a ``client`` argument (which must be an instance -of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task -with the API. - -To facilitate running the examples as system tests, each example is also passed -a ``to_delete`` list; the function adds to the list any objects created which -need to be deleted during teardown. - -.. note:: - This file is under progress and will be updated with more guidance from - the team. Unit tests will be added with guidance from the team. - -""" - -import datetime -import pytest - -from test_utils.system import unique_resource_id -from test_utils.retry import RetryErrors -from google.api_core.exceptions import NotFound -from google.api_core.exceptions import TooManyRequests -from google.api_core.exceptions import DeadlineExceeded -from google.cloud._helpers import UTC -from google.cloud.bigtable import Client -from google.cloud.bigtable import enums - - -UNIQUE_SUFFIX = unique_resource_id("-") -INSTANCE_ID = "snippet-tests" + UNIQUE_SUFFIX -CLUSTER_ID = "clus-1-" + UNIQUE_SUFFIX -APP_PROFILE_ID = "app-prof" + UNIQUE_SUFFIX -TABLE_ID = "tabl-1" + UNIQUE_SUFFIX -ROUTING_POLICY_TYPE = enums.RoutingPolicyType.ANY -LOCATION_ID = "us-central1-f" -ALT_LOCATION_ID = "us-central1-a" -PRODUCTION = enums.Instance.Type.PRODUCTION -SERVER_NODES = 3 -STORAGE_TYPE = enums.StorageType.SSD -LABEL_KEY = u"python-snippet" -LABEL_STAMP = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") -) -LABELS = {LABEL_KEY: str(LABEL_STAMP)} -INSTANCES_TO_DELETE = [] - -retry_429 = RetryErrors(TooManyRequests, max_tries=9) -retry_504 = RetryErrors(DeadlineExceeded, max_tries=4) - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. - """ - - CLIENT = None - INSTANCE = None - TABLE = None - - -def setup_module(): - client = Config.CLIENT = Client(admin=True) - Config.INSTANCE = client.instance( - INSTANCE_ID, instance_type=PRODUCTION, labels=LABELS - ) - cluster = Config.INSTANCE.cluster( - CLUSTER_ID, - location_id=LOCATION_ID, - serve_nodes=SERVER_NODES, - default_storage_type=STORAGE_TYPE, - ) - operation = Config.INSTANCE.create(clusters=[cluster]) - # We want to make sure the operation completes. 
- operation.result(timeout=100) - Config.TABLE = Config.INSTANCE.table(TABLE_ID) - retry_504(Config.TABLE.create)() - - -def teardown_module(): - retry_429(Config.INSTANCE.delete)() - - for instance in INSTANCES_TO_DELETE: - try: - retry_429(instance.delete)() - except NotFound: - pass - - -def test_bigtable_create_instance(): - # [START bigtable_create_prod_instance] - from google.cloud.bigtable import Client - from google.cloud.bigtable import enums - - my_instance_id = "inst-my-" + UNIQUE_SUFFIX - my_cluster_id = "clus-my-" + UNIQUE_SUFFIX - location_id = "us-central1-f" - serve_nodes = 3 - storage_type = enums.StorageType.SSD - production = enums.Instance.Type.PRODUCTION - labels = {"prod-label": "prod-label"} - - client = Client(admin=True) - instance = client.instance(my_instance_id, instance_type=production, labels=labels) - cluster = instance.cluster( - my_cluster_id, - location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=storage_type, - ) - operation = instance.create(clusters=[cluster]) - - # We want to make sure the operation completes. - operation.result(timeout=100) - - # [END bigtable_create_prod_instance] - - try: - assert instance.exists() - finally: - retry_429(instance.delete)() - - -def test_bigtable_create_additional_cluster(): - # [START bigtable_create_cluster] - from google.cloud.bigtable import Client - from google.cloud.bigtable import enums - - # Assuming that there is an existing instance with `INSTANCE_ID` - # on the server already. - # to create an instance see - # 'https://cloud.google.com/bigtable/docs/creating-instance' - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - - cluster_id = "clus-my-" + UNIQUE_SUFFIX - location_id = "us-central1-a" - serve_nodes = 3 - storage_type = enums.StorageType.SSD - - cluster = instance.cluster( - cluster_id, - location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=storage_type, - ) - operation = cluster.create() - # We want to make sure the operation completes. 
- operation.result(timeout=100) - # [END bigtable_create_cluster] - - try: - assert cluster.exists() - finally: - retry_429(cluster.delete)() - - -def test_bigtable_create_reload_delete_app_profile(): - import re - - # [START bigtable_create_app_profile] - from google.cloud.bigtable import Client - from google.cloud.bigtable import enums - - routing_policy_type = enums.RoutingPolicyType.ANY - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - - description = "routing policy-multy" - - app_profile = instance.app_profile( - app_profile_id=APP_PROFILE_ID, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=CLUSTER_ID, - ) - - app_profile = app_profile.create(ignore_warnings=True) - # [END bigtable_create_app_profile] - - # [START bigtable_app_profile_name] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - app_profile = instance.app_profile(APP_PROFILE_ID) - - app_profile_name = app_profile.name - # [END bigtable_app_profile_name] - _profile_name_re = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P[^/]+)/" - r"appProfiles/(?P" - r"[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" - ) - assert _profile_name_re.match(app_profile_name) - - # [START bigtable_app_profile_exists] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - app_profile = instance.app_profile(APP_PROFILE_ID) - - app_profile_exists = app_profile.exists() - # [END bigtable_app_profile_exists] - assert app_profile_exists - - # [START bigtable_reload_app_profile] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - app_profile = instance.app_profile(APP_PROFILE_ID) - - app_profile.reload() - # [END bigtable_reload_app_profile] - assert app_profile.routing_policy_type == ROUTING_POLICY_TYPE - - # [START bigtable_update_app_profile] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - app_profile = instance.app_profile(APP_PROFILE_ID) - app_profile.reload() - - description = "My new app profile" - app_profile.description = description - app_profile.update() - # [END bigtable_update_app_profile] - assert app_profile.description == description - - # [START bigtable_delete_app_profile] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - app_profile = instance.app_profile(APP_PROFILE_ID) - app_profile.reload() - - app_profile.delete(ignore_warnings=True) - # [END bigtable_delete_app_profile] - assert not app_profile.exists() - - -def test_bigtable_list_instances(): - # [START bigtable_list_instances] - from google.cloud.bigtable import Client - - client = Client(admin=True) - (instances_list, failed_locations_list) = client.list_instances() - # [END bigtable_list_instances] - - assert len(instances_list) > 0 - - -def test_bigtable_list_clusters_on_instance(): - # [START bigtable_list_clusters_on_instance] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - (clusters_list, failed_locations_list) = instance.list_clusters() - # [END bigtable_list_clusters_on_instance] - - assert len(clusters_list) > 0 - - -def test_bigtable_list_clusters_in_project(): - # [START bigtable_list_clusters_in_project] - from google.cloud.bigtable import Client - - client = Client(admin=True) - (clusters_list, 
failed_locations_list) = client.list_clusters() - # [END bigtable_list_clusters_in_project] - - assert len(clusters_list) > 0 - - -def test_bigtable_list_app_profiles(): - app_profile = Config.INSTANCE.app_profile( - app_profile_id="app-prof-" + UNIQUE_SUFFIX, - routing_policy_type=enums.RoutingPolicyType.ANY, - ) - app_profile = app_profile.create(ignore_warnings=True) - - # [START bigtable_list_app_profiles] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - - app_profiles_list = instance.list_app_profiles() - # [END bigtable_list_app_profiles] - - try: - assert len(app_profiles_list) > 0 - finally: - retry_429(app_profile.delete)(ignore_warnings=True) - - -def test_bigtable_instance_exists(): - # [START bigtable_check_instance_exists] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance_exists = instance.exists() - # [END bigtable_check_instance_exists] - - assert instance_exists - - -def test_bigtable_cluster_exists(): - # [START bigtable_check_cluster_exists] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - cluster = instance.cluster(CLUSTER_ID) - cluster_exists = cluster.exists() - # [END bigtable_check_cluster_exists] - - assert cluster_exists - - -def test_bigtable_reload_instance(): - # [START bigtable_reload_instance] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance.reload() - # [END bigtable_reload_instance] - - assert instance.type_ == PRODUCTION.value - - -def test_bigtable_reload_cluster(): - # [START bigtable_reload_cluster] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - cluster = instance.cluster(CLUSTER_ID) - cluster.reload() - # [END bigtable_reload_cluster] - - assert cluster.serve_nodes == SERVER_NODES - - -def test_bigtable_update_instance(): - # [START bigtable_update_instance] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - display_name = "My new instance" - instance.display_name = display_name - instance.update() - # [END bigtable_update_instance] - - assert instance.display_name == display_name - - -def test_bigtable_update_cluster(): - # [START bigtable_update_cluster] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - cluster = instance.cluster(CLUSTER_ID) - cluster.serve_nodes = 4 - cluster.update() - # [END bigtable_update_cluster] - - assert cluster.serve_nodes == 4 - - -def test_bigtable_create_table(): - # [START bigtable_create_table] - from google.api_core import exceptions - from google.api_core import retry - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table("table_my") - # Define the GC policy to retain only the most recent 2 versions. - max_versions_rule = column_family.MaxVersionsGCRule(2) - - # Could include other retriable exception types - # Could configure deadline, etc. 
- predicate_504 = retry.if_exception_type(exceptions.DeadlineExceeded) - retry_504 = retry.Retry(predicate_504) - - retry_504(table.create)(column_families={"cf1": max_versions_rule}) - # [END bigtable_create_table] - - try: - assert table.exists() - finally: - retry_429(table.delete)() - - -def test_bigtable_list_tables(): - - # [START bigtable_list_tables] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - tables_list = instance.list_tables() - # [END bigtable_list_tables] - - # Check if returned list has expected table - table_names = [table.name for table in tables_list] - assert Config.TABLE.name in table_names - - -def test_bigtable_delete_cluster(): - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - cluster_id = "clus-my-" + UNIQUE_SUFFIX - cluster = instance.cluster( - cluster_id, - location_id=ALT_LOCATION_ID, - serve_nodes=SERVER_NODES, - default_storage_type=STORAGE_TYPE, - ) - operation = cluster.create() - # We want to make sure the operation completes. - operation.result(timeout=1000) - - # [START bigtable_delete_cluster] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - cluster_to_delete = instance.cluster(cluster_id) - - cluster_to_delete.delete() - # [END bigtable_delete_cluster] - - assert not cluster_to_delete.exists() - - -def test_bigtable_delete_instance(): - from google.cloud.bigtable import Client - - client = Client(admin=True) - - instance_id = "snipt-inst-del" + UNIQUE_SUFFIX - instance = client.instance(instance_id, instance_type=PRODUCTION, labels=LABELS) - cluster = instance.cluster( - "clus-to-delete" + UNIQUE_SUFFIX, - location_id=ALT_LOCATION_ID, - serve_nodes=1, - default_storage_type=STORAGE_TYPE, - ) - operation = instance.create(clusters=[cluster]) - - # We want to make sure the operation completes. - operation.result(timeout=100) - - # Make sure this instance gets deleted after the test case. - INSTANCES_TO_DELETE.append(instance) - - # [START bigtable_delete_instance] - from google.cloud.bigtable import Client - - client = Client(admin=True) - - instance_to_delete = client.instance(instance_id) - instance_to_delete.delete() - # [END bigtable_delete_instance] - - assert not instance_to_delete.exists() - - # Skip deleting it during module teardown if the assertion succeeds. 
- INSTANCES_TO_DELETE.remove(instance) - - -def test_bigtable_test_iam_permissions(): - # [START bigtable_test_iam_permissions] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance.reload() - permissions = ["bigtable.clusters.create", "bigtable.tables.create"] - permissions_allowed = instance.test_iam_permissions(permissions) - # [END bigtable_test_iam_permissions] - - assert permissions_allowed == permissions - - -def test_bigtable_set_iam_policy_then_get_iam_policy(): - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_set_iam_policy] - from google.cloud.bigtable import Client - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance.reload() - new_policy = Policy() - new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] - - policy_latest = instance.set_iam_policy(new_policy) - # [END bigtable_set_iam_policy] - - assert len(policy_latest.bigtable_admins) > 0 - - # [START bigtable_get_iam_policy] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - policy = instance.get_iam_policy() - # [END bigtable_get_iam_policy] - - assert len(policy.bigtable_admins) > 0 - - -def test_bigtable_project_path(): - import re - - # [START bigtable_project_path] - from google.cloud.bigtable import Client - - client = Client(admin=True) - project_path = client.project_path - # [END bigtable_project_path] - - -def test_bigtable_table_data_client(): - # [START bigtable_table_data_client] - from google.cloud.bigtable import Client - - client = Client(admin=True) - table_data_client = client.table_data_client - # [END bigtable_table_data_client] - - -def test_bigtable_table_admin_client(): - # [START bigtable_table_admin_client] - from google.cloud.bigtable import Client - - client = Client(admin=True) - table_admin_client = client.table_admin_client - # [END bigtable_table_admin_client] - - -def test_bigtable_instance_admin_client(): - # [START bigtable_instance_admin_client] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance_admin_client = client.instance_admin_client - # [END bigtable_instance_admin_client] - - -def test_bigtable_admins_policy(): - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_admins_policy] - from google.cloud.bigtable import Client - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance.reload() - new_policy = Policy() - new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] - - policy_latest = instance.set_iam_policy(new_policy) - policy = policy_latest.bigtable_admins - # [END bigtable_admins_policy] - - assert len(policy) > 0 - - -def test_bigtable_readers_policy(): - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_readers_policy] - from google.cloud.bigtable import Client - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance.reload() - new_policy = Policy() - new_policy[BIGTABLE_READER_ROLE] = 
[Policy.service_account(service_account_email)] - - policy_latest = instance.set_iam_policy(new_policy) - policy = policy_latest.bigtable_readers - # [END bigtable_readers_policy] - - assert len(policy) > 0 - - -def test_bigtable_users_policy(): - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_users_policy] - from google.cloud.bigtable import Client - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance.reload() - new_policy = Policy() - new_policy[BIGTABLE_USER_ROLE] = [Policy.service_account(service_account_email)] - - policy_latest = instance.set_iam_policy(new_policy) - policy = policy_latest.bigtable_users - # [END bigtable_users_policy] - - assert len(policy) > 0 - - -def test_bigtable_viewers_policy(): - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_viewers_policy] - from google.cloud.bigtable import Client - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance.reload() - new_policy = Policy() - new_policy[BIGTABLE_VIEWER_ROLE] = [Policy.service_account(service_account_email)] - - policy_latest = instance.set_iam_policy(new_policy) - policy = policy_latest.bigtable_viewers - # [END bigtable_viewers_policy] - - assert len(policy) > 0 - - -def test_bigtable_instance_name(): - import re - - # [START bigtable_instance_name] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance_name = instance.name - # [END bigtable_instance_name] - - -def test_bigtable_cluster_name(): - import re - - # [START bigtable_cluster_name] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - cluster = instance.cluster(CLUSTER_ID) - cluster_name = cluster.name - # [END bigtable_cluster_name] - - -def test_bigtable_instance_from_pb(): - # [START bigtable_instance_from_pb] - from google.cloud.bigtable import Client - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - - name = instance.name - instance_pb = instance_pb2.Instance( - name=name, display_name=INSTANCE_ID, type=PRODUCTION, labels=LABELS - ) - - instance2 = instance.from_pb(instance_pb, client) - # [END bigtable_instance_from_pb] - - assert instance2.name == instance.name - - -def test_bigtable_cluster_from_pb(): - # [START bigtable_cluster_from_pb] - from google.cloud.bigtable import Client - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - cluster = instance.cluster(CLUSTER_ID) - - name = cluster.name - cluster_state = cluster.state - cluster_pb = instance_pb2.Cluster( - name=name, - location=LOCATION_ID, - state=cluster_state, - serve_nodes=SERVER_NODES, - default_storage_type=STORAGE_TYPE, - ) - - cluster2 = cluster.from_pb(cluster_pb, instance) - # [END bigtable_cluster_from_pb] - - assert cluster2.name == cluster.name - - -def test_bigtable_instance_state(): - # [START bigtable_instance_state] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - instance_state = instance.state - # [END 
bigtable_instance_state] - - assert not instance_state - - -def test_bigtable_cluster_state(): - # [START bigtable_cluster_state] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - cluster = instance.cluster(CLUSTER_ID) - cluster_state = cluster.state - # [END bigtable_cluster_state] - - assert not cluster_state - - -if __name__ == "__main__": - pytest.main() diff --git a/bigtable/docs/snippets_table.py b/bigtable/docs/snippets_table.py deleted file mode 100644 index 702cf31b1447..000000000000 --- a/bigtable/docs/snippets_table.py +++ /dev/null @@ -1,1291 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2018, Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Testable usage examples for Google Cloud Bigtable API wrapper - -Each example function takes a ``client`` argument (which must be an instance -of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task -with the API. - -To facilitate running the examples as system tests, each example is also passed -a ``to_delete`` list; the function adds to the list any objects created which -need to be deleted during teardown. - -.. note:: - This file is under progress and will be updated with more guidance from - the team. Unit tests will be added with guidance from the team. - -""" - -import datetime -import pytest - -from test_utils.system import unique_resource_id -from google.cloud._helpers import UTC -from google.cloud.bigtable import Client -from google.cloud.bigtable import enums -from google.cloud.bigtable import column_family - - -INSTANCE_ID = "snippet" + unique_resource_id("-") -CLUSTER_ID = "clus-1" + unique_resource_id("-") -TABLE_ID = "tabl-1" + unique_resource_id("-") -COLUMN_FAMILY_ID = "col_fam_id-" + unique_resource_id("-") -LOCATION_ID = "us-central1-f" -ALT_LOCATION_ID = "us-central1-a" -PRODUCTION = enums.Instance.Type.PRODUCTION -SERVER_NODES = 3 -STORAGE_TYPE = enums.StorageType.SSD -LABEL_KEY = u"python-snippet" -LABEL_STAMP = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") -) -LABELS = {LABEL_KEY: str(LABEL_STAMP)} -COLUMN_FAMILY_ID = "col_fam_id1" -COL_NAME1 = b"col-name1" -CELL_VAL1 = b"cell-val" -ROW_KEY1 = b"row_key_id1" -COLUMN_FAMILY_ID2 = "col_fam_id2" -COL_NAME2 = b"col-name2" -CELL_VAL2 = b"cell-val2" -ROW_KEY2 = b"row_key_id2" - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. - """ - - CLIENT = None - INSTANCE = None - TABLE = None - - -def setup_module(): - client = Config.CLIENT = Client(admin=True) - Config.INSTANCE = client.instance( - INSTANCE_ID, instance_type=PRODUCTION, labels=LABELS - ) - cluster = Config.INSTANCE.cluster( - CLUSTER_ID, - location_id=LOCATION_ID, - serve_nodes=SERVER_NODES, - default_storage_type=STORAGE_TYPE, - ) - operation = Config.INSTANCE.create(clusters=[cluster]) - # We want to make sure the operation completes. 
- operation.result(timeout=100) - Config.TABLE = Config.INSTANCE.table(TABLE_ID) - Config.TABLE.create() - gc_rule = column_family.MaxVersionsGCRule(2) - column_family1 = Config.TABLE.column_family(COLUMN_FAMILY_ID, gc_rule=gc_rule) - column_family1.create() - gc_rule2 = column_family.MaxVersionsGCRule(4) - column_family2 = Config.TABLE.column_family(COLUMN_FAMILY_ID2, gc_rule=gc_rule2) - column_family2.create() - - -def teardown_module(): - Config.INSTANCE.delete() - - -def test_bigtable_create_table(): - # [START bigtable_create_table] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - - # Create table without Column families. - table1 = instance.table("table_id1") - table1.create() - - # Create table with Column families. - table2 = instance.table("table_id2") - # Define the GC policy to retain only the most recent 2 versions. - max_versions_rule = column_family.MaxVersionsGCRule(2) - table2.create(column_families={"cf1": max_versions_rule}) - - # [END bigtable_create_table] - assert table1.exists() - assert table2.exists() - table1.delete() - table2.delete() - - -def test_bigtable_sample_row_keys(): - table_sample = Config.INSTANCE.table("table_id1_samplerow") - initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] - table_sample.create(initial_split_keys=initial_split_keys) - assert table_sample.exists() - - # [START bigtable_sample_row_keys] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - - table = instance.table("table_id1_samplerow") - data = table.sample_row_keys() - actual_keys, offset = zip(*[(rk.row_key, rk.offset_bytes) for rk in data]) - # [END bigtable_sample_row_keys] - initial_split_keys.append(b"") - assert list(actual_keys) == initial_split_keys - table.delete() - - -def test_bigtable_write_read_drop_truncate(): - # [START bigtable_mutate_rows] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_20", - b"row_key_22", - b"row_key_200", - ] - col_name = b"col-name1" - rows = [] - for i, row_key in enumerate(row_keys): - value = "value_{}".format(i).encode() - row = table.row(row_key) - row.set_cell( - COLUMN_FAMILY_ID, col_name, value, timestamp=datetime.datetime.utcnow() - ) - rows.append(row) - response = table.mutate_rows(rows) - # validate that all rows written successfully - for i, status in enumerate(response): - if status.code != 0: - print("Row number {} failed to write".format(i)) - # [END bigtable_mutate_rows] - assert len(response) == len(rows) - # [START bigtable_read_row] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row_key = "row_key_1" - row = table.read_row(row_key) - # [END bigtable_read_row] - assert row.row_key.decode("utf-8") == row_key - # [START bigtable_read_rows] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - # Read full table - partial_rows = table.read_rows() - read_rows = [row for row in partial_rows] - # [END bigtable_read_rows] - assert len(read_rows) == len(rows) - # [START bigtable_drop_by_prefix] - from google.cloud.bigtable import 
Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row_key_prefix = b"row_key_2" - table.drop_by_prefix(row_key_prefix, timeout=200) - # [END bigtable_drop_by_prefix] - dropped_row_keys = [b"row_key_2", b"row_key_20", b"row_key_22", b"row_key_200"] - for row in table.read_rows(): - assert row.row_key.decode("utf-8") not in dropped_row_keys - - # [START bigtable_truncate_table] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - table.truncate(timeout=200) - # [END bigtable_truncate_table] - rows_data_after_truncate = [] - for row in table.read_rows(): - rows_data_after_truncate.append(row.row_key) - assert rows_data_after_truncate == [] - - -def test_bigtable_mutations_batcher(): - # [START bigtable_mutations_batcher] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - batcher = table.mutations_batcher() - # [END bigtable_mutations_batcher] - - # Below code will be used while creating batcher.py snippets. - # So not removing this code as of now. - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_20", - b"row_key_22", - b"row_key_200", - ] - column_name = "column_name".encode() - # Add a single row - row_key = row_keys[0] - row = table.row(row_key) - row.set_cell( - COLUMN_FAMILY_ID, column_name, "value-0", timestamp=datetime.datetime.utcnow() - ) - batcher.mutate(row) - # Add a collections of rows - rows = [] - for i in range(1, len(row_keys)): - row = table.row(row_keys[i]) - value = "value_{}".format(i).encode() - row.set_cell( - COLUMN_FAMILY_ID, column_name, value, timestamp=datetime.datetime.utcnow() - ) - rows.append(row) - batcher.mutate_rows(rows) - # batcher will flush current batch if it - # reaches the max flush_count - - # Manually send the current batch to Cloud Bigtable - batcher.flush() - rows_on_table = [] - for row in table.read_rows(): - rows_on_table.append(row.row_key) - assert len(rows_on_table) == len(row_keys) - table.truncate(timeout=200) - - -def test_bigtable_table_column_family(): - # [START bigtable_table_column_family] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - - table = instance.table(TABLE_ID) - column_family_obj = table.column_family(COLUMN_FAMILY_ID) - # [END bigtable_table_column_family] - - assert column_family_obj.column_family_id == COLUMN_FAMILY_ID - - -def test_bigtable_list_tables(): - # [START bigtable_list_tables] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - tables_list = instance.list_tables() - # [END bigtable_list_tables] - assert len(tables_list) != 0 - - -def test_bigtable_table_name(): - import re - - # [START bigtable_table_name] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - - table = instance.table(TABLE_ID) - table_name = table.name - # [END bigtable_table_name] - _table_name_re = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P[^/]+)/tables/" - r"(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" - ) - assert _table_name_re.match(table_name) - - -def test_bigtable_list_column_families(): - # [START bigtable_list_column_families] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = 
client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - column_family_list = table.list_column_families() - # [END bigtable_list_column_families] - - assert len(column_family_list) > 0 - - -def test_bigtable_get_cluster_states(): - # [START bigtable_get_cluster_states] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - get_cluster_states = table.get_cluster_states() - # [END bigtable_get_cluster_states] - - assert CLUSTER_ID in get_cluster_states - - -def test_bigtable_table_test_iam_permissions(): - table_policy = Config.INSTANCE.table("table_id_iam_policy") - table_policy.create() - assert table_policy.exists - - # [START bigtable_table_test_iam_permissions] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table("table_id_iam_policy") - - permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] - permissions_allowed = table.test_iam_permissions(permissions) - # [END bigtable_table_test_iam_permissions] - assert permissions_allowed == permissions - - -def test_bigtable_table_set_iam_policy_then_get_iam_policy(): - table_policy = Config.INSTANCE.table("table_id_iam_policy") - assert table_policy.exists - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_table_set_iam_policy] - from google.cloud.bigtable import Client - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table("table_id_iam_policy") - new_policy = Policy() - new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] - - policy_latest = table.set_iam_policy(new_policy) - # [END bigtable_table_set_iam_policy] - assert len(policy_latest.bigtable_admins) > 0 - - # [START bigtable_table_get_iam_policy] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table("table_id_iam_policy") - policy = table.get_iam_policy() - # [END bigtable_table_get_iam_policy] - assert len(policy.bigtable_admins) > 0 - - -def test_bigtable_table_exists(): - # [START bigtable_check_table_exists] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - table_exists = table.exists() - # [END bigtable_check_table_exists] - assert table_exists - - -def test_bigtable_delete_table(): - table_del = Config.INSTANCE.table("table_id_del") - table_del.create() - assert table_del.exists() - - # [START bigtable_delete_table] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table("table_id_del") - - table.delete() - # [END bigtable_delete_table] - assert not table.exists() - - -def test_bigtable_table_row(): - # [START bigtable_table_row] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_keys = [b"row_key_1", b"row_key_2"] - row1_obj = table.row(row_keys[0]) - row2_obj = table.row(row_keys[1]) - # [END bigtable_table_row] - - row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - row1_obj.commit() - row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - 
row2_obj.commit() - - written_row_keys = [] - for row in table.read_rows(): - written_row_keys.append(row.row_key) - - assert written_row_keys == row_keys - - table.truncate(timeout=300) - - -def test_bigtable_table_append_row(): - # [START bigtable_table_append_row] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_keys = [b"row_key_1", b"row_key_2"] - row1_obj = table.append_row(row_keys[0]) - row2_obj = table.append_row(row_keys[1]) - # [END bigtable_table_append_row] - - row1_obj.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - row1_obj.commit() - row2_obj.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - row2_obj.commit() - - written_row_keys = [] - for row in table.read_rows(): - written_row_keys.append(row.row_key) - - assert written_row_keys == row_keys - - table.truncate(timeout=300) - - -def test_bigtable_table_direct_row(): - # [START bigtable_table_direct_row] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_keys = [b"row_key_1", b"row_key_2"] - row1_obj = table.direct_row(row_keys[0]) - row2_obj = table.direct_row(row_keys[1]) - # [END bigtable_table_direct_row] - - row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - row1_obj.commit() - row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - row2_obj.commit() - - written_row_keys = [] - for row in table.read_rows(): - written_row_keys.append(row.row_key) - - assert written_row_keys == row_keys - - table.truncate(timeout=300) - - -def test_bigtable_table_conditional_row(): - # [START bigtable_table_conditional_row] - from google.cloud.bigtable import Client - from google.cloud.bigtable.row_filters import PassAllFilter - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_keys = [b"row_key_1", b"row_key_2"] - filter_ = PassAllFilter(True) - row1_obj = table.conditional_row(row_keys[0], filter_=filter_) - row2_obj = table.conditional_row(row_keys[1], filter_=filter_) - # [END bigtable_table_conditional_row] - - row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1, state=False) - row1_obj.commit() - row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1, state=False) - row2_obj.commit() - - written_row_keys = [] - for row in table.read_rows(): - written_row_keys.append(row.row_key) - - assert written_row_keys == row_keys - - table.truncate(timeout=300) - - -def test_bigtable_column_family_name(): - # [START bigtable_column_family_name] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - column_families = table.list_column_families() - column_family_obj = column_families[COLUMN_FAMILY_ID] - column_family_name = column_family_obj.name - # [END bigtable_column_family_name] - import re - - _cf_name_re = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P[^/]+)/tables/" - r"(?P[^/]+)/columnFamilies/" - r"(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" - ) - assert _cf_name_re.match(column_family_name) - - -def test_bigtable_create_update_delete_column_family(): - # [START bigtable_create_column_family] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - column_family_id = 
"column_family_id1" - gc_rule = column_family.MaxVersionsGCRule(2) - column_family_obj = table.column_family(column_family_id, gc_rule=gc_rule) - column_family_obj.create() - - # [END bigtable_create_column_family] - column_families = table.list_column_families() - assert column_families[column_family_id].gc_rule == gc_rule - - # [START bigtable_update_column_family] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - # Already existing column family id - column_family_id = "column_family_id1" - # Define the GC rule to retain data with max age of 5 days - max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) - column_family_obj = table.column_family(column_family_id, gc_rule=max_age_rule) - column_family_obj.update() - # [END bigtable_update_column_family] - - updated_families = table.list_column_families() - assert updated_families[column_family_id].gc_rule == max_age_rule - - # [START bigtable_delete_column_family] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - column_family_id = "column_family_id1" - column_family_obj = table.column_family(column_family_id) - column_family_obj.delete() - # [END bigtable_delete_column_family] - column_families = table.list_column_families() - assert column_family_id not in column_families - - -def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - b"row_key_7", - b"row_key_8", - b"row_key_9", - ] - - rows = [] - for row_key in row_keys: - row = Config.TABLE.row(row_key) - row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - rows.append(row) - Config.TABLE.mutate_rows(rows) - - # [START bigtable_add_row_key] - from google.cloud.bigtable import Client - from google.cloud.bigtable.row_set import RowSet - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_set = RowSet() - row_set.add_row_key(b"row_key_5") - # [END bigtable_add_row_key] - - read_rows = table.read_rows(row_set=row_set) - expected_row_keys = [b"row_key_5"] - found_row_keys = [row.row_key for row in read_rows] - assert found_row_keys == expected_row_keys - - # [START bigtable_add_row_range] - from google.cloud.bigtable import Client - from google.cloud.bigtable.row_set import RowSet - from google.cloud.bigtable.row_set import RowRange - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_set = RowSet() - row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) - # [END bigtable_add_row_range] - - read_rows = table.read_rows(row_set=row_set) - expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"] - found_row_keys = [row.row_key for row in read_rows] - assert found_row_keys == expected_row_keys - - # [START bigtable_row_range_from_keys] - from google.cloud.bigtable import Client - from google.cloud.bigtable.row_set import RowSet - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_set = RowSet() - row_set.add_row_range_from_keys(start_key=b"row_key_3", end_key=b"row_key_7") - # [END bigtable_row_range_from_keys] - - 
read_rows = table.read_rows(row_set=row_set) - expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"] - found_row_keys = [row.row_key for row in read_rows] - assert found_row_keys == expected_row_keys - - table.truncate(timeout=200) - - -def test_bigtable_batcher_mutate_flush_mutate_rows(): - # [START bigtable_batcher_mutate] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - # Batcher for max row bytes, max_row_bytes=1024 is optional. - batcher = table.mutations_batcher(max_row_bytes=1024) - - # Add a single row - row_key = b"row_key_1" - row = table.row(row_key) - row.set_cell( - COLUMN_FAMILY_ID, COL_NAME1, "value-0", timestamp=datetime.datetime.utcnow() - ) - - # In batcher, mutate will flush current batch if it - # reaches the max_row_bytes - batcher.mutate(row) - batcher.flush() - # [END bigtable_batcher_mutate] - - # [START bigtable_batcher_flush] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - # Batcher for max row bytes, max_row_bytes=1024 is optional. - batcher = table.mutations_batcher(max_row_bytes=1024) - - # Add a single row - row_key = b"row_key" - row = table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, "value-0") - - # In batcher, mutate will flush current batch if it - # reaches the max_row_bytes - batcher.mutate(row) - batcher.flush() - # [END bigtable_batcher_flush] - - rows_on_table = [] - for row in table.read_rows(): - rows_on_table.append(row.row_key) - assert len(rows_on_table) == 2 - table.truncate(timeout=200) - - # [START bigtable_batcher_mutate_rows] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - batcher = table.mutations_batcher() - - row1 = table.row(b"row_key_1") - row2 = table.row(b"row_key_2") - row3 = table.row(b"row_key_3") - row4 = table.row(b"row_key_4") - - row1.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val1") - row2.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val2") - row3.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val3") - row4.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val4") - - batcher.mutate_rows([row1, row2, row3, row4]) - - # batcher will flush current batch if it - # reaches the max flush_count - # Manually send the current batch to Cloud Bigtable - batcher.flush() - # [END bigtable_batcher_mutate_rows] - - rows_on_table = [] - for row in table.read_rows(): - rows_on_table.append(row.row_key) - assert len(rows_on_table) == 4 - table.truncate(timeout=200) - - -def test_bigtable_create_family_gc_max_age(): - # [START bigtable_create_family_gc_max_age] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - # Define the GC rule to retain data with max age of 5 days - max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) - - column_family_obj = table.column_family("cf1", max_age_rule) - column_family_obj.create() - - # [END bigtable_create_family_gc_max_age] - rule = str(column_family_obj.to_pb()) - assert "max_age" in rule - assert "seconds: 432000" in rule - column_family_obj.delete() - - -def test_bigtable_create_family_gc_max_versions(): - # [START bigtable_create_family_gc_max_versions] - from google.cloud.bigtable import 
Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - # Define the GC policy to retain only the most recent 2 versions - max_versions_rule = column_family.MaxVersionsGCRule(2) - - column_family_obj = table.column_family("cf2", max_versions_rule) - column_family_obj.create() - - # [END bigtable_create_family_gc_max_versions] - rule = str(column_family_obj.to_pb()) - assert "max_num_versions: 2" in rule - column_family_obj.delete() - - -def test_bigtable_create_family_gc_union(): - # [START bigtable_create_family_gc_union] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - max_versions_rule = column_family.MaxVersionsGCRule(2) - max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) - - union_rule = column_family.GCRuleUnion([max_versions_rule, max_age_rule]) - - column_family_obj = table.column_family("cf3", union_rule) - column_family_obj.create() - - # [END bigtable_create_family_gc_union] - rule = str(column_family_obj.to_pb()) - assert "union" in rule - assert "max_age" in rule - assert "seconds: 432000" in rule - assert "max_num_versions: 2" in rule - column_family_obj.delete() - - -def test_bigtable_create_family_gc_intersection(): - # [START bigtable_create_family_gc_intersection] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - max_versions_rule = column_family.MaxVersionsGCRule(2) - max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) - - intersection_rule = column_family.GCRuleIntersection( - [max_versions_rule, max_age_rule] - ) - - column_family_obj = table.column_family("cf4", intersection_rule) - column_family_obj.create() - - # [END bigtable_create_family_gc_intersection] - - rule = str(column_family_obj.to_pb()) - assert "intersection" in rule - assert "max_num_versions: 2" in rule - assert "max_age" in rule - assert "seconds: 432000" in rule - column_family_obj.delete() - - -def test_bigtable_create_family_gc_nested(): - # [START bigtable_create_family_gc_nested] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - # Create a column family with nested GC policies. 
- # Create a nested GC rule: - # Drop cells that are either older than the 10 recent versions - # OR - # Drop cells that are older than a month AND older than the - # 2 recent versions - rule1 = column_family.MaxVersionsGCRule(10) - rule2 = column_family.GCRuleIntersection( - [ - column_family.MaxAgeGCRule(datetime.timedelta(days=5)), - column_family.MaxVersionsGCRule(2), - ] - ) - - nested_rule = column_family.GCRuleUnion([rule1, rule2]) - - column_family_obj = table.column_family("cf5", nested_rule) - column_family_obj.create() - - # [END bigtable_create_family_gc_nested] - - rule = str(column_family_obj.to_pb()) - assert "intersection" in rule - assert "max_num_versions: 2" in rule - assert "max_age" in rule - assert "seconds: 432000" in rule - column_family_obj.delete() - - -def test_bigtable_row_data_cells_cell_value_cell_values(): - - value = b"value_in_col1" - row = Config.TABLE.row(b"row_key_1") - row.set_cell( - COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow() - ) - row.commit() - - row.set_cell( - COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow() - ) - row.commit() - - # [START bigtable_row_data_cells] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row_key = "row_key_1" - row_data = table.read_row(row_key) - - cells = row_data.cells - # [END bigtable_row_data_cells] - - actual_cell_value = cells[COLUMN_FAMILY_ID][COL_NAME1][0].value - assert actual_cell_value == value - - # [START bigtable_row_cell_value] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row_key = "row_key_1" - row_data = table.read_row(row_key) - - cell_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1) - # [END bigtable_row_cell_value] - assert cell_value == value - - # [START bigtable_row_cell_values] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row_key = "row_key_1" - row_data = table.read_row(row_key) - - cell_values = row_data.cell_values(COLUMN_FAMILY_ID, COL_NAME1) - # [END bigtable_row_cell_values] - - for actual_value, timestamp in cell_values: - assert actual_value == value - - value2 = b"value_in_col2" - row.set_cell(COLUMN_FAMILY_ID, COL_NAME2, value2) - row.commit() - - # [START bigtable_row_find_cells] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row_key = "row_key_1" - row = table.read_row(row_key) - - cells = row.find_cells(COLUMN_FAMILY_ID, COL_NAME2) - # [END bigtable_row_find_cells] - - assert cells[0].value == value2 - table.truncate(timeout=200) - - -def test_bigtable_row_setcell_rowkey(): - # [START bigtable_row_set_cell] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row = table.row(ROW_KEY1) - - cell_val = b"cell-val" - row.set_cell( - COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.datetime.utcnow() - ) - # [END bigtable_row_set_cell] - - response = table.mutate_rows([row]) - # validate that all rows written successfully - for i, status in enumerate(response): - assert status.code == 0 - - # [START bigtable_row_row_key] - from google.cloud.bigtable import Client - - client = Client(admin=True) - 
instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row = table.row(ROW_KEY1) - row_key = row.row_key - # [END bigtable_row_row_key] - assert row_key == ROW_KEY1 - - # [START bigtable_row_table] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row = table.row(ROW_KEY1) - table1 = row.table - # [END bigtable_row_table] - - assert table1 == table - table.truncate(timeout=200) - - -def test_bigtable_row_delete(): - table_row_del = Config.INSTANCE.table(TABLE_ID) - row_obj = table_row_del.row(b"row_key_1") - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") - row_obj.commit() - written_row_keys = [] - for row in table_row_del.read_rows(): - written_row_keys.append(row.row_key) - assert written_row_keys == [b"row_key_1"] - - # [START bigtable_row_delete] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key = b"row_key_1" - row_obj = table.row(row_key) - - row_obj.delete() - row_obj.commit() - # [END bigtable_row_delete] - - written_row_keys = [] - for row in table.read_rows(): - written_row_keys.append(row.row_key) - assert len(written_row_keys) == 0 - - -def test_bigtable_row_delete_cell(): - table_row_del_cell = Config.INSTANCE.table(TABLE_ID) - row_key1 = b"row_key_1" - row_obj = table_row_del_cell.row(row_key1) - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - row_obj.commit() - - written_row_keys = [] - for row in table_row_del_cell.read_rows(): - written_row_keys.append(row.row_key) - assert written_row_keys == [row_key1] - - # [START bigtable_row_delete_cell] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key = b"row_key_1" - row_obj = table.row(row_key) - - row_obj.delete_cell(COLUMN_FAMILY_ID, COL_NAME1) - row_obj.commit() - # [END bigtable_row_delete_cell] - - for row in table.read_rows(): - assert not row.row_key - - -def test_bigtable_row_delete_cells(): - table_row_del_cells = Config.INSTANCE.table(TABLE_ID) - row_key1 = b"row_key_1" - row_obj = table_row_del_cells.row(row_key1) - - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) - row_obj.commit() - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME2, CELL_VAL2) - row_obj.commit() - - written_row_keys = [] - for row in table_row_del_cells.read_rows(): - written_row_keys.append(row.row_key) - assert written_row_keys == [row_key1] - - # [START bigtable_row_delete_cells] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key = b"row_key_1" - row_obj = table.row(row_key) - - row_obj.delete_cells(COLUMN_FAMILY_ID, [COL_NAME1, COL_NAME2]) - row_obj.commit() - # [END bigtable_row_delete_cells] - - for row in table.read_rows(): - assert not row.row_key - - -def test_bigtable_row_clear(): - table_row_clear = Config.INSTANCE.table(TABLE_ID) - row_obj = table_row_clear.row(b"row_key_1") - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") - - mutation_size = row_obj.get_mutations_size() - assert mutation_size > 0 - - # [START bigtable_row_clear] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key = b"row_key_1" - row_obj = table.row(row_key) - 
row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") - - row_obj.clear() - # [END bigtable_row_clear] - - mutation_size = row_obj.get_mutations_size() - assert mutation_size == 0 - - -def test_bigtable_row_clear_get_mutations_size(): - # [START bigtable_row_get_mutations_size] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key_id = b"row_key_1" - row_obj = table.row(row_key_id) - - mutation_size = row_obj.get_mutations_size() - # [END bigtable_row_get_mutations_size] - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") - mutation_size = row_obj.get_mutations_size() - assert mutation_size > 0 - - row_obj.clear() - mutation_size = row_obj.get_mutations_size() - assert mutation_size == 0 - - -def test_bigtable_row_setcell_commit_rowkey(): - # [START bigtable_row_set_cell] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key = b"row_key_1" - cell_val = b"cell-val" - row_obj = table.row(row_key) - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) - # [END bigtable_row_set_cell] - row_obj.commit() - - # [START bigtable_row_commit] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key = b"row_key_2" - cell_val = b"cell-val" - row_obj = table.row(row_key) - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) - row_obj.commit() - # [END bigtable_row_commit] - - written_row_keys = [] - for row in table.read_rows(): - written_row_keys.append(row.row_key) - - assert written_row_keys == [b"row_key_1", b"row_key_2"] - - # [START bigtable_row_row_key] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key_id = b"row_key_2" - row_obj = table.row(row_key_id) - row_key = row_obj.row_key - # [END bigtable_row_row_key] - assert row_key == row_key_id - table.truncate(timeout=300) - - -def test_bigtable_row_append_cell_value(): - row = Config.TABLE.row(ROW_KEY1) - - cell_val1 = b"1" - row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val1) - row.commit() - - # [START bigtable_row_append_cell_value] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row = table.row(ROW_KEY1, append=True) - - cell_val2 = b"2" - row.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, cell_val2) - # [END bigtable_row_append_cell_value] - row.commit() - - row_data = table.read_row(ROW_KEY1) - actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1) - assert actual_value == cell_val1 + cell_val2 - - # [START bigtable_row_commit] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row = Config.TABLE.row(ROW_KEY2) - cell_val = 1 - row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) - row.commit() - # [END bigtable_row_commit] - - # [START bigtable_row_increment_cell_value] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - row = table.row(ROW_KEY2, append=True) - - int_val = 3 - row.increment_cell_value(COLUMN_FAMILY_ID, COL_NAME1, int_val) - # [END 
bigtable_row_increment_cell_value] - row.commit() - - row_data = table.read_row(ROW_KEY2) - actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1) - - import struct - - _PACK_I64 = struct.Struct(">q").pack - assert actual_value == _PACK_I64(cell_val + int_val) - table.truncate(timeout=200) - - -if __name__ == "__main__": - pytest.main() diff --git a/bigtable/docs/table-api.rst b/bigtable/docs/table-api.rst deleted file mode 100644 index 5168aad49ff7..000000000000 --- a/bigtable/docs/table-api.rst +++ /dev/null @@ -1,154 +0,0 @@ -Table Admin API -=============== - -After creating an :class:`Instance `, you can -interact with individual tables, groups of tables or column families within -a table. - -List Tables ------------ - -If you want a comprehensive list of all existing tables in a instance, make a -`ListTables`_ API request with -:meth:`Instance.list_tables() `: - -.. code:: python - - >>> instance.list_tables() - [, - ] - -Table Factory -------------- - -To create a :class:`Table ` object: - -.. code:: python - - table = instance.table(table_id) - -Even if this :class:`Table ` already -has been created with the API, you'll want this object to use as a -parent of a :class:`ColumnFamily ` -or :class:`Row `. - -Create a new Table ------------------- - -After creating the table object, make a `CreateTable`_ API request -with :meth:`create() `: - -.. code:: python - - table.create() - -If you would like to initially split the table into several tablets (tablets are -similar to HBase regions): - -.. code:: python - - table.create(initial_split_keys=['s1', 's2']) - -Delete an existing Table ------------------------- - -Make a `DeleteTable`_ API request with -:meth:`delete() `: - -.. code:: python - - table.delete() - -List Column Families in a Table -------------------------------- - -Though there is no **official** method for retrieving `column families`_ -associated with a table, the `GetTable`_ API method returns a -table object with the names of the column families. - -To retrieve the list of column families use -:meth:`list_column_families() `: - -.. code:: python - - column_families = table.list_column_families() - -Column Family Factory ---------------------- - -To create a -:class:`ColumnFamily ` object: - -.. code:: python - - column_family = table.column_family(column_family_id) - -There is no real reason to use this factory unless you intend to -create or delete a column family. - -In addition, you can specify an optional ``gc_rule`` (a -:class:`GarbageCollectionRule ` -or similar): - -.. code:: python - - column_family = table.column_family(column_family_id, - gc_rule=gc_rule) - -This rule helps the backend determine when and how to clean up old cells -in the column family. - -See :doc:`column-family` for more information about -:class:`GarbageCollectionRule ` -and related classes. - -Create a new Column Family --------------------------- - -After creating the column family object, make a `CreateColumnFamily`_ API -request with -:meth:`ColumnFamily.create() ` - -.. code:: python - - column_family.create() - -Delete an existing Column Family --------------------------------- - -Make a `DeleteColumnFamily`_ API request with -:meth:`ColumnFamily.delete() ` - -.. code:: python - - column_family.delete() - -Update an existing Column Family --------------------------------- - -Make an `UpdateColumnFamily`_ API request with -:meth:`ColumnFamily.delete() ` - -.. 
code:: python - - column_family.update() - -Next Step ---------- - -Now we go down the final step of the hierarchy from -:class:`Table ` to -:class:`Row ` as well as streaming -data directly via a :class:`Table `. - -Head next to learn about the :doc:`data-api`. - -.. _ListTables: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L40-L42 -.. _CreateTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L35-L37 -.. _DeleteTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L50-L52 -.. _RenameTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L56-L58 -.. _GetTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L45-L47 -.. _CreateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L61-L63 -.. _UpdateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L66-L68 -.. _DeleteColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L71-L73 -.. _column families: https://cloud.google.com/bigtable/docs/schema-design#column_families_and_column_qualifiers diff --git a/bigtable/docs/table.rst b/bigtable/docs/table.rst deleted file mode 100644 index c230725d1351..000000000000 --- a/bigtable/docs/table.rst +++ /dev/null @@ -1,6 +0,0 @@ -Table -~~~~~ - -.. automodule:: google.cloud.bigtable.table - :members: - :show-inheritance: diff --git a/bigtable/docs/usage.rst b/bigtable/docs/usage.rst deleted file mode 100644 index aa8d899d58cb..000000000000 --- a/bigtable/docs/usage.rst +++ /dev/null @@ -1,28 +0,0 @@ -Using the API -============= - -.. toctree:: - :maxdepth: 2 - - client-intro - client - cluster - instance - table - column-family - row - row-data - row-filters - - -In the hierarchy of API concepts - -* a :class:`Client ` owns an - :class:`Instance ` -* an :class:`Instance ` owns a - :class:`Table ` -* a :class:`Table ` owns a - :class:`ColumnFamily ` -* a :class:`Table ` owns a - :class:`Row ` - (and all the cells in the row) diff --git a/bigtable/google/__init__.py b/bigtable/google/__init__.py deleted file mode 100644 index abc37089339e..000000000000 --- a/bigtable/google/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
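The hierarchy described in usage.rst above (a Client owns an Instance, an Instance owns a Table, and a Table owns column families and rows) can be walked top to bottom in a few lines. A minimal sketch, assuming placeholder instance and table IDs and a new column family "cf1":

    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)                # a Client owns Instances
    instance = client.instance("my-instance")  # an Instance owns Tables
    table = instance.table("my-table")         # a Table owns column families and rows

    cf = table.column_family("cf1", gc_rule=column_family.MaxVersionsGCRule(1))
    row = table.direct_row(b"row-key-1")
    row.set_cell("cf1", b"greeting", b"hello world")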
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Bigtable API package.""" - - -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/bigtable/google/cloud/__init__.py b/bigtable/google/cloud/__init__.py deleted file mode 100644 index 2f4b4738aee1..000000000000 --- a/bigtable/google/cloud/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/bigtable/google/cloud/bigtable.py b/bigtable/google/cloud/bigtable.py deleted file mode 100644 index 72858878e8a7..000000000000 --- a/bigtable/google/cloud/bigtable.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import - -from google.cloud.bigtable_v2 import BigtableClient -from google.cloud.bigtable_v2 import types - -__all__ = ("types", "BigtableClient") diff --git a/bigtable/google/cloud/bigtable/__init__.py b/bigtable/google/cloud/bigtable/__init__.py deleted file mode 100644 index 75b765a8a0da..000000000000 --- a/bigtable/google/cloud/bigtable/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Bigtable API package.""" - - -from pkg_resources import get_distribution - -__version__ = get_distribution("google-cloud-bigtable").version - -from google.cloud.bigtable.client import Client - - -__all__ = ["__version__", "Client"] diff --git a/bigtable/google/cloud/bigtable/app_profile.py b/bigtable/google/cloud/bigtable/app_profile.py deleted file mode 100644 index cb04ebfc78c7..000000000000 --- a/bigtable/google/cloud/bigtable/app_profile.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright 2018 Google LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User-friendly container for Google Cloud Bigtable AppProfile.""" - - -import re - -from google.cloud.bigtable.enums import RoutingPolicyType -from google.cloud.bigtable_admin_v2.types import instance_pb2 -from google.protobuf import field_mask_pb2 -from google.api_core.exceptions import NotFound - -_APP_PROFILE_NAME_RE = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P[^/]+)/" - r"appProfiles/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" -) - - -class AppProfile(object): - """Representation of a Google Cloud Bigtable AppProfile. - - We can use a :class:`AppProfile` to: - - * :meth:`reload` itself - * :meth:`create` itself - * :meth:`update` itself - * :meth:`delete` itself - - :type app_profile_id: str - :param app_profile_id: The ID of the AppProfile. Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type: routing_policy_type: int - :param: routing_policy_type: (Optional) The type of the routing policy. - Possible values are represented - by the following constants: - :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` - :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` - - :type: description: str - :param: description: (Optional) Long form description of the use - case for this AppProfile. - - :type: cluster_id: str - :param: cluster_id: (Optional) Unique cluster_id which is only required - when routing_policy_type is - ROUTING_POLICY_TYPE_SINGLE. - - :type: allow_transactional_writes: bool - :param: allow_transactional_writes: (Optional) If true, allow - transactional writes for - ROUTING_POLICY_TYPE_SINGLE. - """ - - def __init__( - self, - app_profile_id, - instance, - routing_policy_type=None, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ): - self.app_profile_id = app_profile_id - self._instance = instance - self.routing_policy_type = routing_policy_type - self.description = description - self.cluster_id = cluster_id - self.allow_transactional_writes = allow_transactional_writes - - @property - def name(self): - """AppProfile name used in requests. - - .. note:: - - This property will not change if ``app_profile_id`` does not, but - the return value is not cached. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_app_profile_name] - :end-before: [END bigtable_app_profile_name] - - The AppProfile name is of the form - ``"projects/../instances/../app_profile/{app_profile_id}"`` - - :rtype: str - :returns: The AppProfile name. - """ - return self.instance_admin_client.app_profile_path( - self._instance._client.project, - self._instance.instance_id, - self.app_profile_id, - ) - - @property - def instance_admin_client(self): - """Shortcut to instance_admin_client - - :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` - :returns: A BigtableInstanceAdmin instance. - """ - return self._instance._client.instance_admin_client - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - # NOTE: This does not compare the configuration values, such as - # the routing_policy_type. 
Instead, it only compares - # identifying values instance, AppProfile ID and client. This is - # intentional, since the same AppProfile can be in different - # states if not synchronized. - return ( - other.app_profile_id == self.app_profile_id - and other._instance == self._instance - ) - - def __ne__(self, other): - return not self == other - - @classmethod - def from_pb(cls, app_profile_pb, instance): - """Creates an instance app_profile from a protobuf. - - :type app_profile_pb: :class:`instance_pb2.app_profile_pb` - :param app_profile_pb: An instance protobuf object. - - :type instance: :class:`google.cloud.bigtable.instance.Instance` - :param instance: The instance that owns the cluster. - - :rtype: :class:`AppProfile` - :returns: The AppProfile parsed from the protobuf response. - - :raises: :class:`ValueError ` if the AppProfile - name does not match - ``projects/{project}/instances/{instance_id}/appProfiles/{app_profile_id}`` - or if the parsed instance ID does not match the istance ID - on the client. - or if the parsed project ID does not match the project ID - on the client. - """ - match_app_profile_name = _APP_PROFILE_NAME_RE.match(app_profile_pb.name) - if match_app_profile_name is None: - raise ValueError( - "AppProfile protobuf name was not in the " "expected format.", - app_profile_pb.name, - ) - if match_app_profile_name.group("instance") != instance.instance_id: - raise ValueError( - "Instance ID on app_profile does not match the " - "instance ID on the client" - ) - if match_app_profile_name.group("project") != instance._client.project: - raise ValueError( - "Project ID on app_profile does not match the " - "project ID on the client" - ) - app_profile_id = match_app_profile_name.group("app_profile_id") - - result = cls(app_profile_id, instance) - result._update_from_pb(app_profile_pb) - return result - - def _update_from_pb(self, app_profile_pb): - """Refresh self from the server-provided protobuf. - Helper for :meth:`from_pb` and :meth:`reload`. - """ - self.routing_policy_type = None - self.allow_transactional_writes = None - self.cluster_id = None - - self.description = app_profile_pb.description - - routing_policy_type = None - if app_profile_pb.HasField("multi_cluster_routing_use_any"): - routing_policy_type = RoutingPolicyType.ANY - self.allow_transactional_writes = False - else: - routing_policy_type = RoutingPolicyType.SINGLE - self.cluster_id = app_profile_pb.single_cluster_routing.cluster_id - self.allow_transactional_writes = ( - app_profile_pb.single_cluster_routing.allow_transactional_writes - ) - self.routing_policy_type = routing_policy_type - - def _to_pb(self): - """Create an AppProfile proto buff message for API calls - :rtype: :class:`.instance_pb2.AppProfile` - :returns: The converted current object. 
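The AppProfile docstrings above distinguish two routing policies: RoutingPolicyType.ANY (multi-cluster routing) and RoutingPolicyType.SINGLE (pinned to one cluster, optionally allowing transactional writes). A minimal creation sketch, assuming placeholder instance, cluster, and app profile IDs and the Instance.app_profile() factory from the same package:

    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    client = Client(admin=True)
    instance = client.instance("my-instance")  # placeholder instance ID

    app_profile = instance.app_profile(
        "my-app-profile",  # placeholder app profile ID
        routing_policy_type=enums.RoutingPolicyType.SINGLE,
        description="routes all requests to a single cluster",
        cluster_id="my-cluster",  # placeholder cluster ID
        allow_transactional_writes=False,
    )
    app_profile = app_profile.create(ignore_warnings=True)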
- - :raises: :class:`ValueError ` if the AppProfile - routing_policy_type is not set - """ - if not self.routing_policy_type: - raise ValueError("AppProfile required routing policy.") - - single_cluster_routing = None - multi_cluster_routing_use_any = None - - if self.routing_policy_type == RoutingPolicyType.ANY: - multi_cluster_routing_use_any = ( - instance_pb2.AppProfile.MultiClusterRoutingUseAny() - ) - else: - single_cluster_routing = instance_pb2.AppProfile.SingleClusterRouting( - cluster_id=self.cluster_id, - allow_transactional_writes=self.allow_transactional_writes, - ) - - app_profile_pb = instance_pb2.AppProfile( - name=self.name, - description=self.description, - multi_cluster_routing_use_any=multi_cluster_routing_use_any, - single_cluster_routing=single_cluster_routing, - ) - return app_profile_pb - - def reload(self): - """Reload the metadata for this cluster - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_reload_app_profile] - :end-before: [END bigtable_reload_app_profile] - """ - - app_profile_pb = self.instance_admin_client.get_app_profile(self.name) - - # NOTE: _update_from_pb does not check that the project and - # app_profile ID on the response match the request. - self._update_from_pb(app_profile_pb) - - def exists(self): - """Check whether the AppProfile already exists. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_app_profile_exists] - :end-before: [END bigtable_app_profile_exists] - - :rtype: bool - :returns: True if the AppProfile exists, else False. - """ - try: - self.instance_admin_client.get_app_profile(self.name) - return True - # NOTE: There could be other exceptions that are returned to the user. - except NotFound: - return False - - def create(self, ignore_warnings=None): - """Create this AppProfile. - - .. note:: - - Uses the ``instance`` and ``app_profile_id`` on the current - :class:`AppProfile` in addition to the ``routing_policy_type``, - ``description``, ``cluster_id`` and ``allow_transactional_writes``. - To change them before creating, reset the values via - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_create_app_profile] - :end-before: [END bigtable_create_app_profile] - - :type: ignore_warnings: bool - :param: ignore_warnings: (Optional) If true, ignore safety checks when - creating the AppProfile. - """ - return self.from_pb( - self.instance_admin_client.create_app_profile( - parent=self._instance.name, - app_profile_id=self.app_profile_id, - app_profile=self._to_pb(), - ignore_warnings=ignore_warnings, - ), - self._instance, - ) - - def update(self, ignore_warnings=None): - """Update this app_profile. - - .. note:: - - Update any or all of the following values: - ``routing_policy_type`` - ``description`` - ``cluster_id`` - ``allow_transactional_writes`` - - For example: - - .. 
literalinclude:: snippets.py - :start-after: [START bigtable_update_app_profile] - :end-before: [END bigtable_update_app_profile] - """ - update_mask_pb = field_mask_pb2.FieldMask() - - if self.description is not None: - update_mask_pb.paths.append("description") - - if self.routing_policy_type == RoutingPolicyType.ANY: - update_mask_pb.paths.append("multi_cluster_routing_use_any") - else: - update_mask_pb.paths.append("single_cluster_routing") - - return self.instance_admin_client.update_app_profile( - app_profile=self._to_pb(), - update_mask=update_mask_pb, - ignore_warnings=ignore_warnings, - ) - - def delete(self, ignore_warnings=None): - """Delete this AppProfile. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_delete_app_profile] - :end-before: [END bigtable_delete_app_profile] - - :type: ignore_warnings: bool - :param: ignore_warnings: If true, ignore safety checks when deleting - the AppProfile. - - :raises: google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. google.api_core.exceptions.RetryError: - If the request failed due to a retryable error and retry - attempts failed. ValueError: If the parameters are invalid. - """ - self.instance_admin_client.delete_app_profile(self.name, ignore_warnings) diff --git a/bigtable/google/cloud/bigtable/batcher.py b/bigtable/google/cloud/bigtable/batcher.py deleted file mode 100644 index 3a649049b66d..000000000000 --- a/bigtable/google/cloud/bigtable/batcher.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Google Cloud Bigtable MutationBatcher.""" - - -FLUSH_COUNT = 1000 -MAX_MUTATIONS = 100000 -MAX_ROW_BYTES = 5242880 # 5MB - - -class MaxMutationsError(ValueError): - """The number of mutations for bulk request is too big.""" - - -class MutationsBatcher(object): - """ A MutationsBatcher is used in batch cases where the number of mutations - is large or unknown. It will store DirectRows in memory until one of the - size limits is reached, or an explicit call to flush() is performed. When - a flush event occurs, the DirectRows in memory will be sent to Cloud - Bigtable. Batching mutations is more efficient than sending individual - request. - - This class is not suited for usage in systems where each mutation - needs to guaranteed to be sent, since calling mutate may only result in an - in-memory change. In a case of a system crash, any DirectRows remaining in - memory will not necessarily be sent to the service, even after the - completion of the mutate() method. - - TODO: Performance would dramatically improve if this class had the - capability of asynchronous, parallel RPCs. - - :type table: class - :param table: class:`~google.cloud.bigtable.table.Table`. - - :type flush_count: int - :param flush_count: (Optional) Max number of rows to flush. If it - reaches the max number of rows it calls finish_batch() to mutate the - current row batch. Default is FLUSH_COUNT (1000 rows). 
- - :type max_row_bytes: int - :param max_row_bytes: (Optional) Max number of row mutations size to - flush. If it reaches the max number of row mutations size it calls - finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES - (5 MB). - """ - - def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): - self.rows = [] - self.total_mutation_count = 0 - self.total_size = 0 - self.table = table - self.flush_count = flush_count - self.max_row_bytes = max_row_bytes - - def mutate(self, row): - """ Add a row to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_batcher_mutate] - :end-before: [END bigtable_batcher_mutate] - - :type row: class - :param row: class:`~google.cloud.bigtable.row.DirectRow`. - - :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. - """ - mutation_count = len(row._get_mutations()) - if mutation_count > MAX_MUTATIONS: - raise MaxMutationsError( - "The row key {} exceeds the number of mutations {}.".format( - row.row_key, mutation_count - ) - ) - - if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: - self.flush() - - self.rows.append(row) - self.total_mutation_count += mutation_count - self.total_size += row.get_mutations_size() - - if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count: - self.flush() - - def mutate_rows(self, rows): - """ Add a row to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_batcher_mutate_rows] - :end-before: [END bigtable_batcher_mutate_rows] - - :type rows: list:[`~google.cloud.bigtable.row.DirectRow`] - :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. - - :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. - """ - for row in rows: - self.mutate(row) - - def flush(self): - """ Sends the current. batch to Cloud Bigtable. - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_batcher_flush] - :end-before: [END bigtable_batcher_flush] - - """ - if len(self.rows) != 0: - self.table.mutate_rows(self.rows) - self.total_mutation_count = 0 - self.total_size = 0 - self.rows = [] diff --git a/bigtable/google/cloud/bigtable/client.py b/bigtable/google/cloud/bigtable/client.py deleted file mode 100644 index 8a8315623cae..000000000000 --- a/bigtable/google/cloud/bigtable/client.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
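Per the MutationsBatcher behaviour deleted above, rows accumulate in memory and are flushed automatically once flush_count rows or max_row_bytes bytes are buffered, or when flush() is called explicitly. A minimal sketch of that behaviour, assuming placeholder instance and table IDs and an existing column family "cf1":

    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance("my-instance")  # placeholder instance ID
    table = instance.table("my-table")  # placeholder table ID

    batcher = table.mutations_batcher(flush_count=2, max_row_bytes=1024)

    for i in range(5):
        row = table.direct_row("row_key_{}".format(i).encode())
        row.set_cell("cf1", b"col", b"value")
        batcher.mutate(row)  # flushes automatically once 2 rows are buffered

    batcher.flush()  # send any rows still buffered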
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Parent client for calling the Google Cloud Bigtable API. - -This is the base from which all interactions with the API occur. - -In the hierarchy of API concepts - -* a :class:`~google.cloud.bigtable.client.Client` owns an - :class:`~google.cloud.bigtable.instance.Instance` -* an :class:`~google.cloud.bigtable.instance.Instance` owns a - :class:`~google.cloud.bigtable.table.Table` -* a :class:`~google.cloud.bigtable.table.Table` owns a - :class:`~.column_family.ColumnFamily` -* a :class:`~google.cloud.bigtable.table.Table` owns a - :class:`~google.cloud.bigtable.row.Row` (and all the cells in the row) -""" -import os -import warnings -import grpc - -from google.api_core.gapic_v1 import client_info - -from google.cloud import bigtable_v2 -from google.cloud import bigtable_admin_v2 - -from google.cloud.bigtable import __version__ -from google.cloud.bigtable.instance import Instance -from google.cloud.bigtable.cluster import Cluster - -from google.cloud.client import ClientWithProject - -from google.cloud.bigtable_admin_v2 import enums -from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE -from google.cloud.environment_vars import BIGTABLE_EMULATOR - - -INSTANCE_TYPE_PRODUCTION = enums.Instance.Type.PRODUCTION -INSTANCE_TYPE_DEVELOPMENT = enums.Instance.Type.DEVELOPMENT -INSTANCE_TYPE_UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED -_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) -SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" -ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin" -"""Scope for interacting with the Cluster Admin and Table Admin APIs.""" -DATA_SCOPE = "https://www.googleapis.com/auth/bigtable.data" -"""Scope for reading and writing table data.""" -READ_ONLY_SCOPE = "https://www.googleapis.com/auth/bigtable.data.readonly" -"""Scope for reading table data.""" - - -def _create_gapic_client(client_class, client_options=None): - def inner(self): - if self._emulator_host is None: - return client_class( - credentials=self._credentials, - client_info=self._client_info, - client_options=client_options, - ) - else: - return client_class( - channel=self._emulator_channel, client_info=self._client_info - ) - - return inner - - -class Client(ClientWithProject): - """Client for interacting with Google Cloud Bigtable API. - - .. note:: - - Since the Cloud Bigtable API requires the gRPC transport, no - ``_http`` argument is accepted by this class. - - :type project: :class:`str` or :func:`unicode ` - :param project: (Optional) The ID of the project which owns the - instances, tables and data. If not provided, will - attempt to determine from the environment. - - :type credentials: :class:`~google.auth.credentials.Credentials` - :param credentials: (Optional) The OAuth2 Credentials to use for this - client. If not passed, falls back to the default - inferred from the environment. - - :type read_only: bool - :param read_only: (Optional) Boolean indicating if the data scope should be - for reading only (or for writing as well). Defaults to - :data:`False`. - - :type admin: bool - :param admin: (Optional) Boolean indicating if the client will be used to - interact with the Instance Admin or Table Admin APIs. This - requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. 
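The read_only and admin flags on the deleted Client select among DATA_SCOPE, ADMIN_SCOPE, and READ_ONLY_SCOPE and cannot be combined, as the constructor above enforces. A minimal sketch, assuming a placeholder project ID and default credentials:

    from google.cloud.bigtable import Client

    data_client = Client(project="my-project")                      # DATA_SCOPE
    admin_client = Client(project="my-project", admin=True)         # DATA_SCOPE + ADMIN_SCOPE
    readonly_client = Client(project="my-project", read_only=True)  # READ_ONLY_SCOPE

    try:
        Client(project="my-project", read_only=True, admin=True)
    except ValueError:
        pass  # a read-only client cannot also perform administrative actions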
- - :type: client_info: :class:`google.api_core.gapic_v1.client_info.ClientInfo` - :param client_info: - The client info used to send a user-agent string along with API - requests. If ``None``, then default info will be used. Generally, - you only need to set this if you're developing your own library - or partner tool. - - :type client_options: :class:`~google.api_core.client_options.ClientOptions` - or :class:`dict` - :param client_options: (Optional) Client options used to set user options - on the client. API Endpoint should be set through client_options. - - :type admin_client_options: - :class:`~google.api_core.client_options.ClientOptions` or :class:`dict` - :param admin_client_options: (Optional) Client options used to set user - options on the client. API Endpoint for admin operations should be set - through admin_client_options. - - :type channel: :instance: grpc.Channel - :param channel (grpc.Channel): (Optional) DEPRECATED: - A ``Channel`` instance through which to make calls. - This argument is mutually exclusive with ``credentials``; - providing both will raise an exception. No longer used. - - :raises: :class:`ValueError ` if both ``read_only`` - and ``admin`` are :data:`True` - """ - - _table_data_client = None - _table_admin_client = None - _instance_admin_client = None - - def __init__( - self, - project=None, - credentials=None, - read_only=False, - admin=False, - client_info=_CLIENT_INFO, - client_options=None, - admin_client_options=None, - channel=None, - ): - if read_only and admin: - raise ValueError( - "A read-only client cannot also perform" "administrative actions." - ) - - # NOTE: We set the scopes **before** calling the parent constructor. - # It **may** use those scopes in ``with_scopes_if_required``. - self._read_only = bool(read_only) - self._admin = bool(admin) - self._client_info = client_info - self._emulator_host = os.getenv(BIGTABLE_EMULATOR) - self._emulator_channel = None - - if self._emulator_host is not None: - self._emulator_channel = grpc.insecure_channel(self._emulator_host) - - if channel is not None: - warnings.warn( - "'channel' is deprecated and no longer used.", - DeprecationWarning, - stacklevel=2, - ) - - self._client_options = client_options - self._admin_client_options = admin_client_options - self._channel = channel - self.SCOPE = self._get_scopes() - super(Client, self).__init__(project=project, credentials=credentials) - - def _get_scopes(self): - """Get the scopes corresponding to admin / read-only state. - - Returns: - Tuple[str, ...]: The tuple of scopes. - """ - if self._read_only: - scopes = (READ_ONLY_SCOPE,) - else: - scopes = (DATA_SCOPE,) - - if self._admin: - scopes += (ADMIN_SCOPE,) - - return scopes - - @property - def project_path(self): - """Project name to be used with Instance Admin API. - - .. note:: - - This property will not change if ``project`` does not, but the - return value is not cached. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_project_path] - :end-before: [END bigtable_project_path] - - The project name is of the form - - ``"projects/{project}"`` - - :rtype: str - :returns: Return a fully-qualified project string. - """ - return self.instance_admin_client.project_path(self.project) - - @property - def table_data_client(self): - """Getter for the gRPC stub used for the Table Admin API. - - For example: - - .. 
literalinclude:: snippets.py - :start-after: [START bigtable_table_data_client] - :end-before: [END bigtable_table_data_client] - - :rtype: :class:`.bigtable_v2.BigtableClient` - :returns: A BigtableClient object. - """ - if self._table_data_client is None: - klass = _create_gapic_client( - bigtable_v2.BigtableClient, client_options=self._client_options - ) - self._table_data_client = klass(self) - return self._table_data_client - - @property - def table_admin_client(self): - """Getter for the gRPC stub used for the Table Admin API. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_table_admin_client] - :end-before: [END bigtable_table_admin_client] - - :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin` - :returns: A BigtableTableAdmin instance. - :raises: :class:`ValueError ` if the current - client is not an admin client or if it has not been - :meth:`start`-ed. - """ - if self._table_admin_client is None: - if not self._admin: - raise ValueError("Client is not an admin client.") - klass = _create_gapic_client( - bigtable_admin_v2.BigtableTableAdminClient, - client_options=self._admin_client_options, - ) - self._table_admin_client = klass(self) - return self._table_admin_client - - @property - def instance_admin_client(self): - """Getter for the gRPC stub used for the Table Admin API. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_instance_admin_client] - :end-before: [END bigtable_instance_admin_client] - - :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` - :returns: A BigtableInstanceAdmin instance. - :raises: :class:`ValueError ` if the current - client is not an admin client or if it has not been - :meth:`start`-ed. - """ - if self._instance_admin_client is None: - if not self._admin: - raise ValueError("Client is not an admin client.") - klass = _create_gapic_client( - bigtable_admin_v2.BigtableInstanceAdminClient, - client_options=self._admin_client_options, - ) - self._instance_admin_client = klass(self) - return self._instance_admin_client - - def instance(self, instance_id, display_name=None, instance_type=None, labels=None): - """Factory to create a instance associated with this client. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_create_prod_instance] - :end-before: [END bigtable_create_prod_instance] - - :type instance_id: str - :param instance_id: The ID of the instance. - - :type display_name: str - :param display_name: (Optional) The display name for the instance in - the Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the instance ID. - - :type instance_type: int - :param instance_type: (Optional) The type of the instance. - Possible values are represented - by the following constants: - :data:`google.cloud.bigtable.enums.InstanceType.PRODUCTION`. - :data:`google.cloud.bigtable.enums.InstanceType.DEVELOPMENT`, - Defaults to - :data:`google.cloud.bigtable.enums.InstanceType.UNSPECIFIED`. - - :type labels: dict - :param labels: (Optional) Labels are a flexible and lightweight - mechanism for organizing cloud resources into groups - that reflect a customer's organizational needs and - deployment strategies. They can be used to filter - resources and aggregate metrics. Label keys must be - between 1 and 63 characters long. Maximum 64 labels can - be associated with a given resource. Label values must - be between 0 and 63 characters long. 
Keys and values - must both be under 128 bytes. - - :rtype: :class:`~google.cloud.bigtable.instance.Instance` - :returns: an instance owned by this client. - """ - return Instance( - instance_id, - self, - display_name=display_name, - instance_type=instance_type, - labels=labels, - ) - - def list_instances(self): - """List instances owned by the project. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_list_instances] - :end-before: [END bigtable_list_instances] - - :rtype: tuple - :returns: - (instances, failed_locations), where 'instances' is list of - :class:`google.cloud.bigtable.instance.Instance`, and - 'failed_locations' is a list of locations which could not - be resolved. - """ - resp = self.instance_admin_client.list_instances(self.project_path) - instances = [Instance.from_pb(instance, self) for instance in resp.instances] - return instances, resp.failed_locations - - def list_clusters(self): - """List the clusters in the project. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_list_clusters_in_project] - :end-before: [END bigtable_list_clusters_in_project] - - :rtype: tuple - :returns: - (clusters, failed_locations), where 'clusters' is list of - :class:`google.cloud.bigtable.instance.Cluster`, and - 'failed_locations' is a list of strings representing - locations which could not be resolved. - """ - resp = self.instance_admin_client.list_clusters( - self.instance_admin_client.instance_path(self.project, "-") - ) - clusters = [] - instances = {} - for cluster in resp.clusters: - match_cluster_name = _CLUSTER_NAME_RE.match(cluster.name) - instance_id = match_cluster_name.group("instance") - if instance_id not in instances: - instances[instance_id] = self.instance(instance_id) - clusters.append(Cluster.from_pb(cluster, instances[instance_id])) - return clusters, resp.failed_locations diff --git a/bigtable/google/cloud/bigtable/cluster.py b/bigtable/google/cloud/bigtable/cluster.py deleted file mode 100644 index 5ff1d0404b94..000000000000 --- a/bigtable/google/cloud/bigtable/cluster.py +++ /dev/null @@ -1,353 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Google Cloud Bigtable Cluster.""" - - -import re -from google.cloud.bigtable_admin_v2.types import instance_pb2 -from google.api_core.exceptions import NotFound - - -_CLUSTER_NAME_RE = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P[^/]+)/clusters/" - r"(?P[a-z][-a-z0-9]*)$" -) - - -class Cluster(object): - """Representation of a Google Cloud Bigtable Cluster. - - We can use a :class:`Cluster` to: - - * :meth:`reload` itself - * :meth:`create` itself - * :meth:`update` itself - * :meth:`delete` itself - - :type cluster_id: str - :param cluster_id: The ID of the cluster. - - :type instance: :class:`~google.cloud.bigtable.instance.Instance` - :param instance: The instance where the cluster resides. 
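A minimal sketch of the project-level listing helpers defined above; the project ID is a placeholder and an admin-enabled client is assumed:

.. code:: python

    from google.cloud.bigtable import Client

    client = Client(project="my-project", admin=True)

    # Instances owned by the project, plus any locations that could not be reached.
    instances, failed_locations = client.list_instances()
    for instance in instances:
        print(instance.instance_id)

    # Clusters across every instance in the project (the "-" wildcard instance path).
    clusters, failed_locations = client.list_clusters()
    for cluster in clusters:
        print(cluster.cluster_id, cluster.serve_nodes)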
- - :type location_id: str - :param location_id: (Creation Only) The location where this cluster's - nodes and storage reside . For best performance, - clients should be located as close as possible to - this cluster. - For list of supported locations refer to - https://cloud.google.com/bigtable/docs/locations - - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. - - :type default_storage_type: int - :param default_storage_type: (Optional) The type of storage - Possible values are represented by the - following constants: - :data:`google.cloud.bigtable.enums.StorageType.SSD`. - :data:`google.cloud.bigtable.enums.StorageType.SHD`, - Defaults to - :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. - - :type _state: int - :param _state: (`OutputOnly`) - The current state of the cluster. - Possible values are represented by the following constants: - :data:`google.cloud.bigtable.enums.Cluster.State.NOT_KNOWN`. - :data:`google.cloud.bigtable.enums.Cluster.State.READY`. - :data:`google.cloud.bigtable.enums.Cluster.State.CREATING`. - :data:`google.cloud.bigtable.enums.Cluster.State.RESIZING`. - :data:`google.cloud.bigtable.enums.Cluster.State.DISABLED`. - """ - - def __init__( - self, - cluster_id, - instance, - location_id=None, - serve_nodes=None, - default_storage_type=None, - _state=None, - ): - self.cluster_id = cluster_id - self._instance = instance - self.location_id = location_id - self.serve_nodes = serve_nodes - self.default_storage_type = default_storage_type - self._state = _state - - @classmethod - def from_pb(cls, cluster_pb, instance): - """Creates an cluster instance from a protobuf. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_cluster_from_pb] - :end-before: [END bigtable_cluster_from_pb] - - :type cluster_pb: :class:`instance_pb2.Cluster` - :param cluster_pb: An instance protobuf object. - - :type instance: :class:`google.cloud.bigtable.instance.Instance` - :param instance: The instance that owns the cluster. - - :rtype: :class:`Cluster` - :returns: The Cluster parsed from the protobuf response. - :raises: :class:`ValueError ` if the cluster - name does not match - ``projects/{project}/instances/{instance_id}/clusters/{cluster_id}`` - or if the parsed instance ID does not match the istance ID - on the client. - or if the parsed project ID does not match the project ID - on the client. - """ - match_cluster_name = _CLUSTER_NAME_RE.match(cluster_pb.name) - if match_cluster_name is None: - raise ValueError( - "Cluster protobuf name was not in the " "expected format.", - cluster_pb.name, - ) - if match_cluster_name.group("instance") != instance.instance_id: - raise ValueError( - "Instance ID on cluster does not match the " "instance ID on the client" - ) - if match_cluster_name.group("project") != instance._client.project: - raise ValueError( - "Project ID on cluster does not match the " "project ID on the client" - ) - cluster_id = match_cluster_name.group("cluster_id") - - result = cls(cluster_id, instance) - result._update_from_pb(cluster_pb) - return result - - def _update_from_pb(self, cluster_pb): - """Refresh self from the server-provided protobuf. - Helper for :meth:`from_pb` and :meth:`reload`. - """ - - self.location_id = cluster_pb.location.split("/")[-1] - self.serve_nodes = cluster_pb.serve_nodes - self.default_storage_type = cluster_pb.default_storage_type - self._state = cluster_pb.state - - @property - def name(self): - """Cluster name used in requests. - - .. 
note:: - This property will not change if ``_instance`` and ``cluster_id`` - do not, but the return value is not cached. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_cluster_name] - :end-before: [END bigtable_cluster_name] - - The cluster name is of the form - - ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"`` - - :rtype: str - :returns: The cluster name. - """ - return self._instance._client.instance_admin_client.cluster_path( - self._instance._client.project, self._instance.instance_id, self.cluster_id - ) - - @property - def state(self): - """google.cloud.bigtable.enums.Cluster.State: state of cluster. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_cluster_state] - :end-before: [END bigtable_cluster_state] - - """ - return self._state - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - # NOTE: This does not compare the configuration values, such as - # the serve_nodes. Instead, it only compares - # identifying values instance, cluster ID and client. This is - # intentional, since the same cluster can be in different states - # if not synchronized. Clusters with similar instance/cluster - # settings but different clients can't be used in the same way. - return other.cluster_id == self.cluster_id and other._instance == self._instance - - def __ne__(self, other): - return not self == other - - def reload(self): - """Reload the metadata for this cluster. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_reload_cluster] - :end-before: [END bigtable_reload_cluster] - """ - cluster_pb = self._instance._client.instance_admin_client.get_cluster(self.name) - - # NOTE: _update_from_pb does not check that the project and - # cluster ID on the response match the request. - self._update_from_pb(cluster_pb) - - def exists(self): - """Check whether the cluster already exists. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_check_cluster_exists] - :end-before: [END bigtable_check_cluster_exists] - - :rtype: bool - :returns: True if the table exists, else False. - """ - client = self._instance._client - try: - client.instance_admin_client.get_cluster(name=self.name) - return True - # NOTE: There could be other exceptions that are returned to the user. - except NotFound: - return False - - def create(self): - """Create this cluster. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_create_cluster] - :end-before: [END bigtable_create_cluster] - - .. note:: - - Uses the ``project``, ``instance`` and ``cluster_id`` on the - current :class:`Cluster` in addition to the ``serve_nodes``. - To change them before creating, reset the values via - - .. code:: python - - cluster.serve_nodes = 8 - cluster.cluster_id = 'i-changed-my-mind' - - before calling :meth:`create`. - - :rtype: :class:`~google.api_core.operation.Operation` - :returns: The long-running operation corresponding to the - create operation. - """ - client = self._instance._client - cluster_pb = self._to_pb() - - return client.instance_admin_client.create_cluster( - self._instance.name, self.cluster_id, cluster_pb - ) - - def update(self): - """Update this cluster. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_update_cluster] - :end-before: [END bigtable_update_cluster] - - .. note:: - - Updates the ``serve_nodes``. 
If you'd like to - change them before updating, reset the values via - - .. code:: python - - cluster.serve_nodes = 8 - - before calling :meth:`update`. - - :type location: :str:``CreationOnly`` - :param location: The location where this cluster's nodes and storage - reside. For best performance, clients should be located as - close as possible to this cluster. Currently only zones are - supported, so values should be of the form - ``projects//locations/``. - - :type serve_nodes: :int - :param serve_nodes: The number of nodes allocated to this cluster. - More nodes enable higher throughput and more consistent - performance. - - :rtype: :class:`Operation` - :returns: The long-running operation corresponding to the - update operation. - """ - client = self._instance._client - # We are passing `None` for second argument location. - # Location is set only at the time of creation of a cluster - # and can not be changed after cluster has been created. - return client.instance_admin_client.update_cluster( - self.name, self.serve_nodes, None - ) - - def delete(self): - """Delete this cluster. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_delete_cluster] - :end-before: [END bigtable_delete_cluster] - - Marks a cluster and all of its tables for permanent deletion in 7 days. - - Immediately upon completion of the request: - - * Billing will cease for all of the cluster's reserved resources. - * The cluster's ``delete_time`` field will be set 7 days in the future. - - Soon afterward: - - * All tables within the cluster will become unavailable. - - At the cluster's ``delete_time``: - - * The cluster and **all of its tables** will immediately and - irrevocably disappear from the API, and their data will be - permanently deleted. - """ - client = self._instance._client - client.instance_admin_client.delete_cluster(self.name) - - def _to_pb(self): - """ Create cluster proto buff message for API calls """ - client = self._instance._client - location = client.instance_admin_client.location_path( - client.project, self.location_id - ) - cluster_pb = instance_pb2.Cluster( - location=location, - serve_nodes=self.serve_nodes, - default_storage_type=self.default_storage_type, - ) - return cluster_pb diff --git a/bigtable/google/cloud/bigtable/column_family.py b/bigtable/google/cloud/bigtable/column_family.py deleted file mode 100644 index 8b536992faa7..000000000000 --- a/bigtable/google/cloud/bigtable/column_family.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Google Cloud Bigtable Column Family.""" - - -from google.cloud import _helpers -from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, -) - - -class GarbageCollectionRule(object): - """Garbage collection rule for column families within a table. 
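Tying together the ``reload``, ``update`` and ``delete`` methods of :class:`Cluster` shown above, a hedged sketch of resizing an existing cluster; the project, instance and cluster IDs are placeholders, and the ``cluster()`` factory used here is the one defined on ``Instance`` later in this diff:

.. code:: python

    from google.cloud.bigtable import Client

    client = Client(project="my-project", admin=True)
    instance = client.instance("my-instance")
    cluster = instance.cluster("my-cluster")

    cluster.reload()               # fetch current location, serve_nodes and state
    cluster.serve_nodes = 5        # only the node count can change after creation
    operation = cluster.update()   # long-running operation
    operation.result(timeout=120)  # wait for the resize (google.api_core Operation helper)

    # cluster.delete()             # would remove the cluster and all of its tables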
- - Cells in the column family (within a table) fitting the rule will be - deleted during garbage collection. - - .. note:: - - This class is a do-nothing base class for all GC rules. - - .. note:: - - A string ``gc_expression`` can also be used with API requests, but - that value would be superceded by a ``gc_rule``. As a result, we - don't support that feature and instead support via native classes. - """ - - -class MaxVersionsGCRule(GarbageCollectionRule): - """Garbage collection limiting the number of versions of a cell. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_family_gc_max_versions] - :end-before: [END bigtable_create_family_gc_max_versions] - - :type max_num_versions: int - :param max_num_versions: The maximum number of versions - """ - - def __init__(self, max_num_versions): - self.max_num_versions = max_num_versions - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.max_num_versions == self.max_num_versions - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the garbage collection rule to a protobuf. - - :rtype: :class:`.table_v2_pb2.GcRule` - :returns: The converted current object. - """ - return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions) - - -class MaxAgeGCRule(GarbageCollectionRule): - """Garbage collection limiting the age of a cell. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_family_gc_max_age] - :end-before: [END bigtable_create_family_gc_max_age] - - :type max_age: :class:`datetime.timedelta` - :param max_age: The maximum age allowed for a cell in the table. - """ - - def __init__(self, max_age): - self.max_age = max_age - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.max_age == self.max_age - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the garbage collection rule to a protobuf. - - :rtype: :class:`.table_v2_pb2.GcRule` - :returns: The converted current object. - """ - max_age = _helpers._timedelta_to_duration_pb(self.max_age) - return table_v2_pb2.GcRule(max_age=max_age) - - -class GCRuleUnion(GarbageCollectionRule): - """Union of garbage collection rules. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_family_gc_union] - :end-before: [END bigtable_create_family_gc_union] - - :type rules: list - :param rules: List of :class:`GarbageCollectionRule`. - """ - - def __init__(self, rules): - self.rules = rules - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.rules == self.rules - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the union into a single GC rule as a protobuf. - - :rtype: :class:`.table_v2_pb2.GcRule` - :returns: The converted current object. - """ - union = table_v2_pb2.GcRule.Union(rules=[rule.to_pb() for rule in self.rules]) - return table_v2_pb2.GcRule(union=union) - - -class GCRuleIntersection(GarbageCollectionRule): - """Intersection of garbage collection rules. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_family_gc_intersection] - :end-before: [END bigtable_create_family_gc_intersection] - - :type rules: list - :param rules: List of :class:`GarbageCollectionRule`. 
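The four rule classes above compose as follows; a minimal sketch using only what this module defines:

.. code:: python

    import datetime

    from google.cloud.bigtable.column_family import (
        GCRuleIntersection,
        GCRuleUnion,
        MaxAgeGCRule,
        MaxVersionsGCRule,
    )

    keep_three = MaxVersionsGCRule(3)                        # at most 3 versions per cell
    one_week = MaxAgeGCRule(datetime.timedelta(days=7))      # drop cells older than 7 days

    either = GCRuleUnion(rules=[keep_three, one_week])       # collected if either rule matches
    both = GCRuleIntersection(rules=[keep_three, one_week])  # collected only if both match

    gc_rule_pb = both.to_pb()  # protobuf form sent with column-family requests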
- """ - - def __init__(self, rules): - self.rules = rules - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.rules == self.rules - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the intersection into a single GC rule as a protobuf. - - :rtype: :class:`.table_v2_pb2.GcRule` - :returns: The converted current object. - """ - intersection = table_v2_pb2.GcRule.Intersection( - rules=[rule.to_pb() for rule in self.rules] - ) - return table_v2_pb2.GcRule(intersection=intersection) - - -class ColumnFamily(object): - """Representation of a Google Cloud Bigtable Column Family. - - We can use a :class:`ColumnFamily` to: - - * :meth:`create` itself - * :meth:`update` itself - * :meth:`delete` itself - - :type column_family_id: str - :param column_family_id: The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type table: :class:`Table ` - :param table: The table that owns the column family. - - :type gc_rule: :class:`GarbageCollectionRule` - :param gc_rule: (Optional) The garbage collection settings for this - column family. - """ - - def __init__(self, column_family_id, table, gc_rule=None): - self.column_family_id = column_family_id - self._table = table - self.gc_rule = gc_rule - - @property - def name(self): - """Column family name used in requests. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_column_family_name] - :end-before: [END bigtable_column_family_name] - - .. note:: - - This property will not change if ``column_family_id`` does not, but - the return value is not cached. - - The Column family name is of the form - - ``"projects/../zones/../clusters/../tables/../columnFamilies/.."`` - - :rtype: str - :returns: The column family name. - """ - return self._table.name + "/columnFamilies/" + self.column_family_id - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - other.column_family_id == self.column_family_id - and other._table == self._table - and other.gc_rule == self.gc_rule - ) - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the column family to a protobuf. - - :rtype: :class:`.table_v2_pb2.ColumnFamily` - :returns: The converted current object. - """ - if self.gc_rule is None: - return table_v2_pb2.ColumnFamily() - else: - return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) - - def create(self): - """Create this column family. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_column_family] - :end-before: [END bigtable_create_column_family] - - """ - column_family = self.to_pb() - modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( - id=self.column_family_id, create=column_family - ) - - client = self._table._instance._client - # data it contains are the GC rule and the column family ID already - # stored on this instance. - client.table_admin_client.modify_column_families( - self._table.name, [modification] - ) - - def update(self): - """Update this column family. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_update_column_family] - :end-before: [END bigtable_update_column_family] - - .. note:: - - Only the GC rule can be updated. By changing the column family ID, - you will simply be referring to a different column family. 
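A short sketch of the :class:`ColumnFamily` lifecycle described above; ``table`` is assumed to be an existing :class:`~google.cloud.bigtable.table.Table` and the family ID is a placeholder:

.. code:: python

    from google.cloud.bigtable.column_family import ColumnFamily, MaxVersionsGCRule

    column_family = ColumnFamily("cf1", table, gc_rule=MaxVersionsGCRule(2))
    column_family.create()                        # ModifyColumnFamilies "create" modification

    column_family.gc_rule = MaxVersionsGCRule(5)
    column_family.update()                        # only the GC rule can be changed

    column_family.delete()                        # "drop=True" modification removes the family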
- """ - column_family = self.to_pb() - modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( - id=self.column_family_id, update=column_family - ) - - client = self._table._instance._client - # data it contains are the GC rule and the column family ID already - # stored on this instance. - client.table_admin_client.modify_column_families( - self._table.name, [modification] - ) - - def delete(self): - """Delete this column family. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_delete_column_family] - :end-before: [END bigtable_delete_column_family] - - """ - modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( - id=self.column_family_id, drop=True - ) - - client = self._table._instance._client - # data it contains are the GC rule and the column family ID already - # stored on this instance. - client.table_admin_client.modify_column_families( - self._table.name, [modification] - ) - - -def _gc_rule_from_pb(gc_rule_pb): - """Convert a protobuf GC rule to a native object. - - :type gc_rule_pb: :class:`.table_v2_pb2.GcRule` - :param gc_rule_pb: The GC rule to convert. - - :rtype: :class:`GarbageCollectionRule` or :data:`NoneType ` - :returns: An instance of one of the native rules defined - in :module:`column_family` or :data:`None` if no values were - set on the protobuf passed in. - :raises: :class:`ValueError ` if the rule name - is unexpected. - """ - rule_name = gc_rule_pb.WhichOneof("rule") - if rule_name is None: - return None - - if rule_name == "max_num_versions": - return MaxVersionsGCRule(gc_rule_pb.max_num_versions) - elif rule_name == "max_age": - max_age = _helpers._duration_pb_to_timedelta(gc_rule_pb.max_age) - return MaxAgeGCRule(max_age) - elif rule_name == "union": - return GCRuleUnion([_gc_rule_from_pb(rule) for rule in gc_rule_pb.union.rules]) - elif rule_name == "intersection": - rules = [_gc_rule_from_pb(rule) for rule in gc_rule_pb.intersection.rules] - return GCRuleIntersection(rules) - else: - raise ValueError("Unexpected rule name", rule_name) diff --git a/bigtable/google/cloud/bigtable/enums.py b/bigtable/google/cloud/bigtable/enums.py deleted file mode 100644 index f0965779fc8b..000000000000 --- a/bigtable/google/cloud/bigtable/enums.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Wrappers for gapic enum types.""" - -from google.cloud.bigtable_admin_v2 import enums - - -class StorageType(object): - """ - Storage media types for persisting Bigtable data. - - Attributes: - UNSPECIFIED (int): The user did not specify a storage type. - SSD (int): Flash (SSD) storage should be used. - HDD (int): Magnetic drive (HDD) storage should be used. - """ - - UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED - SSD = enums.StorageType.SSD - HDD = enums.StorageType.HDD - - -class Instance(object): - class State(object): - """ - Possible states of an instance. 
- - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be - determined. - READY (int): The instance has been successfully created and can - serve requests to its tables. - CREATING (int): The instance is currently being created, and may be - destroyed if the creation process encounters an error. - """ - - NOT_KNOWN = enums.Instance.State.STATE_NOT_KNOWN - READY = enums.Instance.State.READY - CREATING = enums.Instance.State.CREATING - - class Type(object): - """ - The type of the instance. - - Attributes: - UNSPECIFIED (int): The type of the instance is unspecified. - If set when creating an instance, a ``PRODUCTION`` instance will - be created. If set when updating an instance, the type will be - left unchanged. - PRODUCTION (int): An instance meant for production use. - ``serve_nodes`` must be set on the cluster. - DEVELOPMENT (int): The instance is meant for development and testing - purposes only; it has no performance or uptime guarantees and is not - covered by SLA. - After a development instance is created, it can be upgraded by - updating the instance to type ``PRODUCTION``. An instance created - as a production instance cannot be changed to a development instance. - When creating a development instance, ``serve_nodes`` on the cluster - must not be set. - """ - - UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED - PRODUCTION = enums.Instance.Type.PRODUCTION - DEVELOPMENT = enums.Instance.Type.DEVELOPMENT - - -class Cluster(object): - class State(object): - """ - Possible states of a cluster. - - Attributes: - NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready - to serve requests. - CREATING (int): The cluster is currently being created, and may be - destroyed if the creation process encounters an error. - A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may - revert to its previous node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) - still exist, but no operations can be performed on the cluster. - """ - - NOT_KNOWN = enums.Cluster.State.STATE_NOT_KNOWN - READY = enums.Cluster.State.READY - CREATING = enums.Cluster.State.CREATING - RESIZING = enums.Cluster.State.RESIZING - DISABLED = enums.Cluster.State.DISABLED - - -class RoutingPolicyType(object): - """ - The type of the routing policy for app_profile. - - Attributes: - ANY (int): Read/write requests may be routed to any cluster in the - instance, and will fail over to another cluster in the event of - transient errors or delays. - Choosing this option sacrifices read-your-writes consistency to - improve availability. - See - https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny - - SINGLE (int): Unconditionally routes all read/write requests to a - specific cluster. - This option preserves read-your-writes consistency, but does not improve - availability. 
- See - https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.SingleClusterRouting - """ - - ANY = 1 - SINGLE = 2 - - -class Table(object): - class View(object): - """ - Defines a view over a table's fields. - - Attributes: - VIEW_UNSPECIFIED (int): Uses the default view for each method - as documented in its request. - NAME_ONLY (int): Only populates ``name``. - SCHEMA_VIEW (int): Only populates ``name`` and fields related - to the table's schema. - REPLICATION_VIEW (int): This is a private alpha release of - Cloud Bigtable replication. This feature is not currently available - to most Cloud Bigtable customers. This feature might be changed in - backward-incompatible ways and is not recommended for production use. - It is not subject to any SLA or deprecation policy. - - Only populates ``name`` and fields related to the table's - replication state. - FULL (int): Populates all fields. - """ - - VIEW_UNSPECIFIED = enums.Table.View.VIEW_UNSPECIFIED - NAME_ONLY = enums.Table.View.NAME_ONLY - SCHEMA_VIEW = enums.Table.View.SCHEMA_VIEW - REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW - FULL = enums.Table.View.FULL - - class ReplicationState(object): - """ - Table replication states. - - Attributes: - STATE_NOT_KNOWN (int): The replication state of the table is unknown - in this cluster. - INITIALIZING (int): The cluster was recently created, and the table - must finish copying - over pre-existing data from other clusters before it can begin - receiving live replication updates and serving - ``Data API`` requests. - PLANNED_MAINTENANCE (int): The table is temporarily unable to serve - ``Data API`` requests from this - cluster due to planned internal maintenance. - UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve - ``Data API`` requests from this - cluster due to unplanned or emergency maintenance. - READY (int): The table can serve - ``Data API`` requests from this - cluster. Depending on replication delay, reads may not immediately - reflect the state of the table in other clusters. - """ - - STATE_NOT_KNOWN = enums.Table.ClusterState.ReplicationState.STATE_NOT_KNOWN - INITIALIZING = enums.Table.ClusterState.ReplicationState.INITIALIZING - PLANNED_MAINTENANCE = ( - enums.Table.ClusterState.ReplicationState.PLANNED_MAINTENANCE - ) - UNPLANNED_MAINTENANCE = ( - enums.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE - ) - READY = enums.Table.ClusterState.ReplicationState.READY diff --git a/bigtable/google/cloud/bigtable/instance.py b/bigtable/google/cloud/bigtable/instance.py deleted file mode 100644 index dbdd20640918..000000000000 --- a/bigtable/google/cloud/bigtable/instance.py +++ /dev/null @@ -1,716 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
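The wrappers in ``enums.py`` above are plain integer constants re-exported from the admin GAPIC layer. A brief sketch of how they are typically passed to the factories in this package; the particular values are chosen purely for illustration:

.. code:: python

    from google.cloud.bigtable import enums

    instance_type = enums.Instance.Type.PRODUCTION   # requires serve_nodes on clusters
    storage_type = enums.StorageType.SSD             # or HDD / UNSPECIFIED
    routing_policy = enums.RoutingPolicyType.SINGLE  # single-cluster routing for app profiles

    # States are read back, not set; for example, after instance.reload():
    #     instance.state == enums.Instance.State.READY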
- -"""User-friendly container for Google Cloud Bigtable Instance.""" - - -import re - -from google.cloud.bigtable.table import Table -from google.cloud.bigtable.cluster import Cluster -from google.cloud.bigtable.app_profile import AppProfile - -from google.protobuf import field_mask_pb2 - -from google.cloud.bigtable_admin_v2.types import instance_pb2, options_pb2 - -from google.api_core.exceptions import NotFound - -from google.cloud.bigtable.policy import Policy - -import warnings - -_INSTANCE_NAME_RE = re.compile( - r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)$" -) - -_INSTANCE_CREATE_WARNING = """ -Use of `instance.create({0}, {1}, {2})` will be deprecated. -Please replace with -`cluster = instance.cluster({0}, {1}, {2})` -`instance.create(clusters=[cluster])`.""" - - -class Instance(object): - """Representation of a Google Cloud Bigtable Instance. - - We can use an :class:`Instance` to: - - * :meth:`reload` itself - * :meth:`create` itself - * :meth:`update` itself - * :meth:`delete` itself - - .. note:: - - For now, we leave out the ``default_storage_type`` (an enum) - which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. - - :type instance_id: str - :param instance_id: The ID of the instance. - - :type client: :class:`Client ` - :param client: The client that owns the instance. Provides - authorization and a project ID. - - :type display_name: str - :param display_name: (Optional) The display name for the instance in the - Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the instance ID. - - :type instance_type: int - :param instance_type: (Optional) The type of the instance. - Possible values are represented - by the following constants: - :data:`google.cloud.bigtable.enums.Instance.Type.PRODUCTION`. - :data:`google.cloud.bigtable.enums.Instance.Type.DEVELOPMENT`, - Defaults to - :data:`google.cloud.bigtable.enums.Instance.Type.UNSPECIFIED`. - - :type labels: dict - :param labels: (Optional) Labels are a flexible and lightweight - mechanism for organizing cloud resources into groups - that reflect a customer's organizational needs and - deployment strategies. They can be used to filter - resources and aggregate metrics. Label keys must be - between 1 and 63 characters long. Maximum 64 labels can - be associated with a given resource. Label values must - be between 0 and 63 characters long. Keys and values - must both be under 128 bytes. - - :type _state: int - :param _state: (`OutputOnly`) - The current state of the instance. - Possible values are represented by the following constants: - :data:`google.cloud.bigtable.enums.Instance.State.STATE_NOT_KNOWN`. - :data:`google.cloud.bigtable.enums.Instance.State.READY`. - :data:`google.cloud.bigtable.enums.Instance.State.CREATING`. - """ - - def __init__( - self, - instance_id, - client, - display_name=None, - instance_type=None, - labels=None, - _state=None, - ): - self.instance_id = instance_id - self._client = client - self.display_name = display_name or instance_id - self.type_ = instance_type - self.labels = labels - self._state = _state - - def _update_from_pb(self, instance_pb): - """Refresh self from the server-provided protobuf. - Helper for :meth:`from_pb` and :meth:`reload`. 
- """ - if not instance_pb.display_name: # Simple field (string) - raise ValueError("Instance protobuf does not contain display_name") - self.display_name = instance_pb.display_name - self.type_ = instance_pb.type - self.labels = dict(instance_pb.labels) - self._state = instance_pb.state - - @classmethod - def from_pb(cls, instance_pb, client): - """Creates an instance instance from a protobuf. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_instance_from_pb] - :end-before: [END bigtable_instance_from_pb] - - :type instance_pb: :class:`instance_pb2.Instance` - :param instance_pb: An instance protobuf object. - - :type client: :class:`Client ` - :param client: The client that owns the instance. - - :rtype: :class:`Instance` - :returns: The instance parsed from the protobuf response. - :raises: :class:`ValueError ` if the instance - name does not match - ``projects/{project}/instances/{instance_id}`` - or if the parsed project ID does not match the project ID - on the client. - """ - match = _INSTANCE_NAME_RE.match(instance_pb.name) - if match is None: - raise ValueError( - "Instance protobuf name was not in the " "expected format.", - instance_pb.name, - ) - if match.group("project") != client.project: - raise ValueError( - "Project ID on instance does not match the " "project ID on the client" - ) - instance_id = match.group("instance_id") - - result = cls(instance_id, client) - result._update_from_pb(instance_pb) - return result - - @property - def name(self): - """Instance name used in requests. - - .. note:: - This property will not change if ``instance_id`` does not, - but the return value is not cached. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_instance_name] - :end-before: [END bigtable_instance_name] - - The instance name is of the form - - ``"projects/{project}/instances/{instance_id}"`` - - :rtype: str - :returns: Return a fully-qualified instance string. - """ - return self._client.instance_admin_client.instance_path( - project=self._client.project, instance=self.instance_id - ) - - @property - def state(self): - """google.cloud.bigtable.enums.Instance.State: state of Instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_instance_state] - :end-before: [END bigtable_instance_state] - - """ - return self._state - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - # NOTE: This does not compare the configuration values, such as - # the display_name. Instead, it only compares - # identifying values instance ID and client. This is - # intentional, since the same instance can be in different states - # if not synchronized. Instances with similar instance - # settings but different clients can't be used in the same way. - return other.instance_id == self.instance_id and other._client == self._client - - def __ne__(self, other): - return not self == other - - def create( - self, - location_id=None, - serve_nodes=None, - default_storage_type=None, - clusters=None, - ): - """Create this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_create_prod_instance] - :end-before: [END bigtable_create_prod_instance] - - .. note:: - - Uses the ``project`` and ``instance_id`` on the current - :class:`Instance` in addition to the ``display_name``. - To change them before creating, reset the values via - - .. 
code:: python - - instance.display_name = 'New display name' - instance.instance_id = 'i-changed-my-mind' - - before calling :meth:`create`. - - :type location_id: str - :param location_id: (Creation Only) The location where nodes and - storage of the cluster owned by this instance - reside. For best performance, clients should be - located as close as possible to cluster's location. - For list of supported locations refer to - https://cloud.google.com/bigtable/docs/locations - - - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the instance's - cluster; used to set up the instance's cluster. - - :type default_storage_type: int - :param default_storage_type: (Optional) The storage media type for - persisting Bigtable data. - Possible values are represented - by the following constants: - :data:`google.cloud.bigtable.enums.StorageType.SSD`. - :data:`google.cloud.bigtable.enums.StorageType.SHD`, - Defaults to - :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. - - :type clusters: class:`~[~google.cloud.bigtable.cluster.Cluster]` - :param clusters: List of clusters to be created. - - :rtype: :class:`~google.api_core.operation.Operation` - :returns: The long-running operation corresponding to the create - operation. - - :raises: :class:`ValueError ` if both - ``clusters`` and one of ``location_id``, ``serve_nodes`` - and ``default_storage_type`` are set. - """ - - if clusters is None: - warnings.warn( - _INSTANCE_CREATE_WARNING.format( - "location_id", "serve_nodes", "default_storage_type" - ), - DeprecationWarning, - stacklevel=2, - ) - - cluster_id = "{}-cluster".format(self.instance_id) - - clusters = [ - self.cluster( - cluster_id, - location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=default_storage_type, - ) - ] - elif ( - location_id is not None - or serve_nodes is not None - or default_storage_type is not None - ): - raise ValueError( - "clusters and one of location_id, serve_nodes, \ - default_storage_type can not be set \ - simultaneously." - ) - - instance_pb = instance_pb2.Instance( - display_name=self.display_name, type=self.type_, labels=self.labels - ) - - parent = self._client.project_path - - return self._client.instance_admin_client.create_instance( - parent=parent, - instance_id=self.instance_id, - instance=instance_pb, - clusters={c.cluster_id: c._to_pb() for c in clusters}, - ) - - def exists(self): - """Check whether the instance already exists. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_check_instance_exists] - :end-before: [END bigtable_check_instance_exists] - - :rtype: bool - :returns: True if the table exists, else False. - """ - try: - self._client.instance_admin_client.get_instance(name=self.name) - return True - # NOTE: There could be other exceptions that are returned to the user. - except NotFound: - return False - - def reload(self): - """Reload the metadata for this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_reload_instance] - :end-before: [END bigtable_reload_instance] - """ - instance_pb = self._client.instance_admin_client.get_instance(self.name) - - # NOTE: _update_from_pb does not check that the project and - # instance ID on the response match the request. - self._update_from_pb(instance_pb) - - def update(self): - """Updates an instance within a project. - - For example: - - .. 
literalinclude:: snippets.py - :start-after: [START bigtable_update_instance] - :end-before: [END bigtable_update_instance] - - .. note:: - - Updates any or all of the following values: - ``display_name`` - ``type`` - ``labels`` - To change a value before - updating, assign that values via - - .. code:: python - - instance.display_name = 'New display name' - - before calling :meth:`update`. - - :rtype: :class:`~google.api_core.operation.Operation` - :returns: The long-running operation corresponding to the update - operation. - """ - update_mask_pb = field_mask_pb2.FieldMask() - if self.display_name is not None: - update_mask_pb.paths.append("display_name") - if self.type_ is not None: - update_mask_pb.paths.append("type") - if self.labels is not None: - update_mask_pb.paths.append("labels") - instance_pb = instance_pb2.Instance( - name=self.name, - display_name=self.display_name, - type=self.type_, - labels=self.labels, - ) - - return self._client.instance_admin_client.partial_update_instance( - instance=instance_pb, update_mask=update_mask_pb - ) - - def delete(self): - """Delete this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_delete_instance] - :end-before: [END bigtable_delete_instance] - - Marks an instance and all of its tables for permanent deletion - in 7 days. - - Immediately upon completion of the request: - - * Billing will cease for all of the instance's reserved resources. - * The instance's ``delete_time`` field will be set 7 days in - the future. - - Soon afterward: - - * All tables within the instance will become unavailable. - - At the instance's ``delete_time``: - - * The instance and **all of its tables** will immediately and - irrevocably disappear from the API, and their data will be - permanently deleted. - """ - self._client.instance_admin_client.delete_instance(name=self.name) - - def get_iam_policy(self, requested_policy_version=None): - """Gets the access control policy for an instance resource. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_get_iam_policy] - :end-before: [END bigtable_get_iam_policy] - - :type requested_policy_version: int or ``NoneType`` - :param requested_policy_version: Optional. The version of IAM policies to request. - If a policy with a condition is requested without - setting this, the server will return an error. - This must be set to a value of 3 to retrieve IAM - policies containing conditions. This is to prevent - client code that isn't aware of IAM conditions from - interpreting and modifying policies incorrectly. - The service might return a policy with version lower - than the one that was requested, based on the - feature syntax in the policy fetched. - - :rtype: :class:`google.cloud.bigtable.policy.Policy` - :returns: The current IAM policy of this instance - """ - args = {"resource": self.name} - if requested_policy_version is not None: - args["options_"] = options_pb2.GetPolicyOptions( - requested_policy_version=requested_policy_version - ) - - instance_admin_client = self._client.instance_admin_client - - resp = instance_admin_client.get_iam_policy(**args) - return Policy.from_pb(resp) - - def set_iam_policy(self, policy): - """Sets the access control policy on an instance resource. Replaces any - existing policy. - - For more information about policy, please see documentation of - class `google.cloud.bigtable.policy.Policy` - - For example: - - .. 
literalinclude:: snippets.py - :start-after: [START bigtable_set_iam_policy] - :end-before: [END bigtable_set_iam_policy] - - :type policy: :class:`google.cloud.bigtable.policy.Policy` - :param policy: A new IAM policy to replace the current IAM policy - of this instance - - :rtype: :class:`google.cloud.bigtable.policy.Policy` - :returns: The current IAM policy of this instance. - """ - instance_admin_client = self._client.instance_admin_client - resp = instance_admin_client.set_iam_policy( - resource=self.name, policy=policy.to_pb() - ) - return Policy.from_pb(resp) - - def test_iam_permissions(self, permissions): - """Returns permissions that the caller has on the specified instance - resource. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_test_iam_permissions] - :end-before: [END bigtable_test_iam_permissions] - - :type permissions: list - :param permissions: The set of permissions to check for - the ``resource``. Permissions with wildcards (such as '*' - or 'storage.*') are not allowed. For more information see - `IAM Overview - `_. - `Bigtable Permissions - `_. - - :rtype: list - :returns: A List(string) of permissions allowed on the instance - """ - instance_admin_client = self._client.instance_admin_client - resp = instance_admin_client.test_iam_permissions( - resource=self.name, permissions=permissions - ) - return list(resp.permissions) - - def cluster( - self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None - ): - """Factory to create a cluster associated with this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_create_cluster] - :end-before: [END bigtable_create_cluster] - - :type cluster_id: str - :param cluster_id: The ID of the cluster. - - :type instance: :class:`~google.cloud.bigtable.instance.Instance` - :param instance: The instance where the cluster resides. - - :type location_id: str - :param location_id: (Creation Only) The location where this cluster's - nodes and storage reside. For best performance, - clients should be located as close as possible to - this cluster. - For list of supported locations refer to - https://cloud.google.com/bigtable/docs/locations - - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. - - :type default_storage_type: int - :param default_storage_type: (Optional) The type of storage - Possible values are represented by the - following constants: - :data:`google.cloud.bigtable.enums.StorageType.SSD`. - :data:`google.cloud.bigtable.enums.StorageType.SHD`, - Defaults to - :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. - - :rtype: :class:`~google.cloud.bigtable.instance.Cluster` - :returns: a cluster owned by this instance. - """ - return Cluster( - cluster_id, - self, - location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=default_storage_type, - ) - - def list_clusters(self): - """List the clusters in this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_list_clusters_on_instance] - :end-before: [END bigtable_list_clusters_on_instance] - - :rtype: tuple - :returns: - (clusters, failed_locations), where 'clusters' is list of - :class:`google.cloud.bigtable.instance.Cluster`, and - 'failed_locations' is a list of locations which could not - be resolved. 
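Putting the pieces above together, a hedged sketch of creating an instance with an explicit cluster list (the non-deprecated form flagged by ``_INSTANCE_CREATE_WARNING``); the project, instance and cluster IDs, zone, and labels are placeholders:

.. code:: python

    from google.cloud.bigtable import Client, enums

    client = Client(project="my-project", admin=True)
    instance = client.instance(
        "my-instance",
        display_name="My instance",
        instance_type=enums.Instance.Type.PRODUCTION,
        labels={"env": "prod"},
    )
    cluster = instance.cluster(
        "my-instance-c1",
        location_id="us-central1-f",
        serve_nodes=3,
        default_storage_type=enums.StorageType.SSD,
    )

    operation = instance.create(clusters=[cluster])
    operation.result(timeout=240)   # wait for the long-running create (api_core Operation)

    assert instance.exists()
    instance.reload()               # refresh display_name, type_, labels and state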
- """ - resp = self._client.instance_admin_client.list_clusters(self.name) - clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters] - return clusters, resp.failed_locations - - def table(self, table_id, mutation_timeout=None, app_profile_id=None): - """Factory to create a table associated with this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_create_table] - :end-before: [END bigtable_create_table] - - :type table_id: str - :param table_id: The ID of the table. - - :type app_profile_id: str - :param app_profile_id: (Optional) The unique name of the AppProfile. - - :rtype: :class:`Table ` - :returns: The table owned by this instance. - """ - return Table( - table_id, - self, - app_profile_id=app_profile_id, - mutation_timeout=mutation_timeout, - ) - - def list_tables(self): - """List the tables in this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_list_tables] - :end-before: [END bigtable_list_tables] - - :rtype: list of :class:`Table ` - :returns: The list of tables owned by the instance. - :raises: :class:`ValueError ` if one of the - returned tables has a name that is not of the expected format. - """ - table_list_pb = self._client.table_admin_client.list_tables(self.name) - - result = [] - for table_pb in table_list_pb: - table_prefix = self.name + "/tables/" - if not table_pb.name.startswith(table_prefix): - raise ValueError( - "Table name {} not of expected format".format(table_pb.name) - ) - table_id = table_pb.name[len(table_prefix) :] - result.append(self.table(table_id)) - - return result - - def app_profile( - self, - app_profile_id, - routing_policy_type=None, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ): - """Factory to create AppProfile associated with this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_create_app_profile] - :end-before: [END bigtable_create_app_profile] - - :type app_profile_id: str - :param app_profile_id: The ID of the AppProfile. Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type: routing_policy_type: int - :param: routing_policy_type: The type of the routing policy. - Possible values are represented - by the following constants: - :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` - :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` - - :type: description: str - :param: description: (Optional) Long form description of the use - case for this AppProfile. - - :type: cluster_id: str - :param: cluster_id: (Optional) Unique cluster_id which is only required - when routing_policy_type is - ROUTING_POLICY_TYPE_SINGLE. - - :type: allow_transactional_writes: bool - :param: allow_transactional_writes: (Optional) If true, allow - transactional writes for - ROUTING_POLICY_TYPE_SINGLE. - - :rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile>` - :returns: AppProfile for this instance. - """ - return AppProfile( - app_profile_id, - self, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - - def list_app_profiles(self): - """Lists information about AppProfiles in an instance. - - For example: - - .. 
literalinclude:: snippets.py - :start-after: [START bigtable_list_app_profiles] - :end-before: [END bigtable_list_app_profiles] - - :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] - :returns: A :list:[`~google.cloud.bigtable.app_profile.AppProfile`]. - By default, this is a list of - :class:`~google.cloud.bigtable.app_profile.AppProfile` - instances. - """ - resp = self._client.instance_admin_client.list_app_profiles(self.name) - return [AppProfile.from_pb(app_profile, self) for app_profile in resp] diff --git a/bigtable/google/cloud/bigtable/policy.py b/bigtable/google/cloud/bigtable/policy.py deleted file mode 100644 index 65be0158a006..000000000000 --- a/bigtable/google/cloud/bigtable/policy.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 - -from google.api_core.iam import Policy as BasePolicy -from google.cloud._helpers import _to_bytes -from google.iam.v1 import policy_pb2 - -"""IAM roles supported by Bigtable Instance resource""" -BIGTABLE_ADMIN_ROLE = "roles/bigtable.admin" -"""Administers all instances within a project, including the data stored -within tables. Can create new instances. Intended for project administrators. -""" -BIGTABLE_USER_ROLE = "roles/bigtable.user" -"""Provides read-write access to the data stored within tables. Intended for -application developers or service accounts. -""" -BIGTABLE_READER_ROLE = "roles/bigtable.reader" -"""Provides read-only access to the data stored within tables. Intended for -data scientists, dashboard generators, and other data-analysis scenarios. -""" -BIGTABLE_VIEWER_ROLE = "roles/bigtable.viewer" -"""Provides no data access. Intended as a minimal set of permissions to access -the GCP Console for Cloud Bigtable. -""" -"""For detailed information -See -https://cloud.google.com/bigtable/docs/access-control#roles -""" - - -class Policy(BasePolicy): - """IAM Policy - - See - https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.iam.v1#policy - - A Policy consists of a list of bindings. A binding binds a list of - members to a role, where the members can be user accounts, Google - groups, Google domains, and service accounts. A role is a named list - of permissions defined by IAM. - For more information about predefined roles currently supoprted - by Bigtable Instance please see - `Predefined roles - `_. - For more information about custom roles please see - `Custom roles - `_. - - :type etag: str - :param etag: etag is used for optimistic concurrency control as a way to - help prevent simultaneous updates of a policy from overwriting - each other. It is strongly suggested that systems make use - of the etag in the read-modify-write cycle to perform policy - updates in order to avoid race conditions: - An etag is returned in the response to getIamPolicy, and - systems are expected to put that etag in the request to - setIamPolicy to ensure that their change will be applied to - the same version of the policy. 
- - If no etag is provided in the call to setIamPolicy, then the - existing policy is overwritten blindly. - :type version: int - :param version: The syntax schema version of the policy. - - Note: - Using conditions in bindings requires the policy's version to be set - to `3` or greater, depending on the versions that are currently supported. - - Accessing the policy using dict operations will raise InvalidOperationException - when the policy's version is set to 3. - - Use the policy.bindings getter/setter to retrieve and modify the policy's bindings. - - See: - IAM Policy https://cloud.google.com/iam/reference/rest/v1/Policy - Policy versions https://cloud.google.com/iam/docs/policies#versions - Conditions overview https://cloud.google.com/iam/docs/conditions-overview. - """ - - def __init__(self, etag=None, version=None): - BasePolicy.__init__( - self, etag=etag if etag is None else _to_bytes(etag), version=version - ) - - @property - def bigtable_admins(self): - """Access to bigtable.admin role memebers - - Raise InvalidOperationException if version is greater than 1 or policy contains conditions. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_admins_policy] - :end-before: [END bigtable_admins_policy] - """ - result = set() - for member in self.get(BIGTABLE_ADMIN_ROLE, ()): - result.add(member) - return frozenset(result) - - @property - def bigtable_readers(self): - """Access to bigtable.reader role memebers - - Raise InvalidOperationException if version is greater than 1 or policy contains conditions. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_readers_policy] - :end-before: [END bigtable_readers_policy] - """ - result = set() - for member in self.get(BIGTABLE_READER_ROLE, ()): - result.add(member) - return frozenset(result) - - @property - def bigtable_users(self): - """Access to bigtable.user role memebers - - Raise InvalidOperationException if version is greater than 1 or policy contains conditions. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_users_policy] - :end-before: [END bigtable_users_policy] - """ - result = set() - for member in self.get(BIGTABLE_USER_ROLE, ()): - result.add(member) - return frozenset(result) - - @property - def bigtable_viewers(self): - """Access to bigtable.viewer role memebers - - Raise InvalidOperationException if version is greater than 1 or policy contains conditions. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_viewers_policy] - :end-before: [END bigtable_viewers_policy] - """ - result = set() - for member in self.get(BIGTABLE_VIEWER_ROLE, ()): - result.add(member) - return frozenset(result) - - @classmethod - def from_pb(cls, policy_pb): - """Factory: create a policy from a protobuf message. - - Args: - policy_pb (google.iam.policy_pb2.Policy): message returned by - ``get_iam_policy`` gRPC API. - - Returns: - :class:`Policy`: the parsed policy - """ - policy = cls(policy_pb.etag, policy_pb.version) - - policy.bindings = bindings = [] - for binding_pb in policy_pb.bindings: - binding = {"role": binding_pb.role, "members": set(binding_pb.members)} - condition = binding_pb.condition - if condition and condition.expression: - binding["condition"] = { - "title": condition.title, - "description": condition.description, - "expression": condition.expression, - } - bindings.append(binding) - - return policy - - def to_pb(self): - """Render a protobuf message. 
- - Returns: - google.iam.policy_pb2.Policy: a message to be passed to the - ``set_iam_policy`` gRPC API. - """ - - return policy_pb2.Policy( - etag=self.etag, - version=self.version or 0, - bindings=[ - policy_pb2.Binding( - role=binding["role"], - members=sorted(binding["members"]), - condition=binding.get("condition"), - ) - for binding in self.bindings - if binding["members"] - ], - ) - - @classmethod - def from_api_repr(cls, resource): - """Factory: create a policy from a JSON resource. - - Overrides the base class version to store :attr:`etag` as bytes. - - Args: - resource (dict): JSON policy resource returned by the - ``getIamPolicy`` REST API. - - Returns: - :class:`Policy`: the parsed policy - """ - etag = resource.get("etag") - - if etag is not None: - resource = resource.copy() - resource["etag"] = base64.b64decode(etag.encode("ascii")) - - return super(Policy, cls).from_api_repr(resource) - - def to_api_repr(self): - """Render a JSON policy resource. - - Overrides the base class version to convert :attr:`etag` from bytes - to JSON-compatible base64-encoded text. - - Returns: - dict: a JSON resource to be passed to the - ``setIamPolicy`` REST API. - """ - resource = super(Policy, self).to_api_repr() - - if self.etag is not None: - resource["etag"] = base64.b64encode(self.etag).decode("ascii") - - return resource diff --git a/bigtable/google/cloud/bigtable/row.py b/bigtable/google/cloud/bigtable/row.py deleted file mode 100644 index 079ba6c8f497..000000000000 --- a/bigtable/google/cloud/bigtable/row.py +++ /dev/null @@ -1,997 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User-friendly container for Google Cloud Bigtable Row.""" - - -import struct - -import six - -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud._helpers import _microseconds_from_datetime -from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - -_PACK_I64 = struct.Struct(">q").pack - -MAX_MUTATIONS = 100000 -"""The maximum number of mutations that a row can accumulate.""" - - -class Row(object): - """Base representation of a Google Cloud Bigtable Row. - - This class has three subclasses corresponding to the three - RPC methods for sending row mutations: - - * :class:`DirectRow` for ``MutateRow`` - * :class:`ConditionalRow` for ``CheckAndMutateRow`` - * :class:`AppendRow` for ``ReadModifyWriteRow`` - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: (Optional) The table that owns the row. - """ - - def __init__(self, row_key, table=None): - self._row_key = _to_bytes(row_key) - self._table = table - - @property - def row_key(self): - """Row key. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_row_key] - :end-before: [END bigtable_row_row_key] - - :rtype: bytes - :returns: The key for the current row. 
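As the ``row_key`` property above suggests, keys are coerced to bytes when a row object is constructed; a minimal sketch using the ``DirectRow`` constructor defined in this module:

.. code:: python

    # Hedged sketch of row-key coercion.
    from google.cloud.bigtable.row import DirectRow

    row = DirectRow("row-key1")          # str keys are encoded to bytes
    assert row.row_key == b"row-key1"
    assert row.table is None             # the owning table is optional here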
- """ - return self._row_key - - @property - def table(self): - """Row table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_table] - :end-before: [END bigtable_row_table] - - :rtype: table: :class:`Table ` - :returns: table: The table that owns the row. - """ - return self._table - - -class _SetDeleteRow(Row): - """Row helper for setting or deleting cell values. - - Implements helper methods to add mutations to set or delete cell contents: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - """ - - ALL_COLUMNS = object() - """Sentinel value used to indicate all columns in a column family.""" - - def _get_mutations(self, state=None): - """Gets the list of mutations for a given state. - - This method intended to be implemented by subclasses. - - ``state`` may not need to be used by all subclasses. - - :type state: bool - :param state: The state that the mutation should be - applied in. - - :raises: :class:`NotImplementedError ` - always. - """ - raise NotImplementedError - - def _set_cell(self, column_family_id, column, value, timestamp=None, state=None): - """Helper for :meth:`set_cell` - - Adds a mutation to set the value in a specific cell. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. - """ - column = _to_bytes(column) - if isinstance(value, six.integer_types): - value = _PACK_I64(value) - value = _to_bytes(value) - if timestamp is None: - # Use -1 for current Bigtable server time. - timestamp_micros = -1 - else: - timestamp_micros = _microseconds_from_datetime(timestamp) - # Truncate to millisecond granularity. - timestamp_micros -= timestamp_micros % 1000 - - mutation_val = data_v2_pb2.Mutation.SetCell( - family_name=column_family_id, - column_qualifier=column, - timestamp_micros=timestamp_micros, - value=value, - ) - mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val) - self._get_mutations(state).append(mutation_pb) - - def _delete(self, state=None): - """Helper for :meth:`delete` - - Adds a delete mutation (for the entire row) to the accumulated - mutations. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. - """ - mutation_val = data_v2_pb2.Mutation.DeleteFromRow() - mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val) - self._get_mutations(state).append(mutation_pb) - - def _delete_cells(self, column_family_id, columns, time_range=None, state=None): - """Helper for :meth:`delete_cell` and :meth:`delete_cells`. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. 
- - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then - the entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. - """ - mutations_list = self._get_mutations(state) - if columns is self.ALL_COLUMNS: - mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( - family_name=column_family_id - ) - mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) - mutations_list.append(mutation_pb) - else: - delete_kwargs = {} - if time_range is not None: - delete_kwargs["time_range"] = time_range.to_pb() - - to_append = [] - for column in columns: - column = _to_bytes(column) - # time_range will never change if present, but the rest of - # delete_kwargs will - delete_kwargs.update( - family_name=column_family_id, column_qualifier=column - ) - mutation_val = data_v2_pb2.Mutation.DeleteFromColumn(**delete_kwargs) - mutation_pb = data_v2_pb2.Mutation(delete_from_column=mutation_val) - to_append.append(mutation_pb) - - # We don't add the mutations until all columns have been - # processed without error. - mutations_list.extend(to_append) - - -class DirectRow(_SetDeleteRow): - """Google Cloud Bigtable Row for sending "direct" mutations. - - These mutations directly set or delete cell contents: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - These methods can be used directly:: - - >>> row = table.row(b'row-key1') - >>> row.set_cell(u'fam', b'col1', b'cell-val') - >>> row.delete_cell(u'fam', b'col2') - - .. note:: - - A :class:`DirectRow` accumulates mutations locally via the - :meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and - :meth:`delete_cells` methods. To actually send these mutations to the - Google Cloud Bigtable API, you must call :meth:`commit`. - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: (Optional) The table that owns the row. This is - used for the :meth: `commit` only. Alternatively, - DirectRows can be persisted via - :meth:`~google.cloud.bigtable.table.Table.mutate_rows`. - """ - - def __init__(self, row_key, table=None): - super(DirectRow, self).__init__(row_key, table) - self._pb_mutations = [] - - def _get_mutations(self, state=None): # pylint: disable=unused-argument - """Gets the list of mutations for a given state. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type state: bool - :param state: The state that the mutation should be - applied in. - - :rtype: list - :returns: The list to add new mutations to (for the current state). - """ - return self._pb_mutations - - def get_mutations_size(self): - """ Gets the total mutations size for current row - - For example: - - .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_row_get_mutations_size] - :end-before: [END bigtable_row_get_mutations_size] - - """ - - mutation_size = 0 - for mutation in self._get_mutations(): - mutation_size += mutation.ByteSize() - - return mutation_size - - def set_cell(self, column_family_id, column, value, timestamp=None): - """Sets a value in this row. - - The cell is determined by the ``row_key`` of this :class:`DirectRow` - and the ``column``. The ``column`` must be in an existing - :class:`.ColumnFamily` (as determined by ``column_family_id``). - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_set_cell] - :end-before: [END bigtable_row_set_cell] - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - """ - self._set_cell(column_family_id, column, value, timestamp=timestamp, state=None) - - def delete(self): - """Deletes this row from the table. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete] - :end-before: [END bigtable_row_delete] - - """ - self._delete(state=None) - - def delete_cell(self, column_family_id, column, time_range=None): - """Deletes cell in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete_cell] - :end-before: [END bigtable_row_delete_cell] - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family that will have a - cell deleted. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - """ - self._delete_cells( - column_family_id, [column], time_range=time_range, state=None - ) - - def delete_cells(self, column_family_id, columns, time_range=None): - """Deletes cells in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete_cells] - :end-before: [END bigtable_row_delete_cells] - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then - the entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - """ - self._delete_cells(column_family_id, columns, time_range=time_range, state=None) - - def commit(self): - """Makes a ``MutateRow`` API request. - - If no mutations have been created in the row, no request is made. - - Mutations are applied atomically and in order, meaning that earlier - mutations can be masked / negated by later ones. Cells already present - in the row are left unchanged unless explicitly changed by a mutation. - - After committing the accumulated mutations, resets the local - mutations to an empty list. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_commit] - :end-before: [END bigtable_row_commit] - - :raises: :exc:`~.table.TooManyMutationsError` if the number of - mutations is greater than 100,000. - """ - self._table.mutate_rows([self]) - self.clear() - - def clear(self): - """Removes all currently accumulated mutations on the current row. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_clear] - :end-before: [END bigtable_row_clear] - - """ - del self._pb_mutations[:] - - -class ConditionalRow(_SetDeleteRow): - """Google Cloud Bigtable Row for sending mutations conditionally. - - Each mutation has an associated state: :data:`True` or :data:`False`. - When :meth:`commit`-ed, the mutations for the :data:`True` - state will be applied if the filter matches any cells in - the row, otherwise the :data:`False` state will be applied. - - A :class:`ConditionalRow` accumulates mutations in the same way a - :class:`DirectRow` does: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - with the only change the extra ``state`` parameter:: - - >>> row_cond = table.row(b'row-key2', filter_=row_filter) - >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True) - >>> row_cond.delete_cell(u'fam', b'col', state=False) - - .. note:: - - As with :class:`DirectRow`, to actually send these mutations to the - Google Cloud Bigtable API, you must call :meth:`commit`. - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - - :type filter_: :class:`.RowFilter` - :param filter_: Filter to be used for conditional mutations. - """ - - def __init__(self, row_key, table, filter_): - super(ConditionalRow, self).__init__(row_key, table) - self._filter = filter_ - self._true_pb_mutations = [] - self._false_pb_mutations = [] - - def _get_mutations(self, state=None): - """Gets the list of mutations for a given state. - - Over-ridden so that the state can be used in: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - :type state: bool - :param state: The state that the mutation should be - applied in. 
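Putting the ``DirectRow`` pieces above together, a minimal sketch; the project, instance, and table IDs are placeholders:

.. code:: python

    # Hedged sketch of the DirectRow workflow: accumulate mutations, then commit.
    from google.cloud import bigtable

    client = bigtable.Client(project="my-project")
    table = client.instance("my-instance").table("my-table")

    row = table.row(b"row-key1")                   # DirectRow
    row.set_cell("fam", b"col1", b"cell-val")
    row.set_cell("fam", b"counter", 1)             # stored as an 8-byte big-endian int
    row.delete_cell("fam", b"stale-col")
    row.commit()                                   # one MutateRow RPC, then local reset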
- - :rtype: list - :returns: The list to add new mutations to (for the current state). - """ - if state: - return self._true_pb_mutations - else: - return self._false_pb_mutations - - def commit(self): - """Makes a ``CheckAndMutateRow`` API request. - - If no mutations have been created in the row, no request is made. - - The mutations will be applied conditionally, based on whether the - filter matches any cells in the :class:`ConditionalRow` or not. (Each - method which adds a mutation has a ``state`` parameter for this - purpose.) - - Mutations are applied atomically and in order, meaning that earlier - mutations can be masked / negated by later ones. Cells already present - in the row are left unchanged unless explicitly changed by a mutation. - - After committing the accumulated mutations, resets the local - mutations. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_commit] - :end-before: [END bigtable_row_commit] - - :rtype: bool - :returns: Flag indicating if the filter was matched (which also - indicates which set of mutations were applied by the server). - :raises: :class:`ValueError ` if the number of - mutations exceeds the :data:`MAX_MUTATIONS`. - """ - true_mutations = self._get_mutations(state=True) - false_mutations = self._get_mutations(state=False) - num_true_mutations = len(true_mutations) - num_false_mutations = len(false_mutations) - if num_true_mutations == 0 and num_false_mutations == 0: - return - if num_true_mutations > MAX_MUTATIONS or num_false_mutations > MAX_MUTATIONS: - raise ValueError( - "Exceed the maximum allowable mutations (%d). Had %s true " - "mutations and %d false mutations." - % (MAX_MUTATIONS, num_true_mutations, num_false_mutations) - ) - - data_client = self._table._instance._client.table_data_client - resp = data_client.check_and_mutate_row( - table_name=self._table.name, - row_key=self._row_key, - predicate_filter=self._filter.to_pb(), - app_profile_id=self._table._app_profile_id, - true_mutations=true_mutations, - false_mutations=false_mutations, - ) - self.clear() - return resp.predicate_matched - - # pylint: disable=arguments-differ - def set_cell(self, column_family_id, column, value, timestamp=None, state=True): - """Sets a value in this row. - - The cell is determined by the ``row_key`` of this - :class:`ConditionalRow` and the ``column``. The ``column`` must be in - an existing :class:`.ColumnFamily` (as determined by - ``column_family_id``). - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_set_cell] - :end-before: [END bigtable_row_set_cell] - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. 
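A minimal sketch of the conditional workflow described above; ``table`` is assumed as in the earlier DirectRow sketch, and the filter decides which set of mutations is applied:

.. code:: python

    # Hedged sketch of ConditionalRow / CheckAndMutateRow usage.
    from google.cloud.bigtable import row_filters

    row_filter = row_filters.ColumnQualifierRegexFilter(b"col")
    row_cond = table.row(b"row-key2", filter_=row_filter)           # ConditionalRow
    row_cond.set_cell("fam", b"col", b"updated-val", state=True)    # if the filter matches
    row_cond.set_cell("fam", b"col", b"initial-val", state=False)   # otherwise
    matched = row_cond.commit()    # CheckAndMutateRow; True when the filter matched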
- """ - self._set_cell( - column_family_id, column, value, timestamp=timestamp, state=state - ) - - def delete(self, state=True): - """Deletes this row from the table. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete] - :end-before: [END bigtable_row_delete] - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete(state=state) - - def delete_cell(self, column_family_id, column, time_range=None, state=True): - """Deletes cell in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete_cell] - :end-before: [END bigtable_row_delete_cell] - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family that will have a - cell deleted. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete_cells( - column_family_id, [column], time_range=time_range, state=state - ) - - def delete_cells(self, column_family_id, columns, time_range=None, state=True): - """Deletes cells in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete_cells] - :end-before: [END bigtable_row_delete_cells] - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then the - entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete_cells( - column_family_id, columns, time_range=time_range, state=state - ) - - # pylint: enable=arguments-differ - - def clear(self): - """Removes all currently accumulated mutations on the current row. - - For example: - - .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_row_clear] - :end-before: [END bigtable_row_clear] - - """ - del self._true_pb_mutations[:] - del self._false_pb_mutations[:] - - -class AppendRow(Row): - """Google Cloud Bigtable Row for sending append mutations. - - These mutations are intended to augment the value of an existing cell - and uses the methods: - - * :meth:`append_cell_value` - * :meth:`increment_cell_value` - - The first works by appending bytes and the second by incrementing an - integer (stored in the cell as 8 bytes). In either case, if the - cell is empty, assumes the default empty value (empty string for - bytes or 0 for integer). - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - """ - - def __init__(self, row_key, table): - super(AppendRow, self).__init__(row_key, table) - self._rule_pb_list = [] - - def clear(self): - """Removes all currently accumulated modifications on current row. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_clear] - :end-before: [END bigtable_row_clear] - - """ - del self._rule_pb_list[:] - - def append_cell_value(self, column_family_id, column, value): - """Appends a value to an existing cell. - - .. note:: - - This method adds a read-modify rule protobuf to the accumulated - read-modify rules on this row, but does not make an API - request. To actually send an API request (with the rules) to the - Google Cloud Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_append_cell_value] - :end-before: [END bigtable_row_append_cell_value] - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes - :param value: The value to append to the existing value in the cell. If - the targeted cell is unset, it will be treated as - containing the empty string. - """ - column = _to_bytes(column) - value = _to_bytes(value) - rule_pb = data_v2_pb2.ReadModifyWriteRule( - family_name=column_family_id, column_qualifier=column, append_value=value - ) - self._rule_pb_list.append(rule_pb) - - def increment_cell_value(self, column_family_id, column, int_value): - """Increments a value in an existing cell. - - Assumes the value in the cell is stored as a 64 bit integer - serialized to bytes. - - .. note:: - - This method adds a read-modify rule protobuf to the accumulated - read-modify rules on this row, but does not make an API - request. To actually send an API request (with the rules) to the - Google Cloud Bigtable API, call :meth:`commit`. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_increment_cell_value] - :end-before: [END bigtable_row_increment_cell_value] - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type int_value: int - :param int_value: The value to increment the existing value in the cell - by. If the targeted cell is unset, it will be treated - as containing a zero. 
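A minimal sketch of the append/increment mutations described above, using the ``AppendRow`` constructor defined in this module; ``table`` is assumed as in the earlier sketches:

.. code:: python

    # Hedged sketch: append/increment mutations sent via ReadModifyWriteRow.
    from google.cloud.bigtable.row import AppendRow

    append_row = AppendRow(b"row-key3", table)
    append_row.append_cell_value("fam", b"greeting", b", world")
    append_row.increment_cell_value("fam", b"visits", 1)
    modified = append_row.commit()
    # ``modified`` maps family -> column -> [(value_bytes, timestamp), ...]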
Otherwise, the targeted cell - must contain an 8-byte value (interpreted as a 64-bit - big-endian signed integer), or the entire request - will fail. - """ - column = _to_bytes(column) - rule_pb = data_v2_pb2.ReadModifyWriteRule( - family_name=column_family_id, - column_qualifier=column, - increment_amount=int_value, - ) - self._rule_pb_list.append(rule_pb) - - def commit(self): - """Makes a ``ReadModifyWriteRow`` API request. - - This commits modifications made by :meth:`append_cell_value` and - :meth:`increment_cell_value`. If no modifications were made, makes - no API request and just returns ``{}``. - - Modifies a row atomically, reading the latest existing - timestamp / value from the specified columns and writing a new value by - appending / incrementing. The new cell created uses either the current - server time or the highest timestamp of a cell in that column (if it - exceeds the server time). - - After committing the accumulated mutations, resets the local mutations. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_commit] - :end-before: [END bigtable_row_commit] - - :rtype: dict - :returns: The new contents of all modified cells. Returned as a - dictionary of column families, each of which holds a - dictionary of columns. Each column contains a list of cells - modified. Each cell is represented with a two-tuple with the - value (in bytes) and the timestamp for the cell. - :raises: :class:`ValueError ` if the number of - mutations exceeds the :data:`MAX_MUTATIONS`. - """ - num_mutations = len(self._rule_pb_list) - if num_mutations == 0: - return {} - if num_mutations > MAX_MUTATIONS: - raise ValueError( - "%d total append mutations exceed the maximum " - "allowable %d." % (num_mutations, MAX_MUTATIONS) - ) - - data_client = self._table._instance._client.table_data_client - row_response = data_client.read_modify_write_row( - table_name=self._table.name, - row_key=self._row_key, - rules=self._rule_pb_list, - app_profile_id=self._table._app_profile_id, - ) - - # Reset modifications after commit-ing request. - self.clear() - - # NOTE: We expect row_response.key == self._row_key but don't check. - return _parse_rmw_row_response(row_response) - - -def _parse_rmw_row_response(row_response): - """Parses the response to a ``ReadModifyWriteRow`` request. - - :type row_response: :class:`.data_v2_pb2.Row` - :param row_response: The response row (with only modified cells) from a - ``ReadModifyWriteRow`` request. - - :rtype: dict - :returns: The new contents of all modified cells. Returned as a - dictionary of column families, each of which holds a - dictionary of columns. Each column contains a list of cells - modified. Each cell is represented with a two-tuple with the - value (in bytes) and the timestamp for the cell. For example: - - .. code:: python - - { - u'col-fam-id': { - b'col-name1': [ - (b'cell-val', datetime.datetime(...)), - (b'cell-val-newer', datetime.datetime(...)), - ], - b'col-name2': [ - (b'altcol-cell-val', datetime.datetime(...)), - ], - }, - u'col-fam-id2': { - b'col-name3-but-other-fam': [ - (b'foo', datetime.datetime(...)), - ], - }, - } - """ - result = {} - for column_family in row_response.row.families: - column_family_id, curr_family = _parse_family_pb(column_family) - result[column_family_id] = curr_family - return result - - -def _parse_family_pb(family_pb): - """Parses a Family protobuf into a dictionary. 
- - :type family_pb: :class:`._generated.data_pb2.Family` - :param family_pb: A protobuf - - :rtype: tuple - :returns: A string and dictionary. The string is the name of the - column family and the dictionary has column names (within the - family) as keys and cell lists as values. Each cell is - represented with a two-tuple with the value (in bytes) and the - timestamp for the cell. For example: - - .. code:: python - - { - b'col-name1': [ - (b'cell-val', datetime.datetime(...)), - (b'cell-val-newer', datetime.datetime(...)), - ], - b'col-name2': [ - (b'altcol-cell-val', datetime.datetime(...)), - ], - } - """ - result = {} - for column in family_pb.columns: - result[column.qualifier] = cells = [] - for cell in column.cells: - val_pair = (cell.value, _datetime_from_microseconds(cell.timestamp_micros)) - cells.append(val_pair) - - return family_pb.name, result diff --git a/bigtable/google/cloud/bigtable/row_data.py b/bigtable/google/cloud/bigtable/row_data.py deleted file mode 100644 index 24078b8496d8..000000000000 --- a/bigtable/google/cloud/bigtable/row_data.py +++ /dev/null @@ -1,699 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Container for Google Cloud Bigtable Cells and Streaming Row Contents.""" - - -import copy -import six - -import grpc - -from google.api_core import exceptions -from google.api_core import retry -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - -_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." -_MISSING_COLUMN = ( - "Column {} is not among the cells stored in this row in the " "column family {}." -) -_MISSING_INDEX = ( - "Index {!r} is not valid for the cells stored in this row for column {} " - "in the column family {}. There are {} such cells." -) - - -class Cell(object): - """Representation of a Google Cloud Bigtable Cell. - - :type value: bytes - :param value: The value stored in the cell. - - :type timestamp_micros: int - :param timestamp_micros: The timestamp_micros when the cell was stored. - - :type labels: list - :param labels: (Optional) List of strings. Labels applied to the cell. - """ - - def __init__(self, value, timestamp_micros, labels=None): - self.value = value - self.timestamp_micros = timestamp_micros - self.labels = list(labels) if labels is not None else [] - - @classmethod - def from_pb(cls, cell_pb): - """Create a new cell from a Cell protobuf. - - :type cell_pb: :class:`._generated.data_pb2.Cell` - :param cell_pb: The protobuf to convert. - - :rtype: :class:`Cell` - :returns: The cell corresponding to the protobuf. 
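A minimal sketch of the ``Cell`` container defined above, showing the value, timestamp, and equality behaviour from its constructor and properties:

.. code:: python

    # Hedged sketch of the Cell value container.
    from google.cloud.bigtable.row_data import Cell

    cell = Cell(b"cell-val", timestamp_micros=1561140400000000, labels=["prod"])
    print(cell.value)        # b'cell-val'
    print(cell.timestamp)    # datetime derived from timestamp_micros
    print(cell == Cell(b"cell-val", 1561140400000000, labels=["prod"]))   # True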
- """ - if cell_pb.labels: - return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels) - else: - return cls(cell_pb.value, cell_pb.timestamp_micros) - - @property - def timestamp(self): - return _datetime_from_microseconds(self.timestamp_micros) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - other.value == self.value - and other.timestamp_micros == self.timestamp_micros - and other.labels == self.labels - ) - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "<{name} value={value!r} timestamp={timestamp}>".format( - name=self.__class__.__name__, value=self.value, timestamp=self.timestamp - ) - - -class PartialCellData(object): - """Representation of partial cell in a Google Cloud Bigtable Table. - - These are expected to be updated directly from a - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - - :type row_key: bytes - :param row_key: The key for the row holding the (partial) cell. - - :type family_name: str - :param family_name: The family name of the (partial) cell. - - :type qualifier: bytes - :param qualifier: The column qualifier of the (partial) cell. - - :type timestamp_micros: int - :param timestamp_micros: The timestamp (in microsecods) of the - (partial) cell. - - :type labels: list of str - :param labels: labels assigned to the (partial) cell - - :type value: bytes - :param value: The (accumulated) value of the (partial) cell. - """ - - def __init__( - self, row_key, family_name, qualifier, timestamp_micros, labels=(), value=b"" - ): - self.row_key = row_key - self.family_name = family_name - self.qualifier = qualifier - self.timestamp_micros = timestamp_micros - self.labels = labels - self.value = value - - def append_value(self, value): - """Append bytes from a new chunk to value. - - :type value: bytes - :param value: bytes to append - """ - self.value += value - - -class PartialRowData(object): - """Representation of partial row in a Google Cloud Bigtable Table. - - These are expected to be updated directly from a - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - - :type row_key: bytes - :param row_key: The key for the row holding the (partial) data. - """ - - def __init__(self, row_key): - self._row_key = row_key - self._cells = {} - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other._row_key == self._row_key and other._cells == self._cells - - def __ne__(self, other): - return not self == other - - def to_dict(self): - """Convert the cells to a dictionary. - - This is intended to be used with HappyBase, so the column family and - column qualiers are combined (with ``:``). - - :rtype: dict - :returns: Dictionary containing all the data in the cells of this row. - """ - result = {} - for column_family_id, columns in six.iteritems(self._cells): - for column_qual, cells in six.iteritems(columns): - key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual) - result[key] = cells - return result - - @property - def cells(self): - """Property returning all the cells accumulated on this partial row. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_data_cells] - :end-before: [END bigtable_row_data_cells] - - :rtype: dict - :returns: Dictionary of the :class:`Cell` objects accumulated. This - dictionary has two-levels of keys (first for column families - and second for column names/qualifiers within a family). 
For - a given column, a list of :class:`Cell` objects is stored. - """ - return self._cells - - @property - def row_key(self): - """Getter for the current (partial) row's key. - - :rtype: bytes - :returns: The current (partial) row's key. - """ - return self._row_key - - def find_cells(self, column_family_id, column): - """Get a time series of cells stored on this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_find_cells] - :end-before: [END bigtable_row_find_cells] - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cells - are located. - - Returns: - List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the - specified column. - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. - """ - try: - column_family = self._cells[column_family_id] - except KeyError: - raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id)) - - try: - cells = column_family[column] - except KeyError: - raise KeyError(_MISSING_COLUMN.format(column, column_family_id)) - - return cells - - def cell_value(self, column_family_id, column, index=0): - """Get a single cell value stored on this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_cell_value] - :end-before: [END bigtable_row_cell_value] - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cell - is located. - index (Optional[int]): The offset within the series of values. If - not specified, will return the first cell. - - Returns: - ~google.cloud.bigtable.row_data.Cell value: The cell value stored - in the specified column and specified index. - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. - IndexError: If ``index`` cannot be found within the cells stored - in this row for the given ``column_family_id``, ``column`` - pair. - """ - cells = self.find_cells(column_family_id, column) - - try: - cell = cells[index] - except (TypeError, IndexError): - num_cells = len(cells) - msg = _MISSING_INDEX.format(index, column, column_family_id, num_cells) - raise IndexError(msg) - - return cell.value - - def cell_values(self, column_family_id, column, max_count=None): - """Get a time series of cells stored on this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_cell_values] - :end-before: [END bigtable_row_cell_values] - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cells - are located. - max_count (int): The maximum number of cells to use. - - Returns: - A generator which provides: cell.value, cell.timestamp_micros - for each cell in the list of cells - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. 
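A minimal sketch of the accessors above; ``row_data`` is assumed to be a ``PartialRowData`` instance, for example one yielded while iterating a ``read_rows()`` stream:

.. code:: python

    # Hedged sketch of reading values out of a PartialRowData instance.
    newest = row_data.cell_value("fam", b"col1")         # bytes of the newest cell
    all_cells = row_data.find_cells("fam", b"col1")      # full list of Cell objects
    for value, micros in row_data.cell_values("fam", b"col1", max_count=5):
        print(value, micros)
    flat = row_data.to_dict()                            # {b"fam:col1": [Cell, ...]}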
- """ - cells = self.find_cells(column_family_id, column) - if max_count is None: - max_count = len(cells) - - for index, cell in enumerate(cells): - if index == max_count: - break - - yield cell.value, cell.timestamp_micros - - -class InvalidReadRowsResponse(RuntimeError): - """Exception raised to to invalid response data from back-end.""" - - -class InvalidChunk(RuntimeError): - """Exception raised to to invalid chunk data from back-end.""" - - -def _retry_read_rows_exception(exc): - if isinstance(exc, grpc.RpcError): - exc = exceptions.from_grpc_error(exc) - return isinstance(exc, (exceptions.ServiceUnavailable, exceptions.DeadlineExceeded)) - - -DEFAULT_RETRY_READ_ROWS = retry.Retry( - predicate=_retry_read_rows_exception, - initial=1.0, - maximum=15.0, - multiplier=2.0, - deadline=60.0, # 60 seconds -) -"""The default retry strategy to be used on retry-able errors. - -Used by -:meth:`~google.cloud.bigtable.row_data.PartialRowsData._read_next_response`. -""" - - -class PartialRowsData(object): - """Convenience wrapper for consuming a ``ReadRows`` streaming response. - - :type read_method: :class:`client._table_data_client.read_rows` - :param read_method: ``ReadRows`` method. - - :type request: :class:`data_messages_v2_pb2.ReadRowsRequest` - :param request: The ``ReadRowsRequest`` message used to create a - ReadRowsResponse iterator. If the iterator fails, a new - iterator is created, allowing the scan to continue from - the point just beyond the last successfully read row, - identified by self.last_scanned_row_key. The retry happens - inside of the Retry class, using a predicate for the - expected exceptions during iteration. - - :type retry: :class:`~google.api_core.retry.Retry` - :param retry: (Optional) Retry delay and deadline arguments. To override, - the default value :attr:`DEFAULT_RETRY_READ_ROWS` can be - used and modified with the - :meth:`~google.api_core.retry.Retry.with_delay` method - or the - :meth:`~google.api_core.retry.Retry.with_deadline` method. - """ - - NEW_ROW = "New row" # No cells yet complete for row - ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row - CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row - - STATE_NEW_ROW = 1 - STATE_ROW_IN_PROGRESS = 2 - STATE_CELL_IN_PROGRESS = 3 - - read_states = { - STATE_NEW_ROW: NEW_ROW, - STATE_ROW_IN_PROGRESS: ROW_IN_PROGRESS, - STATE_CELL_IN_PROGRESS: CELL_IN_PROGRESS, - } - - def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): - # Counter for rows returned to the user - self._counter = 0 - # In-progress row, unset until first response, after commit/reset - self._row = None - # Last complete row, unset until first commit - self._previous_row = None - # In-progress cell, unset until first response, after completion - self._cell = None - # Last complete cell, unset until first completion, after new row - self._previous_cell = None - - # May be cached from previous response - self.last_scanned_row_key = None - self.read_method = read_method - self.request = request - self.retry = retry - self.response_iterator = read_method(request) - - self.rows = {} - self._state = self.STATE_NEW_ROW - - # Flag to stop iteration, for any reason not related to self.retry() - self._cancelled = False - - @property - def state(self): - """State machine state. - - :rtype: str - :returns: name of state corresponding to current row / chunk - processing. 
- """ - return self.read_states[self._state] - - def cancel(self): - """Cancels the iterator, closing the stream.""" - self._cancelled = True - self.response_iterator.cancel() - - def consume_all(self, max_loops=None): - """Consume the streamed responses until there are no more. - - .. warning:: - This method will be removed in future releases. Please use this - class as a generator instead. - - :type max_loops: int - :param max_loops: (Optional) Maximum number of times to try to consume - an additional ``ReadRowsResponse``. You can use this - to avoid long wait times. - """ - for row in self: - self.rows[row.row_key] = row - - def _create_retry_request(self): - """Helper for :meth:`__iter__`.""" - req_manager = _ReadRowsRequestManager( - self.request, self.last_scanned_row_key, self._counter - ) - return req_manager.build_updated_request() - - def _on_error(self, exc): - """Helper for :meth:`__iter__`.""" - # restart the read scan from AFTER the last successfully read row - retry_request = self.request - if self.last_scanned_row_key: - retry_request = self._create_retry_request() - - self.response_iterator = self.read_method(retry_request) - - def _read_next(self): - """Helper for :meth:`__iter__`.""" - return six.next(self.response_iterator) - - def _read_next_response(self): - """Helper for :meth:`__iter__`.""" - return self.retry(self._read_next, on_error=self._on_error)() - - def __iter__(self): - """Consume the ``ReadRowsResponse`` s from the stream. - Read the rows and yield each to the reader - - Parse the response and its chunks into a new/existing row in - :attr:`_rows`. Rows are returned in order by row key. - """ - while not self._cancelled: - try: - response = self._read_next_response() - except StopIteration: - if self.state != self.NEW_ROW: - raise ValueError("The row remains partial / is not committed.") - break - - for chunk in response.chunks: - if self._cancelled: - break - self._process_chunk(chunk) - if chunk.commit_row: - self.last_scanned_row_key = self._previous_row.row_key - self._counter += 1 - yield self._previous_row - - resp_last_key = response.last_scanned_row_key - if resp_last_key and resp_last_key > self.last_scanned_row_key: - self.last_scanned_row_key = resp_last_key - - def _process_chunk(self, chunk): - if chunk.reset_row: - self._validate_chunk_reset_row(chunk) - self._row = None - self._cell = self._previous_cell = None - self._state = self.STATE_NEW_ROW - return - - self._update_cell(chunk) - - if self._row is None: - if ( - self._previous_row is not None - and self._cell.row_key <= self._previous_row.row_key - ): - raise InvalidChunk() - self._row = PartialRowData(self._cell.row_key) - - if chunk.value_size == 0: - self._state = self.STATE_ROW_IN_PROGRESS - self._save_current_cell() - else: - self._state = self.STATE_CELL_IN_PROGRESS - - if chunk.commit_row: - if chunk.value_size > 0: - raise InvalidChunk() - - self._previous_row = self._row - self._row = None - self._previous_cell = None - self._state = self.STATE_NEW_ROW - - def _update_cell(self, chunk): - if self._cell is None: - qualifier = None - if chunk.HasField("qualifier"): - qualifier = chunk.qualifier.value - family = None - if chunk.HasField("family_name"): - family = chunk.family_name.value - - self._cell = PartialCellData( - chunk.row_key, - family, - qualifier, - chunk.timestamp_micros, - chunk.labels, - chunk.value, - ) - self._copy_from_previous(self._cell) - self._validate_cell_data_new_cell() - else: - self._cell.append_value(chunk.value) - - def 
_validate_cell_data_new_cell(self): - cell = self._cell - if not cell.row_key or not cell.family_name or cell.qualifier is None: - raise InvalidChunk() - - prev = self._previous_cell - if prev and prev.row_key != cell.row_key: - raise InvalidChunk() - - def _validate_chunk_reset_row(self, chunk): - # No reset for new row - _raise_if(self._state == self.STATE_NEW_ROW) - - # No reset with other keys - _raise_if(chunk.row_key) - _raise_if(chunk.HasField("family_name")) - _raise_if(chunk.HasField("qualifier")) - _raise_if(chunk.timestamp_micros) - _raise_if(chunk.labels) - _raise_if(chunk.value_size) - _raise_if(chunk.value) - _raise_if(chunk.commit_row) - - def _save_current_cell(self): - """Helper for :meth:`consume_next`.""" - row, cell = self._row, self._cell - family = row._cells.setdefault(cell.family_name, {}) - qualified = family.setdefault(cell.qualifier, []) - complete = Cell.from_pb(cell) - qualified.append(complete) - self._cell, self._previous_cell = None, cell - - def _copy_from_previous(self, cell): - """Helper for :meth:`consume_next`.""" - previous = self._previous_cell - if previous is not None: - if not cell.row_key: - cell.row_key = previous.row_key - if not cell.family_name: - cell.family_name = previous.family_name - # NOTE: ``cell.qualifier`` **can** be empty string. - if cell.qualifier is None: - cell.qualifier = previous.qualifier - - -class _ReadRowsRequestManager(object): - """ Update the ReadRowsRequest message in case of failures by - filtering the already read keys. - - :type message: class:`data_messages_v2_pb2.ReadRowsRequest` - :param message: Original ReadRowsRequest containing all of the parameters - of API call - - :type last_scanned_key: bytes - :param last_scanned_key: last successfully scanned key - - :type rows_read_so_far: int - :param rows_read_so_far: total no of rows successfully read so far. - this will be used for updating rows_limit - - """ - - def __init__(self, message, last_scanned_key, rows_read_so_far): - self.message = message - self.last_scanned_key = last_scanned_key - self.rows_read_so_far = rows_read_so_far - - def build_updated_request(self): - """ Updates the given message request as per last scanned key - """ - r_kwargs = { - "table_name": self.message.table_name, - "filter": self.message.filter, - } - - if self.message.rows_limit != 0: - r_kwargs["rows_limit"] = max( - 1, self.message.rows_limit - self.rows_read_so_far - ) - - # if neither RowSet.row_keys nor RowSet.row_ranges currently exist, - # add row_range that starts with last_scanned_key as start_key_open - # to request only rows that have not been returned yet - if not self.message.HasField("rows"): - row_range = data_v2_pb2.RowRange(start_key_open=self.last_scanned_key) - r_kwargs["rows"] = data_v2_pb2.RowSet(row_ranges=[row_range]) - else: - row_keys = self._filter_rows_keys() - row_ranges = self._filter_row_ranges() - r_kwargs["rows"] = data_v2_pb2.RowSet( - row_keys=row_keys, row_ranges=row_ranges - ) - return data_messages_v2_pb2.ReadRowsRequest(**r_kwargs) - - def _filter_rows_keys(self): - """ Helper for :meth:`build_updated_request`""" - return [ - row_key - for row_key in self.message.rows.row_keys - if row_key > self.last_scanned_key - ] - - def _filter_row_ranges(self): - """ Helper for :meth:`build_updated_request`""" - new_row_ranges = [] - - for row_range in self.message.rows.row_ranges: - # if current end_key (open or closed) is set, return its value, - # if not, set to empty string (''). 
- # NOTE: Empty string in end_key means "end of table" - end_key = self._end_key_set(row_range) - # if end_key is already read, skip to the next row_range - if end_key and self._key_already_read(end_key): - continue - - # if current start_key (open or closed) is set, return its value, - # if not, then set to empty string ('') - # NOTE: Empty string in start_key means "beginning of table" - start_key = self._start_key_set(row_range) - - # if start_key was already read or doesn't exist, - # create a row_range with last_scanned_key as start_key_open - # to be passed to retry request - retry_row_range = row_range - if self._key_already_read(start_key): - retry_row_range = copy.deepcopy(row_range) - retry_row_range.start_key_closed = _to_bytes("") - retry_row_range.start_key_open = self.last_scanned_key - - new_row_ranges.append(retry_row_range) - - return new_row_ranges - - def _key_already_read(self, key): - """ Helper for :meth:`_filter_row_ranges`""" - return key <= self.last_scanned_key - - @staticmethod - def _start_key_set(row_range): - """ Helper for :meth:`_filter_row_ranges`""" - return row_range.start_key_open or row_range.start_key_closed - - @staticmethod - def _end_key_set(row_range): - """ Helper for :meth:`_filter_row_ranges`""" - return row_range.end_key_open or row_range.end_key_closed - - -def _raise_if(predicate, *args): - """Helper for validation methods.""" - if predicate: - raise InvalidChunk(*args) diff --git a/bigtable/google/cloud/bigtable/row_filters.py b/bigtable/google/cloud/bigtable/row_filters.py deleted file mode 100644 index e8a70a9f4add..000000000000 --- a/bigtable/google/cloud/bigtable/row_filters.py +++ /dev/null @@ -1,812 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Filters for Google Cloud Bigtable Row classes.""" - - -from google.cloud._helpers import _microseconds_from_datetime -from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - -class RowFilter(object): - """Basic filter to apply to cells in a row. - - These values can be combined via :class:`RowFilterChain`, - :class:`RowFilterUnion` and :class:`ConditionalRowFilter`. - - .. note:: - - This class is a do-nothing base class for all row filters. - """ - - -class _BoolFilter(RowFilter): - """Row filter that uses a boolean flag. - - :type flag: bool - :param flag: An indicator if a setting is turned on or off. - """ - - def __init__(self, flag): - self.flag = flag - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.flag == self.flag - - def __ne__(self, other): - return not self == other - - -class SinkFilter(_BoolFilter): - """Advanced row filter to skip parent filters. - - :type flag: bool - :param flag: ADVANCED USE ONLY. Hook for introspection into the row filter. - Outputs all cells directly to the output of the read rather - than to any parent filter. 
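The request-manager logic above rebuilds the ``ReadRowsRequest`` after a stream failure so the scan resumes just past the last successfully scanned key; a rough sketch of the resulting request, with placeholder names and counts:

.. code:: python

    # Hedged sketch of the retry request build_updated_request() produces.
    from google.cloud.bigtable_v2.proto import bigtable_pb2, data_pb2

    last_scanned_key = b"row-0040"     # placeholder: last key successfully read
    rows_read_so_far = 40              # placeholder: rows already returned
    resumed = bigtable_pb2.ReadRowsRequest(
        table_name="projects/p/instances/i/tables/t",
        rows_limit=max(1, 100 - rows_read_so_far),
        rows=data_pb2.RowSet(
            row_ranges=[data_pb2.RowRange(start_key_open=last_scanned_key)]
        ),
    )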
Cannot be used within the - ``predicate_filter``, ``true_filter``, or ``false_filter`` - of a :class:`ConditionalRowFilter`. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(sink=self.flag) - - -class PassAllFilter(_BoolFilter): - """Row filter equivalent to not filtering at all. - - :type flag: bool - :param flag: Matches all cells, regardless of input. Functionally - equivalent to leaving ``filter`` unset, but included for - completeness. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(pass_all_filter=self.flag) - - -class BlockAllFilter(_BoolFilter): - """Row filter that doesn't match any cells. - - :type flag: bool - :param flag: Does not match any cells, regardless of input. Useful for - temporarily disabling just part of a filter. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(block_all_filter=self.flag) - - -class _RegexFilter(RowFilter): - """Row filter that uses a regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - :type regex: bytes or str - :param regex: A regular expression (RE2) for some row filter. - """ - - def __init__(self, regex): - self.regex = _to_bytes(regex) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.regex == self.regex - - def __ne__(self, other): - return not self == other - - -class RowKeyRegexFilter(_RegexFilter): - """Row filter for a row key regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - .. note:: - - Special care need be used with the expression used. Since - each of these properties can contain arbitrary bytes, the ``\\C`` - escape sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\\n``, which may be - present in a binary value. - - :type regex: bytes - :param regex: A regular expression (RE2) to match cells from rows with row - keys that satisfy this regex. For a - ``CheckAndMutateRowRequest``, this filter is unnecessary - since the row key is already specified. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex) - - -class RowSampleFilter(RowFilter): - """Matches all cells from a row with probability p. - - :type sample: float - :param sample: The probability of matching a cell (must be in the - interval ``(0, 1)`` The end points are excluded). - """ - - def __init__(self, sample): - self.sample = sample - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.sample == self.sample - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. 
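A rough usage sketch (key patterns and sample rates are illustrative only) of the simple filters deleted above; each one serializes to the Data API ``RowFilter`` protobuf via ``to_pb()``::

    from google.cloud.bigtable.row_filters import (
        PassAllFilter,
        RowKeyRegexFilter,
        RowSampleFilter,
    )

    # Match only rows whose keys start with b"user#" (RE2 syntax over bytes).
    key_filter = RowKeyRegexFilter(b"user#.*")
    # Keep roughly 10% of the matching rows.
    sample_filter = RowSampleFilter(0.1)
    # Match everything; functionally the same as passing no filter at all.
    pass_all = PassAllFilter(True)

    # Each filter converts to the RowFilter protobuf sent with a read request.
    print(key_filter.to_pb())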
- """ - return data_v2_pb2.RowFilter(row_sample_filter=self.sample) - - -class FamilyNameRegexFilter(_RegexFilter): - """Row filter for a family name regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - :type regex: str - :param regex: A regular expression (RE2) to match cells from columns in a - given column family. For technical reasons, the regex must - not contain the ``':'`` character, even if it is not being - used as a literal. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex) - - -class ColumnQualifierRegexFilter(_RegexFilter): - """Row filter for a column qualifier regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - .. note:: - - Special care need be used with the expression used. Since - each of these properties can contain arbitrary bytes, the ``\\C`` - escape sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\\n``, which may be - present in a binary value. - - :type regex: bytes - :param regex: A regular expression (RE2) to match cells from column that - match this regex (irrespective of column family). - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex) - - -class TimestampRange(object): - """Range of time with inclusive lower and exclusive upper bounds. - - :type start: :class:`datetime.datetime` - :param start: (Optional) The (inclusive) lower bound of the timestamp - range. If omitted, defaults to Unix epoch. - - :type end: :class:`datetime.datetime` - :param end: (Optional) The (exclusive) upper bound of the timestamp - range. If omitted, no upper bound is used. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.start == self.start and other.end == self.end - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the :class:`TimestampRange` to a protobuf. - - :rtype: :class:`.data_v2_pb2.TimestampRange` - :returns: The converted current object. - """ - timestamp_range_kwargs = {} - if self.start is not None: - timestamp_range_kwargs["start_timestamp_micros"] = ( - _microseconds_from_datetime(self.start) // 1000 * 1000 - ) - if self.end is not None: - end_time = _microseconds_from_datetime(self.end) - if end_time % 1000 != 0: - end_time = end_time // 1000 * 1000 + 1000 - timestamp_range_kwargs["end_timestamp_micros"] = end_time - return data_v2_pb2.TimestampRange(**timestamp_range_kwargs) - - -class TimestampRangeFilter(RowFilter): - """Row filter that limits cells to a range of time. - - :type range_: :class:`TimestampRange` - :param range_: Range of time that cells should match against. 
- """ - - def __init__(self, range_): - self.range_ = range_ - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.range_ == self.range_ - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the row filter to a protobuf. - - First converts the ``range_`` on the current object to a protobuf and - then uses it in the ``timestamp_range_filter`` field. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_.to_pb()) - - -class ColumnRangeFilter(RowFilter): - """A row filter to restrict to a range of columns. - - Both the start and end column can be included or excluded in the range. - By default, we include them both, but this can be changed with optional - flags. - - :type column_family_id: str - :param column_family_id: The column family that contains the columns. Must - be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type start_column: bytes - :param start_column: The start of the range of columns. If no value is - used, the backend applies no upper bound to the - values. - - :type end_column: bytes - :param end_column: The end of the range of columns. If no value is used, - the backend applies no upper bound to the values. - - :type inclusive_start: bool - :param inclusive_start: Boolean indicating if the start column should be - included in the range (or excluded). Defaults - to :data:`True` if ``start_column`` is passed and - no ``inclusive_start`` was given. - - :type inclusive_end: bool - :param inclusive_end: Boolean indicating if the end column should be - included in the range (or excluded). Defaults - to :data:`True` if ``end_column`` is passed and - no ``inclusive_end`` was given. - - :raises: :class:`ValueError ` if ``inclusive_start`` - is set but no ``start_column`` is given or if ``inclusive_end`` - is set but no ``end_column`` is given - """ - - def __init__( - self, - column_family_id, - start_column=None, - end_column=None, - inclusive_start=None, - inclusive_end=None, - ): - self.column_family_id = column_family_id - - if inclusive_start is None: - inclusive_start = True - elif start_column is None: - raise ValueError( - "Inclusive start was specified but no " "start column was given." - ) - self.start_column = start_column - self.inclusive_start = inclusive_start - - if inclusive_end is None: - inclusive_end = True - elif end_column is None: - raise ValueError( - "Inclusive end was specified but no " "end column was given." - ) - self.end_column = end_column - self.inclusive_end = inclusive_end - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - other.column_family_id == self.column_family_id - and other.start_column == self.start_column - and other.end_column == self.end_column - and other.inclusive_start == self.inclusive_start - and other.inclusive_end == self.inclusive_end - ) - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the row filter to a protobuf. - - First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it - in the ``column_range_filter`` field. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. 
- """ - column_range_kwargs = {"family_name": self.column_family_id} - if self.start_column is not None: - if self.inclusive_start: - key = "start_qualifier_closed" - else: - key = "start_qualifier_open" - column_range_kwargs[key] = _to_bytes(self.start_column) - if self.end_column is not None: - if self.inclusive_end: - key = "end_qualifier_closed" - else: - key = "end_qualifier_open" - column_range_kwargs[key] = _to_bytes(self.end_column) - - column_range = data_v2_pb2.ColumnRange(**column_range_kwargs) - return data_v2_pb2.RowFilter(column_range_filter=column_range) - - -class ValueRegexFilter(_RegexFilter): - """Row filter for a value regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - .. note:: - - Special care need be used with the expression used. Since - each of these properties can contain arbitrary bytes, the ``\\C`` - escape sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\\n``, which may be - present in a binary value. - - :type regex: bytes - :param regex: A regular expression (RE2) to match cells with values that - match this regex. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(value_regex_filter=self.regex) - - -class ValueRangeFilter(RowFilter): - """A range of values to restrict to in a row filter. - - Will only match cells that have values in this range. - - Both the start and end value can be included or excluded in the range. - By default, we include them both, but this can be changed with optional - flags. - - :type start_value: bytes - :param start_value: The start of the range of values. If no value is used, - the backend applies no lower bound to the values. - - :type end_value: bytes - :param end_value: The end of the range of values. If no value is used, - the backend applies no upper bound to the values. - - :type inclusive_start: bool - :param inclusive_start: Boolean indicating if the start value should be - included in the range (or excluded). Defaults - to :data:`True` if ``start_value`` is passed and - no ``inclusive_start`` was given. - - :type inclusive_end: bool - :param inclusive_end: Boolean indicating if the end value should be - included in the range (or excluded). Defaults - to :data:`True` if ``end_value`` is passed and - no ``inclusive_end`` was given. - - :raises: :class:`ValueError ` if ``inclusive_start`` - is set but no ``start_value`` is given or if ``inclusive_end`` - is set but no ``end_value`` is given - """ - - def __init__( - self, start_value=None, end_value=None, inclusive_start=None, inclusive_end=None - ): - if inclusive_start is None: - inclusive_start = True - elif start_value is None: - raise ValueError( - "Inclusive start was specified but no " "start value was given." - ) - self.start_value = start_value - self.inclusive_start = inclusive_start - - if inclusive_end is None: - inclusive_end = True - elif end_value is None: - raise ValueError( - "Inclusive end was specified but no " "end value was given." 
- ) - self.end_value = end_value - self.inclusive_end = inclusive_end - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - other.start_value == self.start_value - and other.end_value == self.end_value - and other.inclusive_start == self.inclusive_start - and other.inclusive_end == self.inclusive_end - ) - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the row filter to a protobuf. - - First converts to a :class:`.data_v2_pb2.ValueRange` and then uses - it to create a row filter protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - value_range_kwargs = {} - if self.start_value is not None: - if self.inclusive_start: - key = "start_value_closed" - else: - key = "start_value_open" - value_range_kwargs[key] = _to_bytes(self.start_value) - if self.end_value is not None: - if self.inclusive_end: - key = "end_value_closed" - else: - key = "end_value_open" - value_range_kwargs[key] = _to_bytes(self.end_value) - - value_range = data_v2_pb2.ValueRange(**value_range_kwargs) - return data_v2_pb2.RowFilter(value_range_filter=value_range) - - -class _CellCountFilter(RowFilter): - """Row filter that uses an integer count of cells. - - The cell count is used as an offset or a limit for the number - of results returned. - - :type num_cells: int - :param num_cells: An integer count / offset / limit. - """ - - def __init__(self, num_cells): - self.num_cells = num_cells - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.num_cells == self.num_cells - - def __ne__(self, other): - return not self == other - - -class CellsRowOffsetFilter(_CellCountFilter): - """Row filter to skip cells in a row. - - :type num_cells: int - :param num_cells: Skips the first N cells of the row. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(cells_per_row_offset_filter=self.num_cells) - - -class CellsRowLimitFilter(_CellCountFilter): - """Row filter to limit cells in a row. - - :type num_cells: int - :param num_cells: Matches only the first N cells of the row. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) - - -class CellsColumnLimitFilter(_CellCountFilter): - """Row filter to limit cells in a column. - - :type num_cells: int - :param num_cells: Matches only the most recent N cells within each column. - This filters a (family name, column) pair, based on - timestamps of each cell. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(cells_per_column_limit_filter=self.num_cells) - - -class StripValueTransformerFilter(_BoolFilter): - """Row filter that transforms cells into empty string (0 bytes). - - :type flag: bool - :param flag: If :data:`True`, replaces each cell's value with the empty - string. As the name indicates, this is more useful as a - transformer than a generic query / filter. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. 
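The cell-count and value-stripping filters above are commonly used for "latest version only" or "keys only" reads; a rough sketch::

    from google.cloud.bigtable.row_filters import (
        CellsColumnLimitFilter,
        CellsRowLimitFilter,
        StripValueTransformerFilter,
    )

    # Keep only the most recent cell in each (family, qualifier) pair.
    latest_only = CellsColumnLimitFilter(1)
    # Cap each row at its first 100 cells.
    first_hundred = CellsRowLimitFilter(100)
    # Replace every value with the empty string (a "keys only" style read).
    keys_only = StripValueTransformerFilter(True)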
- """ - return data_v2_pb2.RowFilter(strip_value_transformer=self.flag) - - -class ApplyLabelFilter(RowFilter): - """Filter to apply labels to cells. - - Intended to be used as an intermediate filter on a pre-existing filtered - result set. This way if two sets are combined, the label can tell where - the cell(s) originated.This allows the client to determine which results - were produced from which part of the filter. - - .. note:: - - Due to a technical limitation of the backend, it is not currently - possible to apply multiple labels to a cell. - - :type label: str - :param label: Label to apply to cells in the output row. Values must be - at most 15 characters long, and match the pattern - ``[a-z0-9\\-]+``. - """ - - def __init__(self, label): - self.label = label - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.label == self.label - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - return data_v2_pb2.RowFilter(apply_label_transformer=self.label) - - -class _FilterCombination(RowFilter): - """Chain of row filters. - - Sends rows through several filters in sequence. The filters are "chained" - together to process a row. After the first filter is applied, the second - is applied to the filtered output and so on for subsequent filters. - - :type filters: list - :param filters: List of :class:`RowFilter` - """ - - def __init__(self, filters=None): - if filters is None: - filters = [] - self.filters = filters - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.filters == self.filters - - def __ne__(self, other): - return not self == other - - -class RowFilterChain(_FilterCombination): - """Chain of row filters. - - Sends rows through several filters in sequence. The filters are "chained" - together to process a row. After the first filter is applied, the second - is applied to the filtered output and so on for subsequent filters. - - :type filters: list - :param filters: List of :class:`RowFilter` - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - chain = data_v2_pb2.RowFilter.Chain( - filters=[row_filter.to_pb() for row_filter in self.filters] - ) - return data_v2_pb2.RowFilter(chain=chain) - - -class RowFilterUnion(_FilterCombination): - """Union of row filters. - - Sends rows through several filters simultaneously, then - merges / interleaves all the filtered results together. - - If multiple cells are produced with the same column and timestamp, - they will all appear in the output row in an unspecified mutual order. - - :type filters: list - :param filters: List of :class:`RowFilter` - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - interleave = data_v2_pb2.RowFilter.Interleave( - filters=[row_filter.to_pb() for row_filter in self.filters] - ) - return data_v2_pb2.RowFilter(interleave=interleave) - - -class ConditionalRowFilter(RowFilter): - """Conditional row filter which exhibits ternary behavior. - - Executes one of two filters based on another filter. If the ``base_filter`` - returns any cells in the row, then ``true_filter`` is executed. 
If not, - then ``false_filter`` is executed. - - .. note:: - - The ``base_filter`` does not execute atomically with the true and false - filters, which may lead to inconsistent or unexpected results. - - Additionally, executing a :class:`ConditionalRowFilter` has poor - performance on the server, especially when ``false_filter`` is set. - - :type base_filter: :class:`RowFilter` - :param base_filter: The filter to condition on before executing the - true/false filters. - - :type true_filter: :class:`RowFilter` - :param true_filter: (Optional) The filter to execute if there are any cells - matching ``base_filter``. If not provided, no results - will be returned in the true case. - - :type false_filter: :class:`RowFilter` - :param false_filter: (Optional) The filter to execute if there are no cells - matching ``base_filter``. If not provided, no results - will be returned in the false case. - """ - - def __init__(self, base_filter, true_filter=None, false_filter=None): - self.base_filter = base_filter - self.true_filter = true_filter - self.false_filter = false_filter - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - other.base_filter == self.base_filter - and other.true_filter == self.true_filter - and other.false_filter == self.false_filter - ) - - def __ne__(self, other): - return not self == other - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_v2_pb2.RowFilter` - :returns: The converted current object. - """ - condition_kwargs = {"predicate_filter": self.base_filter.to_pb()} - if self.true_filter is not None: - condition_kwargs["true_filter"] = self.true_filter.to_pb() - if self.false_filter is not None: - condition_kwargs["false_filter"] = self.false_filter.to_pb() - condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) - return data_v2_pb2.RowFilter(condition=condition) diff --git a/bigtable/google/cloud/bigtable/row_set.py b/bigtable/google/cloud/bigtable/row_set.py deleted file mode 100644 index 0cb6443b05eb..000000000000 --- a/bigtable/google/cloud/bigtable/row_set.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User-friendly container for Google Cloud Bigtable RowSet """ - - -from google.cloud._helpers import _to_bytes - - -class RowSet(object): - """ Convenience wrapper of google.bigtable.v2.RowSet - - Useful for creating a set of row keys and row ranges, which can - be passed to yield_rows method of class:`.Table.yield_rows`. 
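Before the ``RowSet`` helpers below, a hedged sketch of how the combining filters above (chain, interleave, condition) are typically composed; the family name and label are made up::

    from google.cloud.bigtable.row_filters import (
        ApplyLabelFilter,
        CellsColumnLimitFilter,
        ConditionalRowFilter,
        FamilyNameRegexFilter,
        RowFilterChain,
        StripValueTransformerFilter,
    )

    # Latest cell per column, restricted to the "stats" family.
    chain = RowFilterChain(
        filters=[FamilyNameRegexFilter("stats"), CellsColumnLimitFilter(1)]
    )

    # If the chain matches anything, label those cells; otherwise strip values.
    conditional = ConditionalRowFilter(
        base_filter=chain,
        true_filter=ApplyLabelFilter("from-stats"),
        false_filter=StripValueTransformerFilter(True),
    )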
- """ - - def __init__(self): - self.row_keys = [] - self.row_ranges = [] - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - - if len(other.row_keys) != len(self.row_keys): - return False - - if len(other.row_ranges) != len(self.row_ranges): - return False - - if not set(other.row_keys) == set(self.row_keys): - return False - - if not set(other.row_ranges) == set(self.row_ranges): - return False - - return True - - def __ne__(self, other): - return not self == other - - def add_row_key(self, row_key): - """Add row key to row_keys list. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_add_row_key] - :end-before: [END bigtable_add_row_key] - - :type row_key: bytes - :param row_key: The key of a row to read - """ - self.row_keys.append(row_key) - - def add_row_range(self, row_range): - """Add row_range to row_ranges list. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_add_row_range] - :end-before: [END bigtable_add_row_range] - - :type row_range: class:`RowRange` - :param row_range: The row range object having start and end key - """ - self.row_ranges.append(row_range) - - def add_row_range_from_keys( - self, start_key=None, end_key=None, start_inclusive=True, end_inclusive=False - ): - """Add row range to row_ranges list from the row keys - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_range_from_keys] - :end-before: [END bigtable_row_range_from_keys] - - :type start_key: bytes - :param start_key: (Optional) Start key of the row range. If left empty, - will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) End key of the row range. If left empty, - will be interpreted as the empty string and range will - be unbounded on the high end. - - :type start_inclusive: bool - :param start_inclusive: (Optional) Whether the ``start_key`` should be - considered inclusive. The default is True (inclusive). - - :type end_inclusive: bool - :param end_inclusive: (Optional) Whether the ``end_key`` should be - considered inclusive. The default is False (exclusive). - """ - row_range = RowRange(start_key, end_key, start_inclusive, end_inclusive) - self.row_ranges.append(row_range) - - def _update_message_request(self, message): - """Add row keys and row range to given request message - - :type message: class:`data_messages_v2_pb2.ReadRowsRequest` - :param message: The ``ReadRowsRequest`` protobuf - """ - for each in self.row_keys: - message.rows.row_keys.append(_to_bytes(each)) - - for each in self.row_ranges: - r_kwrags = each.get_range_kwargs() - message.rows.row_ranges.add(**r_kwrags) - - -class RowRange(object): - """ Convenience wrapper of google.bigtable.v2.RowRange - - :type start_key: bytes - :param start_key: (Optional) Start key of the row range. If left empty, - will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) End key of the row range. If left empty, - will be interpreted as the empty string and range will - be unbounded on the high end. - - :type start_inclusive: bool - :param start_inclusive: (Optional) Whether the ``start_key`` should be - considered inclusive. The default is True (inclusive). - - :type end_inclusive: bool - :param end_inclusive: (Optional) Whether the ``end_key`` should be - considered inclusive. The default is False (exclusive). 
- """ - - def __init__( - self, start_key=None, end_key=None, start_inclusive=True, end_inclusive=False - ): - self.start_key = start_key - self.start_inclusive = start_inclusive - self.end_key = end_key - self.end_inclusive = end_inclusive - - def _key(self): - """A tuple key that uniquely describes this field. - - Used to compute this instance's hashcode and evaluate equality. - - Returns: - Tuple[str]: The contents of this :class:`.RowRange`. - """ - return (self.start_key, self.start_inclusive, self.end_key, self.end_inclusive) - - def __hash__(self): - return hash(self._key()) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._key() == other._key() - - def __ne__(self, other): - return not self == other - - def get_range_kwargs(self): - """ Convert row range object to dict which can be passed to - google.bigtable.v2.RowRange add method. - """ - range_kwargs = {} - if self.start_key is not None: - start_key_key = "start_key_open" - if self.start_inclusive: - start_key_key = "start_key_closed" - range_kwargs[start_key_key] = _to_bytes(self.start_key) - - if self.end_key is not None: - end_key_key = "end_key_open" - if self.end_inclusive: - end_key_key = "end_key_closed" - range_kwargs[end_key_key] = _to_bytes(self.end_key) - return range_kwargs diff --git a/bigtable/google/cloud/bigtable/table.py b/bigtable/google/cloud/bigtable/table.py deleted file mode 100644 index 69379b21d57e..000000000000 --- a/bigtable/google/cloud/bigtable/table.py +++ /dev/null @@ -1,1133 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""User-friendly container for Google Cloud Bigtable Table.""" - - -from grpc import StatusCode - -from google.api_core import timeout -from google.api_core.exceptions import RetryError -from google.api_core.exceptions import NotFound -from google.api_core.retry import if_exception_type -from google.api_core.retry import Retry -from google.api_core.gapic_v1.method import wrap_method -from google.cloud._helpers import _to_bytes -from google.cloud.bigtable.column_family import _gc_rule_from_pb -from google.cloud.bigtable.column_family import ColumnFamily -from google.cloud.bigtable.batcher import MutationsBatcher -from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES -from google.cloud.bigtable.policy import Policy -from google.cloud.bigtable.row import AppendRow -from google.cloud.bigtable.row import ConditionalRow -from google.cloud.bigtable.row import DirectRow -from google.cloud.bigtable.row_data import PartialRowsData -from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS -from google.cloud.bigtable.row_set import RowSet -from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable import enums -from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 as admin_messages_v2_pb2 -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, -) - -import warnings - - -# Maximum number of mutations in bulk (MutateRowsRequest message): -# (https://cloud.google.com/bigtable/docs/reference/data/rpc/ -# google.bigtable.v2#google.bigtable.v2.MutateRowRequest) -_MAX_BULK_MUTATIONS = 100000 -VIEW_NAME_ONLY = enums.Table.View.NAME_ONLY - - -class _BigtableRetryableError(Exception): - """Retry-able error expected by the default retry strategy.""" - - -DEFAULT_RETRY = Retry( - predicate=if_exception_type(_BigtableRetryableError), - initial=1.0, - maximum=15.0, - multiplier=2.0, - deadline=120.0, # 2 minutes -) -"""The default retry strategy to be used on retry-able errors. - -Used by :meth:`~google.cloud.bigtable.table.Table.mutate_rows`. -""" - - -class TableMismatchError(ValueError): - """Row from another table.""" - - -class TooManyMutationsError(ValueError): - """The number of mutations for bulk request is too big.""" - - -class Table(object): - """Representation of a Google Cloud Bigtable Table. - - .. note:: - - We don't define any properties on a table other than the name. - The only other fields are ``column_families`` and ``granularity``, - The ``column_families`` are not stored locally and - ``granularity`` is an enum with only one value. - - We can use a :class:`Table` to: - - * :meth:`create` the table - * :meth:`delete` the table - * :meth:`list_column_families` in the table - - :type table_id: str - :param table_id: The ID of the table. - - :type instance: :class:`~google.cloud.bigtable.instance.Instance` - :param instance: The instance that owns the table. - - :type app_profile_id: str - :param app_profile_id: (Optional) The unique name of the AppProfile. - """ - - def __init__(self, table_id, instance, mutation_timeout=None, app_profile_id=None): - self.table_id = table_id - self._instance = instance - self._app_profile_id = app_profile_id - self.mutation_timeout = mutation_timeout - - @property - def name(self): - """Table name used in requests. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_name] - :end-before: [END bigtable_table_name] - - .. 
note:: - - This property will not change if ``table_id`` does not, but the - return value is not cached. - - The table name is of the form - - ``"projects/../instances/../tables/{table_id}"`` - - :rtype: str - :returns: The table name. - """ - project = self._instance._client.project - instance_id = self._instance.instance_id - table_client = self._instance._client.table_data_client - return table_client.table_path( - project=project, instance=instance_id, table=self.table_id - ) - - def get_iam_policy(self): - """Gets the IAM access control policy for this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_get_iam_policy] - :end-before: [END bigtable_table_get_iam_policy] - - :rtype: :class:`google.cloud.bigtable.policy.Policy` - :returns: The current IAM policy of this table. - """ - table_client = self._instance._client.table_admin_client - resp = table_client.get_iam_policy(resource=self.name) - return Policy.from_pb(resp) - - def set_iam_policy(self, policy): - """Sets the IAM access control policy for this table. Replaces any - existing policy. - - For more information about policy, please see documentation of - class `google.cloud.bigtable.policy.Policy` - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_set_iam_policy] - :end-before: [END bigtable_table_set_iam_policy] - - :type policy: :class:`google.cloud.bigtable.policy.Policy` - :param policy: A new IAM policy to replace the current IAM policy - of this table. - - :rtype: :class:`google.cloud.bigtable.policy.Policy` - :returns: The current IAM policy of this table. - """ - table_client = self._instance._client.table_admin_client - resp = table_client.set_iam_policy(resource=self.name, policy=policy.to_pb()) - return Policy.from_pb(resp) - - def test_iam_permissions(self, permissions): - """Tests whether the caller has the given permissions for this table. - Returns the permissions that the caller has. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_test_iam_permissions] - :end-before: [END bigtable_table_test_iam_permissions] - - :type permissions: list - :param permissions: The set of permissions to check for - the ``resource``. Permissions with wildcards (such as '*' - or 'storage.*') are not allowed. For more information see - `IAM Overview - `_. - `Bigtable Permissions - `_. - - :rtype: list - :returns: A List(string) of permissions allowed on the table. - """ - table_client = self._instance._client.table_admin_client - resp = table_client.test_iam_permissions( - resource=self.name, permissions=permissions - ) - return list(resp.permissions) - - def column_family(self, column_family_id, gc_rule=None): - """Factory to create a column family associated with this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_column_family] - :end-before: [END bigtable_table_column_family] - - :type column_family_id: str - :param column_family_id: The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type gc_rule: :class:`.GarbageCollectionRule` - :param gc_rule: (Optional) The garbage collection settings for this - column family. - - :rtype: :class:`.ColumnFamily` - :returns: A column family owned by this table. - """ - return ColumnFamily(column_family_id, self, gc_rule=gc_rule) - - def row(self, row_key, filter_=None, append=False): - """Factory to create a row associated with this table. 
- - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_row] - :end-before: [END bigtable_table_row] - - .. warning:: - - At most one of ``filter_`` and ``append`` can be used in a - :class:`~google.cloud.bigtable.row.Row`. - - :type row_key: bytes - :param row_key: The key for the row being created. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) Filter to be used for conditional mutations. - See :class:`.ConditionalRow` for more details. - - :type append: bool - :param append: (Optional) Flag to determine if the row should be used - for append mutations. - - :rtype: :class:`~google.cloud.bigtable.row.Row` - :returns: A row owned by this table. - :raises: :class:`ValueError ` if both - ``filter_`` and ``append`` are used. - """ - warnings.warn( - "This method will be deprecated in future versions. Please " - "use Table.append_row(), Table.conditional_row() " - "and Table.direct_row() methods instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - if append and filter_ is not None: - raise ValueError("At most one of filter_ and append can be set") - if append: - return AppendRow(row_key, self) - elif filter_ is not None: - return ConditionalRow(row_key, self, filter_=filter_) - else: - return DirectRow(row_key, self) - - def append_row(self, row_key): - """Create a :class:`~google.cloud.bigtable.row.AppendRow` associated with this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_append_row] - :end-before: [END bigtable_table_append_row] - - Args: - row_key (bytes): The key for the row being created. - - Returns: - A row owned by this table. - """ - return AppendRow(row_key, self) - - def direct_row(self, row_key): - """Create a :class:`~google.cloud.bigtable.row.DirectRow` associated with this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_direct_row] - :end-before: [END bigtable_table_direct_row] - - Args: - row_key (bytes): The key for the row being created. - - Returns: - A row owned by this table. - """ - return DirectRow(row_key, self) - - def conditional_row(self, row_key, filter_): - """Create a :class:`~google.cloud.bigtable.row.ConditionalRow` associated with this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_conditional_row] - :end-before: [END bigtable_table_conditional_row] - - Args: - row_key (bytes): The key for the row being created. - - filter_ (:class:`.RowFilter`): (Optional) Filter to be used for - conditional mutations. See :class:`.ConditionalRow` for more details. - - Returns: - A row owned by this table. - """ - return ConditionalRow(row_key, self, filter_=filter_) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other.table_id == self.table_id and other._instance == self._instance - - def __ne__(self, other): - return not self == other - - def create(self, initial_split_keys=[], column_families={}): - """Creates this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_table] - :end-before: [END bigtable_create_table] - - .. note:: - - A create request returns a - :class:`._generated.table_pb2.Table` but we don't use - this response. - - :type initial_split_keys: list - :param initial_split_keys: (Optional) list of row keys in bytes that - will be used to initially split the table - into several tablets. 
- - :type column_families: dict - :param column_failies: (Optional) A map columns to create. The key is - the column_id str and the value is a - :class:`GarbageCollectionRule` - """ - table_client = self._instance._client.table_admin_client - instance_name = self._instance.name - - families = { - id: ColumnFamily(id, self, rule).to_pb() - for (id, rule) in column_families.items() - } - table = admin_messages_v2_pb2.Table(column_families=families) - - split = table_admin_messages_v2_pb2.CreateTableRequest.Split - splits = [split(key=_to_bytes(key)) for key in initial_split_keys] - - table_client.create_table( - parent=instance_name, - table_id=self.table_id, - table=table, - initial_splits=splits, - ) - - def exists(self): - """Check whether the table exists. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_check_table_exists] - :end-before: [END bigtable_check_table_exists] - - :rtype: bool - :returns: True if the table exists, else False. - """ - table_client = self._instance._client.table_admin_client - try: - table_client.get_table(name=self.name, view=VIEW_NAME_ONLY) - return True - except NotFound: - return False - - def delete(self): - """Delete this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_delete_table] - :end-before: [END bigtable_delete_table] - - """ - table_client = self._instance._client.table_admin_client - table_client.delete_table(name=self.name) - - def list_column_families(self): - """List the column families owned by this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_list_column_families] - :end-before: [END bigtable_list_column_families] - - :rtype: dict - :returns: Dictionary of column families attached to this table. Keys - are strings (column family names) and values are - :class:`.ColumnFamily` instances. - :raises: :class:`ValueError ` if the column - family name from the response does not agree with the computed - name from the column family ID. - """ - table_client = self._instance._client.table_admin_client - table_pb = table_client.get_table(self.name) - - result = {} - for column_family_id, value_pb in table_pb.column_families.items(): - gc_rule = _gc_rule_from_pb(value_pb.gc_rule) - column_family = self.column_family(column_family_id, gc_rule=gc_rule) - result[column_family_id] = column_family - return result - - def get_cluster_states(self): - """List the cluster states owned by this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_get_cluster_states] - :end-before: [END bigtable_get_cluster_states] - - :rtype: dict - :returns: Dictionary of cluster states for this table. - Keys are cluster ids and values are - :class: 'ClusterState' instances. - """ - - REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW - table_client = self._instance._client.table_admin_client - table_pb = table_client.get_table(self.name, view=REPLICATION_VIEW) - - return { - cluster_id: ClusterState(value_pb.replication_state) - for cluster_id, value_pb in table_pb.cluster_states.items() - } - - def read_row(self, row_key, filter_=None): - """Read a single row from this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_read_row] - :end-before: [END bigtable_read_row] - - :type row_key: bytes - :param row_key: The key of the row to read from. 
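A hedged sketch of the table lifecycle calls above (``create``, ``exists``, ``list_column_families``); the project, instance, and table IDs are placeholders, and ``MaxVersionsGCRule`` is assumed from the companion ``column_family`` module::

    from google.cloud import bigtable
    from google.cloud.bigtable import column_family

    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    if not table.exists():
        # One column family, keeping at most two versions of each cell.
        table.create(column_families={"stats": column_family.MaxVersionsGCRule(2)})
    print(sorted(table.list_column_families()))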
- - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - row. If unset, returns the entire row. - - :rtype: :class:`.PartialRowData`, :data:`NoneType ` - :returns: The contents of the row if any chunks were returned in - the response, otherwise :data:`None`. - :raises: :class:`ValueError ` if a commit row - chunk is never encountered. - """ - row_set = RowSet() - row_set.add_row_key(row_key) - result_iter = iter(self.read_rows(filter_=filter_, row_set=row_set)) - row = next(result_iter, None) - if next(result_iter, None) is not None: - raise ValueError("More than one row was returned.") - return row - - def read_rows( - self, - start_key=None, - end_key=None, - limit=None, - filter_=None, - end_inclusive=False, - row_set=None, - retry=DEFAULT_RETRY_READ_ROWS, - ): - """Read rows from this table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_read_rows] - :end-before: [END bigtable_read_rows] - - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. - - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads every column in - each row. - - :type end_inclusive: bool - :param end_inclusive: (Optional) Whether the ``end_key`` should be - considered inclusive. The default is False (exclusive). - - :type row_set: :class:`row_set.RowSet` - :param row_set: (Optional) The row set containing multiple row keys and - row_ranges. - - :type retry: :class:`~google.api_core.retry.Retry` - :param retry: - (Optional) Retry delay and deadline arguments. To override, the - default value :attr:`DEFAULT_RETRY_READ_ROWS` can be used and - modified with the :meth:`~google.api_core.retry.Retry.with_delay` - method or the :meth:`~google.api_core.retry.Retry.with_deadline` - method. - - :rtype: :class:`.PartialRowsData` - :returns: A :class:`.PartialRowsData` a generator for consuming - the streamed results. - """ - request_pb = _create_row_request( - self.name, - start_key=start_key, - end_key=end_key, - filter_=filter_, - limit=limit, - end_inclusive=end_inclusive, - app_profile_id=self._app_profile_id, - row_set=row_set, - ) - data_client = self._instance._client.table_data_client - return PartialRowsData(data_client.transport.read_rows, request_pb, retry) - - def yield_rows(self, **kwargs): - """Read rows from this table. - - .. warning:: - This method will be removed in future releases. Please use - ``read_rows`` instead. - - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. 
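An end-to-end sketch of the ``read_rows`` path documented above, assuming the top-level ``Client`` and ``Instance`` factories from the rest of this package (these modules now live in the split-out googleapis/python-bigtable repository); all IDs and keys are placeholders::

    from google.cloud import bigtable
    from google.cloud.bigtable.row_filters import CellsColumnLimitFilter
    from google.cloud.bigtable.row_set import RowSet

    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    row_set = RowSet()
    row_set.add_row_range_from_keys(start_key=b"phone#", end_key=b"phone$")

    # Stream rows, keeping only the newest cell per column.
    for row in table.read_rows(row_set=row_set, filter_=CellsColumnLimitFilter(1)):
        print(row.row_key, row.cells)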
- - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads every column in - each row. - - :type row_set: :class:`row_set.RowSet` - :param row_set: (Optional) The row set containing multiple row keys and - row_ranges. - - :rtype: :class:`.PartialRowData` - :returns: A :class:`.PartialRowData` for each row returned - """ - warnings.warn( - "`yield_rows()` is deprecated; use `read_rows()` instead", - DeprecationWarning, - stacklevel=2, - ) - return self.read_rows(**kwargs) - - def mutate_rows(self, rows, retry=DEFAULT_RETRY): - """Mutates multiple rows in bulk. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_mutate_rows] - :end-before: [END bigtable_mutate_rows] - - The method tries to update all specified rows. - If some of the rows weren't updated, it would not remove mutations. - They can be applied to the row separately. - If row mutations finished successfully, they would be cleaned up. - - Optionally, a ``retry`` strategy can be specified to re-attempt - mutations on rows that return transient errors. This method will retry - until all rows succeed or until the request deadline is reached. To - specify a ``retry`` strategy of "do-nothing", a deadline of ``0.0`` - can be specified. - - :type rows: list - :param rows: List or other iterable of :class:`.DirectRow` instances. - - :type retry: :class:`~google.api_core.retry.Retry` - :param retry: - (Optional) Retry delay and deadline arguments. To override, the - default value :attr:`DEFAULT_RETRY` can be used and modified with - the :meth:`~google.api_core.retry.Retry.with_delay` method or the - :meth:`~google.api_core.retry.Retry.with_deadline` method. - - :rtype: list - :returns: A list of response statuses (`google.rpc.status_pb2.Status`) - corresponding to success or failure of each row mutation - sent. These will be in the same order as the `rows`. - """ - retryable_mutate_rows = _RetryableMutateRowsWorker( - self._instance._client, - self.name, - rows, - app_profile_id=self._app_profile_id, - timeout=self.mutation_timeout, - ) - return retryable_mutate_rows(retry=retry) - - def sample_row_keys(self): - """Read a sample of row keys in the table. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_sample_row_keys] - :end-before: [END bigtable_sample_row_keys] - - The returned row keys will delimit contiguous sections of the table of - approximately equal size, which can be used to break up the data for - distributed tasks like mapreduces. - - The elements in the iterator are a SampleRowKeys response and they have - the properties ``offset_bytes`` and ``row_key``. They occur in sorted - order. The table might have contents before the first row key in the - list and after the last one, but a key containing the empty string - indicates "end of table" and will be the last response given, if - present. - - .. note:: - - Row keys in this list may not have ever been written to or read - from, and users should therefore not make any assumptions about the - row key structure that are specific to their use case. - - The ``offset_bytes`` field on a response indicates the approximate - total storage space used by all rows in the table which precede - ``row_key``. 
Buffering the contents of all rows between two subsequent - samples would require space roughly equal to the difference in their - ``offset_bytes`` fields. - - :rtype: :class:`~google.cloud.exceptions.GrpcRendezvous` - :returns: A cancel-able iterator. Can be consumed by calling ``next()`` - or by casting to a :class:`list` and can be cancelled by - calling ``cancel()``. - """ - data_client = self._instance._client.table_data_client - response_iterator = data_client.sample_row_keys( - self.name, app_profile_id=self._app_profile_id - ) - - return response_iterator - - def truncate(self, timeout=None): - """Truncate the table - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_truncate_table] - :end-before: [END bigtable_truncate_table] - - :type timeout: float - :param timeout: (Optional) The amount of time, in seconds, to wait - for the request to complete. - - :raise: google.api_core.exceptions.GoogleAPICallError: If the - request failed for any reason. - google.api_core.exceptions.RetryError: If the request failed - due to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - client = self._instance._client - table_admin_client = client.table_admin_client - if timeout: - table_admin_client.drop_row_range( - self.name, delete_all_data_from_table=True, timeout=timeout - ) - else: - table_admin_client.drop_row_range( - self.name, delete_all_data_from_table=True - ) - - def drop_by_prefix(self, row_key_prefix, timeout=None): - """ - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_drop_by_prefix] - :end-before: [END bigtable_drop_by_prefix] - - :type row_prefix: bytes - :param row_prefix: Delete all rows that start with this row key - prefix. Prefix cannot be zero length. - - :type timeout: float - :param timeout: (Optional) The amount of time, in seconds, to wait - for the request to complete. - - :raise: google.api_core.exceptions.GoogleAPICallError: If the - request failed for any reason. - google.api_core.exceptions.RetryError: If the request failed - due to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - client = self._instance._client - table_admin_client = client.table_admin_client - if timeout: - table_admin_client.drop_row_range( - self.name, row_key_prefix=_to_bytes(row_key_prefix), timeout=timeout - ) - else: - table_admin_client.drop_row_range( - self.name, row_key_prefix=_to_bytes(row_key_prefix) - ) - - def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): - """Factory to create a mutation batcher associated with this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_mutations_batcher] - :end-before: [END bigtable_mutations_batcher] - - :type table: class - :param table: class:`~google.cloud.bigtable.table.Table`. - - :type flush_count: int - :param flush_count: (Optional) Maximum number of rows per batch. If it - reaches the max number of rows it calls finish_batch() to - mutate the current row batch. Default is FLUSH_COUNT (1000 - rows). - - :type max_row_bytes: int - :param max_row_bytes: (Optional) Max number of row mutations size to - flush. If it reaches the max number of row mutations size it - calls finish_batch() to mutate the current row batch. - Default is MAX_ROW_BYTES (5 MB). 
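A rough sketch of the bulk-mutation path described above (IDs, families, and values are placeholders, and ``DirectRow.set_cell`` is assumed from the companion ``row`` module); ``mutate_rows`` returns one ``google.rpc.status_pb2.Status`` per row, in the same order as the input::

    from google.cloud import bigtable

    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    rows = []
    for i in range(3):
        row = table.direct_row("user#{:04d}".format(i).encode())
        row.set_cell("stats", b"views", b"1")
        rows.append(row)

    statuses = table.mutate_rows(rows)
    for status in statuses:
        print(status.code)  # 0 means that row's mutations were applied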
- """ - return MutationsBatcher(self, flush_count, max_row_bytes) - - -class _RetryableMutateRowsWorker(object): - """A callable worker that can retry to mutate rows with transient errors. - - This class is a callable that can retry mutating rows that result in - transient errors. After all rows are successful or none of the rows - are retryable, any subsequent call on this callable will be a no-op. - """ - - # pylint: disable=unsubscriptable-object - RETRY_CODES = ( - StatusCode.DEADLINE_EXCEEDED.value[0], - StatusCode.ABORTED.value[0], - StatusCode.UNAVAILABLE.value[0], - ) - # pylint: enable=unsubscriptable-object - - def __init__(self, client, table_name, rows, app_profile_id=None, timeout=None): - self.client = client - self.table_name = table_name - self.rows = rows - self.app_profile_id = app_profile_id - self.responses_statuses = [None] * len(self.rows) - self.timeout = timeout - - def __call__(self, retry=DEFAULT_RETRY): - """Attempt to mutate all rows and retry rows with transient errors. - - Will retry the rows with transient errors until all rows succeed or - ``deadline`` specified in the `retry` is reached. - - :rtype: list - :returns: A list of response statuses (`google.rpc.status_pb2.Status`) - corresponding to success or failure of each row mutation - sent. These will be in the same order as the ``rows``. - """ - mutate_rows = self._do_mutate_retryable_rows - if retry: - mutate_rows = retry(self._do_mutate_retryable_rows) - - try: - mutate_rows() - except (_BigtableRetryableError, RetryError): - # - _BigtableRetryableError raised when no retry strategy is used - # and a retryable error on a mutation occurred. - # - RetryError raised when retry deadline is reached. - # In both cases, just return current `responses_statuses`. - pass - - return self.responses_statuses - - @staticmethod - def _is_retryable(status): - return status is None or status.code in _RetryableMutateRowsWorker.RETRY_CODES - - def _do_mutate_retryable_rows(self): - """Mutate all the rows that are eligible for retry. - - A row is eligible for retry if it has not been tried or if it resulted - in a transient error in a previous call. - - :rtype: list - :return: The responses statuses, which is a list of - :class:`~google.rpc.status_pb2.Status`. - :raises: One of the following: - - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - """ - retryable_rows = [] - index_into_all_rows = [] - for index, status in enumerate(self.responses_statuses): - if self._is_retryable(status): - retryable_rows.append(self.rows[index]) - index_into_all_rows.append(index) - - if not retryable_rows: - # All mutations are either successful or non-retryable now. 
- return self.responses_statuses - - mutate_rows_request = _mutate_rows_request( - self.table_name, retryable_rows, app_profile_id=self.app_profile_id - ) - data_client = self.client.table_data_client - inner_api_calls = data_client._inner_api_calls - if "mutate_rows" not in inner_api_calls: - default_retry = (data_client._method_configs["MutateRows"].retry,) - if self.timeout is None: - default_timeout = data_client._method_configs["MutateRows"].timeout - else: - default_timeout = timeout.ExponentialTimeout(deadline=self.timeout) - data_client._inner_api_calls["mutate_rows"] = wrap_method( - data_client.transport.mutate_rows, - default_retry=default_retry, - default_timeout=default_timeout, - client_info=data_client._client_info, - ) - - responses = data_client._inner_api_calls["mutate_rows"]( - mutate_rows_request, retry=None - ) - - num_responses = 0 - num_retryable_responses = 0 - for response in responses: - for entry in response.entries: - num_responses += 1 - index = index_into_all_rows[entry.index] - self.responses_statuses[index] = entry.status - if self._is_retryable(entry.status): - num_retryable_responses += 1 - if entry.status.code == 0: - self.rows[index].clear() - - if len(retryable_rows) != num_responses: - raise RuntimeError( - "Unexpected number of responses", - num_responses, - "Expected", - len(retryable_rows), - ) - - if num_retryable_responses: - raise _BigtableRetryableError - - return self.responses_statuses - - -class ClusterState(object): - """Representation of a Cluster State. - - :type replication_state: int - :param replication_state: enum value for cluster state - Possible replications_state values are - 0 for STATE_NOT_KNOWN: The replication state of the table is - unknown in this cluster. - 1 for INITIALIZING: The cluster was recently created, and the - table must finish copying - over pre-existing data from other clusters before it can - begin receiving live replication updates and serving - ``Data API`` requests. - 2 for PLANNED_MAINTENANCE: The table is temporarily unable to - serve - ``Data API`` requests from this - cluster due to planned internal maintenance. - 3 for UNPLANNED_MAINTENANCE: The table is temporarily unable - to serve - ``Data API`` requests from this - cluster due to unplanned or emergency maintenance. - 4 for READY: The table can serve - ``Data API`` requests from this - cluster. Depending on replication delay, reads may not - immediately reflect the state of the table in other clusters. - """ - - def __init__(self, replication_state): - self.replication_state = replication_state - - def __repr__(self): - """Representation of cluster state instance as string value - for cluster state. - - :rtype: ClusterState instance - :returns: ClusterState instance as representation of string - value for cluster state. - """ - replication_dict = { - enums.Table.ReplicationState.STATE_NOT_KNOWN: "STATE_NOT_KNOWN", - enums.Table.ReplicationState.INITIALIZING: "INITIALIZING", - enums.Table.ReplicationState.PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE", - enums.Table.ReplicationState.UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE", - enums.Table.ReplicationState.READY: "READY", - } - return replication_dict[self.replication_state] - - def __eq__(self, other): - """Checks if two ClusterState instances(self and other) are - equal on the basis of instance variable 'replication_state'. - - :type other: ClusterState - :param other: ClusterState instance to compare with. 
- - :rtype: Boolean value - :returns: True if two cluster state instances have same - replication_state. - """ - if not isinstance(other, self.__class__): - return False - return self.replication_state == other.replication_state - - def __ne__(self, other): - """Checks if two ClusterState instances(self and other) are - not equal. - - :type other: ClusterState. - :param other: ClusterState instance to compare with. - - :rtype: Boolean value. - :returns: True if two cluster state instances are not equal. - """ - return not self == other - - -def _create_row_request( - table_name, - start_key=None, - end_key=None, - filter_=None, - limit=None, - end_inclusive=False, - app_profile_id=None, - row_set=None, -): - """Creates a request to read rows in a table. - - :type table_name: str - :param table_name: The name of the table to read from. - - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads the entire table. - - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. - - :type end_inclusive: bool - :param end_inclusive: (Optional) Whether the ``end_key`` should be - considered inclusive. The default is False (exclusive). - - :type: app_profile_id: str - :param app_profile_id: (Optional) The unique name of the AppProfile. - - :type row_set: :class:`row_set.RowSet` - :param row_set: (Optional) The row set containing multiple row keys and - row_ranges. - - :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` - :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. - :raises: :class:`ValueError ` if both - ``row_set`` and one of ``start_key`` or ``end_key`` are set - """ - request_kwargs = {"table_name": table_name} - if (start_key is not None or end_key is not None) and row_set is not None: - raise ValueError("Row range and row set cannot be " "set simultaneously") - - if filter_ is not None: - request_kwargs["filter"] = filter_.to_pb() - if limit is not None: - request_kwargs["rows_limit"] = limit - if app_profile_id is not None: - request_kwargs["app_profile_id"] = app_profile_id - - message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) - - if start_key is not None or end_key is not None: - row_set = RowSet() - row_set.add_row_range(RowRange(start_key, end_key, end_inclusive=end_inclusive)) - - if row_set is not None: - row_set._update_message_request(message) - - return message - - -def _mutate_rows_request(table_name, rows, app_profile_id=None): - """Creates a request to mutate rows in a table. - - :type table_name: str - :param table_name: The name of the table to write to. - - :type rows: list - :param rows: List or other iterable of :class:`.DirectRow` instances. - - :type: app_profile_id: str - :param app_profile_id: (Optional) The unique name of the AppProfile. - - :rtype: :class:`data_messages_v2_pb2.MutateRowsRequest` - :returns: The ``MutateRowsRequest`` protobuf corresponding to the inputs. 
- :raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is - greater than 100,000 - """ - request_pb = data_messages_v2_pb2.MutateRowsRequest( - table_name=table_name, app_profile_id=app_profile_id - ) - mutations_count = 0 - for row in rows: - _check_row_table_name(table_name, row) - _check_row_type(row) - mutations = row._get_mutations() - request_pb.entries.add(row_key=row.row_key, mutations=mutations) - mutations_count += len(mutations) - if mutations_count > _MAX_BULK_MUTATIONS: - raise TooManyMutationsError( - "Maximum number of mutations is %s" % (_MAX_BULK_MUTATIONS,) - ) - return request_pb - - -def _check_row_table_name(table_name, row): - """Checks that a row belongs to a table. - - :type table_name: str - :param table_name: The name of the table. - - :type row: :class:`~google.cloud.bigtable.row.Row` - :param row: An instance of :class:`~google.cloud.bigtable.row.Row` - subclasses. - - :raises: :exc:`~.table.TableMismatchError` if the row does not belong to - the table. - """ - if row.table is not None and row.table.name != table_name: - raise TableMismatchError( - "Row %s is a part of %s table. Current table: %s" - % (row.row_key, row.table.name, table_name) - ) - - -def _check_row_type(row): - """Checks that a row is an instance of :class:`.DirectRow`. - - :type row: :class:`~google.cloud.bigtable.row.Row` - :param row: An instance of :class:`~google.cloud.bigtable.row.Row` - subclasses. - - :raises: :class:`TypeError ` if the row is not an - instance of DirectRow. - """ - if not isinstance(row, DirectRow): - raise TypeError( - "Bulk processing can not be applied for " "conditional or append mutations." - ) diff --git a/bigtable/google/cloud/bigtable_admin_v2/__init__.py b/bigtable/google/cloud/bigtable_admin_v2/__init__.py deleted file mode 100644 index 876859fe058e..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.bigtable_admin_v2 import types -from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client -from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client -from google.cloud.bigtable_admin_v2.gapic import enums - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7." 
- "More details about Python 2 support for Google Cloud Client Libraries" - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class BigtableInstanceAdminClient( - bigtable_instance_admin_client.BigtableInstanceAdminClient -): - __doc__ = bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__ - enums = enums - - -class BigtableTableAdminClient(bigtable_table_admin_client.BigtableTableAdminClient): - __doc__ = bigtable_table_admin_client.BigtableTableAdminClient.__doc__ - enums = enums - - -__all__ = ( - "enums", - "types", - "BigtableInstanceAdminClient", - "BigtableTableAdminClient", -) diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/__init__.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py deleted file mode 100644 index 0724c3822a3d..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ /dev/null @@ -1,1890 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.bigtable.admin.v2 BigtableInstanceAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client_config -from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_instance_admin_grpc_transport, -) -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable", -).version - - -class BigtableInstanceAdminClient(object): - """ - Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. 
- """ - - SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableInstanceAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def app_profile_path(cls, project, instance, app_profile): - """Return a fully-qualified app_profile string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/appProfiles/{app_profile}", - project=project, - instance=instance, - app_profile=app_profile, - ) - - @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", - project=project, - instance=instance, - cluster=cluster, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", project=project, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableInstanceAdminGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableInstanceAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. 
A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_instance_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_instance( - self, - parent, - instance_id, - instance, - clusters, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create an instance within a project. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `instance_id`: - >>> instance_id = '' - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `clusters`: - >>> clusters = {} - >>> - >>> response = client.create_instance(parent, instance_id, instance, clusters) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): The unique name of the project in which to create the new instance. - Values are of the form ``projects/``. - instance_id (str): The ID to be used when referring to the new instance within its project, - e.g., just ``myinstance`` rather than - ``projects/myproject/instances/myinstance``. - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The instance to create. Fields marked ``OutputOnly`` must be left blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): The clusters to be created within the instance, mapped by desired - cluster ID, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields - marked ``OutputOnly`` must be left blank. Currently, at most two - clusters can be specified. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "create_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_instance, - default_retry=self._method_configs["CreateInstance"].retry, - default_timeout=self._method_configs["CreateInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, - instance_id=instance_id, - instance=instance, - clusters=clusters, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, - ) - - def get_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.get_instance(name) - - Args: - name (str): The unique name of the requested instance. Values are of the form - ``projects//instances/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "get_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance, - default_retry=self._method_configs["GetInstance"].retry, - default_timeout=self._method_configs["GetInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_instances( - self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about instances in a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> response = client.list_instances(parent) - - Args: - parent (str): The unique name of the project for which a list of instances is - requested. Values are of the form ``projects/``. - page_token (str): DEPRECATED: This field is unused and ignored. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_instances" not in self._inner_api_calls: - self._inner_api_calls[ - "list_instances" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instances, - default_retry=self._method_configs["ListInstances"].retry, - default_timeout=self._method_configs["ListInstances"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent, page_token=page_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_instances"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_instance( - self, - name, - display_name, - type_, - labels, - state=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an instance within a project. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> from google.cloud.bigtable_admin_v2 import enums - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `display_name`: - >>> display_name = '' - >>> - >>> # TODO: Initialize `type_`: - >>> type_ = enums.Instance.Type.TYPE_UNSPECIFIED - >>> - >>> # TODO: Initialize `labels`: - >>> labels = {} - >>> - >>> response = client.update_instance(name, display_name, type_, labels) - - Args: - name (str): (``OutputOnly``) The unique name of the instance. Values are of the form - ``projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - display_name (str): The descriptive name for this instance as it appears in UIs. - Can be changed at any time, but should be kept globally unique - to avoid confusion. - type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. - labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. They can be used to filter resources and - aggregate metrics. - - - Label keys must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and must - conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given resource. - - Keys and values must both be under 128 bytes. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_instance, - default_retry=self._method_configs["UpdateInstance"].retry, - default_timeout=self._method_configs["UpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Instance( - name=name, - display_name=display_name, - type=type_, - labels=labels, - state=state, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def partial_update_instance( - self, - instance, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Partially updates an instance within a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.partial_update_instance(instance, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The Instance which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of Instance fields which should be replaced. - Must be explicitly set. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "partial_update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "partial_update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partial_update_instance, - default_retry=self._method_configs["PartialUpdateInstance"].retry, - default_timeout=self._method_configs["PartialUpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("instance.name", instance.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["partial_update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, - ) - - def delete_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Delete an instance from a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> client.delete_instance(name) - - Args: - name (str): The unique name of the instance to be deleted. Values are of the form - ``projects//instances/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_instance, - default_retry=self._method_configs["DeleteInstance"].retry, - default_timeout=self._method_configs["DeleteInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_cluster( - self, - parent, - cluster_id, - cluster, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a cluster within an instance. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> response = client.create_cluster(parent, cluster_id, cluster) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): The unique name of the instance in which to create the new cluster. - Values are of the form ``projects//instances/``. - cluster_id (str): The ID to be used when referring to the new cluster within its instance, - e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): The cluster to be created. Fields marked ``OutputOnly`` must be left - blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "create_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, - ) - - def get_cluster( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about a cluster. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> response = client.get_cluster(name) - - Args: - name (str): The unique name of the requested cluster. Values are of the form - ``projects//instances//clusters/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Cluster` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "get_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs["GetCluster"].retry, - default_timeout=self._method_configs["GetCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetClusterRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_clusters( - self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about clusters in an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.list_clusters(parent) - - Args: - parent (str): The unique name of the instance for which a list of clusters is - requested. Values are of the form - ``projects//instances/``. Use `` = '-'`` to - list Clusters for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_token (str): DEPRECATED: This field is unused and ignored. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListClustersResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_clusters" not in self._inner_api_calls: - self._inner_api_calls[ - "list_clusters" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs["ListClusters"].retry, - default_timeout=self._method_configs["ListClusters"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent, page_token=page_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_clusters"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_cluster( - self, - name, - serve_nodes, - location=None, - state=None, - default_storage_type=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a cluster within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # TODO: Initialize `serve_nodes`: - >>> serve_nodes = 0 - >>> - >>> response = client.update_cluster(name, serve_nodes) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): (``OutputOnly``) The unique name of the cluster. Values are of the form - ``projects//instances//clusters/[a-z][-a-z0-9]*``. - serve_nodes (int): The number of nodes allocated to this cluster. More nodes enable higher - throughput and more consistent performance. - location (str): (``CreationOnly``) The location where this cluster's nodes and storage - reside. For best performance, clients should be located as close as - possible to this cluster. Currently only zones are supported, so values - should be of the form ``projects//locations/``. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the cluster. - default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve its - parent instance's tables, unless explicitly overridden. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Cluster( - name=name, - serve_nodes=serve_nodes, - location=location, - state=state, - default_storage_type=default_storage_type, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, - ) - - def delete_cluster( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a cluster from an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> client.delete_cluster(name) - - Args: - name (str): The unique name of the cluster to be deleted. Values are of the form - ``projects//instances//clusters/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs["DeleteCluster"].retry, - default_timeout=self._method_configs["DeleteCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_app_profile( - self, - parent, - app_profile_id, - app_profile, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates an app profile within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `app_profile_id`: - >>> app_profile_id = '' - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> response = client.create_app_profile(parent, app_profile_id, app_profile) - - Args: - parent (str): The unique name of the instance in which to create the new app profile. - Values are of the form ``projects//instances/``. - app_profile_id (str): The ID to be used when referring to the new app profile within its - instance, e.g., just ``myprofile`` rather than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile to be created. Fields marked ``OutputOnly`` will be - ignored. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - ignore_warnings (bool): If true, ignore safety checks when creating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "create_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_app_profile, - default_retry=self._method_configs["CreateAppProfile"].retry, - default_timeout=self._method_configs["CreateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, - app_profile_id=app_profile_id, - app_profile=app_profile, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_app_profile( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about an app profile. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') - >>> - >>> response = client.get_app_profile(name) - - Args: - name (str): The unique name of the requested app profile. Values are of the form - ``projects//instances//appProfiles/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "get_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_app_profile, - default_retry=self._method_configs["GetAppProfile"].retry, - default_timeout=self._method_configs["GetAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_app_profiles( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about app profiles in an instance. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_app_profiles(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_app_profiles(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): The unique name of the instance for which a list of app profiles is - requested. Values are of the form - ``projects//instances/``. Use `` = '-'`` to - list AppProfiles for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_size (int): Maximum number of results per page. - CURRENTLY UNIMPLEMENTED AND IGNORED. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_app_profiles" not in self._inner_api_calls: - self._inner_api_calls[ - "list_app_profiles" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_app_profiles, - default_retry=self._method_configs["ListAppProfiles"].retry, - default_timeout=self._method_configs["ListAppProfiles"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_app_profiles"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="app_profiles", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_app_profile( - self, - app_profile, - update_mask, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an app profile within an instance. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_app_profile(app_profile, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of app profile fields which should be replaced. - If unset, all fields will be replaced. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - ignore_warnings (bool): If true, ignore safety checks when updating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "update_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_app_profile, - default_retry=self._method_configs["UpdateAppProfile"].retry, - default_timeout=self._method_configs["UpdateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, - update_mask=update_mask, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("app_profile.name", app_profile.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.AppProfile, - metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, - ) - - def delete_app_profile( - self, - name, - ignore_warnings, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an app profile from an instance. 
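update_app_profile wraps the raw long-running operation in an operation future via operation.from_gapic, so the caller can block on result(), inspect metadata, or attach a callback as in the docstring. A minimal end-to-end sketch, assuming illustrative resource names and a FieldMask given in dict form:

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
name = client.app_profile_path("my-project", "my-instance", "my-profile")  # illustrative

app_profile = client.get_app_profile(name)
app_profile.description = "updated by an admin script"

# Only the fields listed in the mask are replaced on the server side.
operation = client.update_app_profile(
    app_profile, {"paths": ["description"]}, ignore_warnings=True
)

updated = operation.result(timeout=120)  # blocks until the operation completes
print(updated.name, updated.description)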
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') - >>> - >>> # TODO: Initialize `ignore_warnings`: - >>> ignore_warnings = False - >>> - >>> client.delete_app_profile(name, ignore_warnings) - - Args: - name (str): The unique name of the app profile to be deleted. Values are of the form - ``projects//instances//appProfiles/``. - ignore_warnings (bool): If true, ignore safety checks when deleting the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_app_profile, - default_retry=self._method_configs["DeleteAppProfile"].retry, - default_timeout=self._method_configs["DeleteAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified instance resource. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with - wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
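get_iam_policy, set_iam_policy and test_iam_permissions are normally used together in a read-modify-write cycle on the instance policy. The sketch below is illustrative only: the role, member and permission strings are examples, and the Policy/Binding shapes follow google.iam.v1.

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
resource = client.instance_path("my-project", "my-instance")  # illustrative names

# Read-modify-write: fetch the current policy, add a binding, write it back.
policy = client.get_iam_policy(resource)
policy.bindings.add(role="roles/bigtable.user", members=["user:alice@example.com"])
client.set_iam_policy(resource, policy)

# Probe which of the caller's own permissions actually apply to this instance.
response = client.test_iam_permissions(
    resource, ["bigtable.instances.get", "bigtable.appProfiles.get"]
)
print(list(response.permissions))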
- if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py deleted file mode 100644 index b2ec35e0146e..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ /dev/null @@ -1,136 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.admin.v2.BigtableInstanceAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - "non_idempotent_heavy_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - }, - "methods": { - "CreateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "GetInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListInstances": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "PartialUpdateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "CreateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListClusters": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, 
- "UpdateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "CreateAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListAppProfiles": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - }, - } - } -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py deleted file mode 100644 index 9ccd58471455..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ /dev/null @@ -1,1672 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.bigtable.admin.v2 BigtableTableAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client_config -from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_table_admin_grpc_transport, -) -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable", -).version - - -class BigtableTableAdminClient(object): - """ - Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableTableAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableTableAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", - project=project, - instance=instance, - cluster=cluster, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def snapshot_path(cls, project, instance, cluster, snapshot): - """Return a fully-qualified snapshot string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", - project=project, - instance=instance, - cluster=cluster, - snapshot=snapshot, - ) - - @classmethod - def table_path(cls, project, instance, table): - """Return a fully-qualified table string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/tables/{table}", - project=project, - instance=instance, - table=table, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableTableAdminGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableTableAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_table_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_table( - self, - parent, - table_id, - table, - initial_splits=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> - >>> # TODO: Initialize `table`: - >>> table = {} - >>> - >>> response = client.create_table(parent, table_id, table) - - Args: - parent (str): The unique name of the instance in which to create the table. Values are - of the form ``projects//instances/``. - table_id (str): The name by which the new table should be referred to within the parent - instance, e.g., ``foobar`` rather than ``/tables/foobar``. - table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): The Table to create. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Table` - initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split the - table into several tablets (tablets are similar to HBase regions). Given - two split keys, ``s1`` and ``s2``, three tablets will be created, - spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. - - Example: - - - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial\_split\_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Split` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_table" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table, - default_retry=self._method_configs["CreateTable"].retry, - default_timeout=self._method_configs["CreateTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, - table_id=table_id, - table=table, - initial_splits=initial_splits, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_table_from_snapshot( - self, - parent, - table_id, - source_snapshot, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
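To connect the split-key walkthrough above to an actual call: each entry of initial_splits is a Split message (or a dict of the same shape) whose key becomes a tablet boundary. A minimal sketch with illustrative project, instance, table and column-family names:

from google.cloud import bigtable_admin_v2
from google.cloud.bigtable_admin_v2.proto import table_pb2

client = bigtable_admin_v2.BigtableTableAdminClient()
parent = client.instance_path("my-project", "my-instance")  # illustrative names

# One column family keeping a single cell version per column.
table = table_pb2.Table()
table.column_families["cf1"].gc_rule.max_num_versions = 1

# Each split key becomes a tablet boundary, as in the docstring example.
splits = [{"key": key.encode("utf-8")}
          for key in ("apple", "customer_1", "customer_2", "other")]

created = client.create_table(parent, "my-table", table, initial_splits=splits)
print(created.name)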
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> - >>> # TODO: Initialize `source_snapshot`: - >>> source_snapshot = '' - >>> - >>> response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): The unique name of the instance in which to create the table. Values are - of the form ``projects//instances/``. - table_id (str): The name by which the new table should be referred to within the parent - instance, e.g., ``foobar`` rather than ``/tables/foobar``. - source_snapshot (str): The unique name of the snapshot from which to restore the table. The - snapshot and the table must be in the same instance. Values are of the - form - ``projects//instances//clusters//snapshots/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_table_from_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table_from_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table_from_snapshot, - default_retry=self._method_configs["CreateTableFromSnapshot"].retry, - default_timeout=self._method_configs["CreateTableFromSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_table_from_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Table, - metadata_type=bigtable_table_admin_pb2.CreateTableFromSnapshotMetadata, - ) - - def list_tables( - self, - parent, - view=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all tables served from a specified instance. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_tables(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_tables(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): The unique name of the instance for which tables should be listed. - Values are of the form ``projects//instances/``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Defaults to - ``NAME_ONLY`` if unspecified; no others are currently supported. - page_size (int): Maximum number of results per page. - CURRENTLY UNIMPLEMENTED AND IGNORED. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_tables" not in self._inner_api_calls: - self._inner_api_calls[ - "list_tables" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_tables, - default_retry=self._method_configs["ListTables"].retry, - default_timeout=self._method_configs["ListTables"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent, view=view, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_tables"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="tables", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_table( - self, - name, - view=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata information about the specified table. 
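get_table returns only the schema by default (SCHEMA_VIEW); the enums module exposes the other Table.View values when replication state or the full table is wanted. A hedged sketch, assuming the enums re-export at the package level and illustrative resource names:

from google.cloud import bigtable_admin_v2
from google.cloud.bigtable_admin_v2 import enums

client = bigtable_admin_v2.BigtableTableAdminClient()
name = client.table_path("my-project", "my-instance", "my-table")  # illustrative

# Ask for per-cluster replication state in addition to the name.
table = client.get_table(name, view=enums.Table.View.REPLICATION_VIEW)
for cluster_id, state in table.cluster_states.items():
    print(cluster_id, state.replication_state)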
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.get_table(name) - - Args: - name (str): The unique name of the requested table. Values are of the form - ``projects//instances//tables/
``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to - ``SCHEMA_VIEW`` if unspecified. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_table" not in self._inner_api_calls: - self._inner_api_calls[ - "get_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table, - default_retry=self._method_configs["GetTable"].retry, - default_timeout=self._method_configs["GetTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_table( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently deletes a specified table and all of its data. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> client.delete_table(name) - - Args: - name (str): The unique name of the table to be deleted. Values are of the form - ``projects//instances//tables/
``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_table" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_table, - default_retry=self._method_configs["DeleteTable"].retry, - default_timeout=self._method_configs["DeleteTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteTableRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def modify_column_families( - self, - name, - modifications, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `modifications`: - >>> modifications = [] - >>> - >>> response = client.modify_column_families(name, modifications) - - Args: - name (str): The unique name of the table whose families should be modified. Values - are of the form - ``projects//instances//tables/
``. - modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Modifications to be atomically applied to the specified table's families. - Entries are applied in order, meaning that earlier modifications can be - masked by later ones (in the case of repeated updates to the same family, - for example). - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Modification` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "modify_column_families" not in self._inner_api_calls: - self._inner_api_calls[ - "modify_column_families" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.modify_column_families, - default_retry=self._method_configs["ModifyColumnFamilies"].retry, - default_timeout=self._method_configs["ModifyColumnFamilies"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["modify_column_families"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def drop_row_range( - self, - name, - row_key_prefix=None, - delete_all_data_from_table=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> client.drop_row_range(name) - - Args: - name (str): The unique name of the table on which to drop a range of rows. Values - are of the form - ``projects//instances//tables/
``. - row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be - zero length. - delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "drop_row_range" not in self._inner_api_calls: - self._inner_api_calls[ - "drop_row_range" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.drop_row_range, - default_retry=self._method_configs["DropRowRange"].retry, - default_timeout=self._method_configs["DropRowRange"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - - request = bigtable_table_admin_pb2.DropRowRangeRequest( - name=name, - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["drop_row_range"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def generate_consistency_token( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.generate_consistency_token(name) - - Args: - name (str): The unique name of the Table for which to create a consistency token. - Values are of the form - ``projects//instances//tables/
``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "generate_consistency_token" not in self._inner_api_calls: - self._inner_api_calls[ - "generate_consistency_token" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.generate_consistency_token, - default_retry=self._method_configs["GenerateConsistencyToken"].retry, - default_timeout=self._method_configs[ - "GenerateConsistencyToken" - ].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["generate_consistency_token"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def check_consistency( - self, - name, - consistency_token, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `consistency_token`: - >>> consistency_token = '' - >>> - >>> response = client.check_consistency(name, consistency_token) - - Args: - name (str): The unique name of the Table for which to check replication consistency. - Values are of the form - ``projects//instances//tables/
``. - consistency_token (str): The token created using GenerateConsistencyToken for the Table. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "check_consistency" not in self._inner_api_calls: - self._inner_api_calls[ - "check_consistency" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_consistency, - default_retry=self._method_configs["CheckConsistency"].retry, - default_timeout=self._method_configs["CheckConsistency"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["check_consistency"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for a table resource. Returns an empty - policy if an table exists but does not have a policy set. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. 
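generate_consistency_token and check_consistency are meant to be used as a pair: take a token after the writes you care about have finished, then poll until every cluster has replicated them. A minimal polling sketch; the sleep interval and resource names are illustrative:

import time
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
name = client.table_path("my-project", "my-instance", "my-table")  # illustrative

# The token covers all mutations that finished before this call started.
token = client.generate_consistency_token(name).consistency_token

# Poll until every cluster has replicated those mutations.
while not client.check_consistency(name, token).consistent:
    time.sleep(5)
print("table is consistent")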
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on a table resource. Replaces any existing - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified table resource. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with - wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def snapshot_table( - self, - name, - cluster, - snapshot_id, - description, - ttl=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = '' - >>> - >>> # TODO: Initialize `snapshot_id`: - >>> snapshot_id = '' - >>> - >>> # TODO: Initialize `description`: - >>> description = '' - >>> - >>> response = client.snapshot_table(name, cluster, snapshot_id, description) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): The unique name of the table to have the snapshot taken. Values are of - the form ``projects//instances//tables/
``. - cluster (str): The name of the cluster where the snapshot will be created in. Values - are of the form - ``projects//instances//clusters/``. - snapshot_id (str): The ID by which the new snapshot should be referred to within the parent - cluster, e.g., ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects//instances//clusters//snapshots/mysnapshot``. - description (str): Description of the snapshot. - ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is - created. Once 'ttl' expires, the snapshot will get deleted. The maximum - amount of time a snapshot can stay active is 7 days. If 'ttl' is not - specified, the default value of 24 hours will be used. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Duration` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "snapshot_table" not in self._inner_api_calls: - self._inner_api_calls[ - "snapshot_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.snapshot_table, - default_retry=self._method_configs["SnapshotTable"].retry, - default_timeout=self._method_configs["SnapshotTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, - cluster=cluster, - snapshot_id=snapshot_id, - description=description, - ttl=ttl, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["snapshot_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Snapshot, - metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, - ) - - def get_snapshot( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
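`snapshot_table` returns an operation future wrapping the long-running `SnapshotTable` RPC, so callers can either attach a callback (as in the generated example) or simply block. A brief sketch of the blocking form, with placeholder IDs and the default 24-hour TTL:

>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableTableAdminClient()
>>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>> cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
>>>
>>> # ttl is omitted, so the snapshot expires after the default 24 hours.
>>> operation = client.snapshot_table(name, cluster, 'my-snapshot', 'nightly backup')
>>>
>>> # Block until the operation completes; the result is a Snapshot message.
>>> snapshot = operation.result()
>>> print(snapshot.name)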
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> response = client.get_snapshot(name) - - Args: - name (str): The unique name of the requested snapshot. Values are of the form - ``projects//instances//clusters//snapshots/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "get_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_snapshot, - default_retry=self._method_configs["GetSnapshot"].retry, - default_timeout=self._method_configs["GetSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_snapshots( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # Iterate over all results - >>> for element in client.list_snapshots(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_snapshots(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): The unique name of the cluster for which snapshots should be listed. - Values are of the form - ``projects//instances//clusters/``. Use - `` = '-'`` to list snapshots for all clusters in an instance, - e.g., ``projects//instances//clusters/-``. - page_size (int): The maximum number of resources contained in the - underlying API response. 
If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_snapshots" not in self._inner_api_calls: - self._inner_api_calls[ - "list_snapshots" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_snapshots, - default_retry=self._method_configs["ListSnapshots"].retry, - default_timeout=self._method_configs["ListSnapshots"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListSnapshotsRequest( - parent=parent, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_snapshots"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="snapshots", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_snapshot( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> client.delete_snapshot(name) - - Args: - name (str): The unique name of the snapshot to be deleted. Values are of the form - ``projects//instances//clusters//snapshots/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
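Because `list_snapshots` returns a `GRPCIterator`, `page_size` only caps each underlying response page; iterating the result still yields every snapshot. A small sketch that also uses the documented `'-'` wildcard to cover all clusters in an instance:

>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableTableAdminClient()
>>>
>>> # Use '-' as the cluster ID to list snapshots across every cluster.
>>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '-')
>>>
>>> for snapshot in client.list_snapshots(parent, page_size=100):
...     print(snapshot.name)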
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_snapshot, - default_retry=self._method_configs["DeleteSnapshot"].retry, - default_timeout=self._method_configs["DeleteSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py deleted file mode 100644 index 5e63380ae091..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ /dev/null @@ -1,130 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.admin.v2.BigtableTableAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - "non_idempotent_heavy_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "drop_row_range_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - }, - "methods": { - "CreateTable": { - "timeout_millis": 130000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "CreateTableFromSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ListTables": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "GetTable": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - 
"retry_params_name": "idempotent_params", - }, - "DeleteTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ModifyColumnFamilies": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "DropRowRange": { - "timeout_millis": 900000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "drop_row_range_params", - }, - "GenerateConsistencyToken": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "CheckConsistency": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SnapshotTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListSnapshots": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - }, - } - } -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py deleted file mode 100644 index 68f25f989ba7..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class StorageType(enum.IntEnum): - """ - Storage media types for persisting Bigtable data. - - Attributes: - STORAGE_TYPE_UNSPECIFIED (int): The user did not specify a storage type. - SSD (int): Flash (SSD) storage should be used. - HDD (int): Magnetic drive (HDD) storage should be used. - """ - - STORAGE_TYPE_UNSPECIFIED = 0 - SSD = 1 - HDD = 2 - - -class Cluster(object): - class State(enum.IntEnum): - """ - Possible states of a cluster. - - Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. - A cluster may not be able to serve requests while being created. 
- RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 - - -class Instance(object): - class State(enum.IntEnum): - """ - Possible states of an instance. - - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be determined. - READY (int): The instance has been successfully created and can serve requests - to its tables. - CREATING (int): The instance is currently being created, and may be destroyed - if the creation process encounters an error. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(enum.IntEnum): - """ - The type of the instance. - - Attributes: - TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when - updating an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on the - cluster. - DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has - no performance or uptime guarantees and is not covered by SLA. After a - development instance is created, it can be upgraded by updating the - instance to type ``PRODUCTION``. An instance created as a production - instance cannot be changed to a development instance. When creating a - development instance, ``serve_nodes`` on the cluster must not be set. - """ - - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - -class Snapshot(object): - class State(enum.IntEnum): - """ - Possible states of a snapshot. - - Attributes: - STATE_NOT_KNOWN (int): The state of the snapshot could not be determined. - READY (int): The snapshot has been successfully created and can serve all requests. - CREATING (int): The snapshot is currently being created, and may be destroyed if the - creation process encounters an error. A snapshot may not be restored to a - table while it is being created. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - -class Table(object): - class TimestampGranularity(enum.IntEnum): - """ - Possible timestamp granularities to use when keeping multiple versions - of data in a table. - - Attributes: - TIMESTAMP_GRANULARITY_UNSPECIFIED (int): The user did not specify a granularity. Should not be returned. - When specified during table creation, MILLIS will be used. - MILLIS (int): The table keeps data versioned at a granularity of 1ms. - """ - - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 - MILLIS = 1 - - class View(enum.IntEnum): - """ - Defines a view over a table's fields. - - Attributes: - VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. - NAME_ONLY (int): Only populates ``name``. - SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. - REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's replication - state. - FULL (int): Populates all fields. 
- """ - - VIEW_UNSPECIFIED = 0 - NAME_ONLY = 1 - SCHEMA_VIEW = 2 - REPLICATION_VIEW = 3 - FULL = 4 - - class ClusterState(object): - class ReplicationState(enum.IntEnum): - """ - Table replication states. - - Attributes: - STATE_NOT_KNOWN (int): The replication state of the table is unknown in this cluster. - INITIALIZING (int): The cluster was recently created, and the table must finish copying - over pre-existing data from other clusters before it can begin - receiving live replication updates and serving Data API requests. - PLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this - cluster due to planned internal maintenance. - UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this - cluster due to unplanned or emergency maintenance. - READY (int): The table can serve Data API requests from this cluster. Depending on - replication delay, reads may not immediately reflect the state of the - table in other clusters. - """ - - STATE_NOT_KNOWN = 0 - INITIALIZING = 1 - PLANNED_MAINTENANCE = 2 - UNPLANNED_MAINTENANCE = 3 - READY = 4 diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py deleted file mode 100644 index fa5bf0556a96..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ /dev/null @@ -1,377 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc - - -class BigtableInstanceAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.admin.v2 BigtableInstanceAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. 
- _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_instance_admin_stub": bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_instance`. - - Create an instance within a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_instance_admin_stub"].CreateInstance - - @property - def get_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_instance`. - - Gets information about an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetInstance - - @property - def list_instances(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_instances`. - - Lists information about instances in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListInstances - - @property - def update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. - - Updates an instance within a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateInstance - - @property - def partial_update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. - - Partially updates an instance within a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].PartialUpdateInstance - - @property - def delete_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_instance`. - - Delete an instance from a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteInstance - - @property - def create_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_cluster`. - - Creates a cluster within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateCluster - - @property - def get_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_cluster`. - - Gets information about a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetCluster - - @property - def list_clusters(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_clusters`. - - Lists information about clusters in an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListClusters - - @property - def update_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_cluster`. - - Updates a cluster within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["bigtable_instance_admin_stub"].UpdateCluster - - @property - def delete_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_cluster`. - - Deletes a cluster from an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteCluster - - @property - def create_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_app_profile`. - - Creates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateAppProfile - - @property - def get_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_app_profile`. - - Gets information about an app profile. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetAppProfile - - @property - def list_app_profiles(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_app_profiles`. - - Lists information about app profiles in an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListAppProfiles - - @property - def update_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_app_profile`. - - Updates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateAppProfile - - @property - def delete_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_app_profile`. - - Deletes an app profile from an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteAppProfile - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_iam_policy`. - - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.set_iam_policy`. - - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified instance resource. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].TestIamPermissions diff --git a/bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py deleted file mode 100644 index d8a5bfee0d74..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc - - -class BigtableTableAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.admin.v2 BigtableTableAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_table_admin_stub": bigtable_table_admin_pb2_grpc.BigtableTableAdminStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table`. - - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateTable - - @property - def create_table_from_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table_from_snapshot`. - - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateTableFromSnapshot - - @property - def list_tables(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_tables`. - - Lists all tables served from a specified instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListTables - - @property - def get_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_table`. - - Gets metadata information about the specified table. 
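For callers that need gRPC-level control (interceptors, custom message limits, and so on), the transport removed here can be built around an explicitly created channel and then handed to the client. A sketch under the assumption that the generated `BigtableTableAdminClient` accepts a `transport` argument, which is not shown in this hunk:

>>> from google.cloud import bigtable_admin_v2
>>> from google.cloud.bigtable_admin_v2.gapic.transports import (
...     bigtable_table_admin_grpc_transport as transports)
>>>
>>> # Application default credentials are used when none are passed; passing
>>> # both a channel and credentials raises ValueError, as noted above.
>>> channel = transports.BigtableTableAdminGrpcTransport.create_channel()
>>> transport = transports.BigtableTableAdminGrpcTransport(channel=channel)
>>>
>>> # Assumed client constructor parameter.
>>> client = bigtable_admin_v2.BigtableTableAdminClient(transport=transport)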
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetTable - - @property - def delete_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_table`. - - Permanently deletes a specified table and all of its data. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteTable - - @property - def modify_column_families(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.modify_column_families`. - - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ModifyColumnFamilies - - @property - def drop_row_range(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.drop_row_range`. - - Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DropRowRange - - @property - def generate_consistency_token(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.generate_consistency_token`. - - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GenerateConsistencyToken - - @property - def check_consistency(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.check_consistency`. - - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CheckConsistency - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - - Gets the access control policy for a table resource. Returns an empty - policy if an table exists but does not have a policy set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. - - Sets the access control policy on a table resource. Replaces any existing - policy. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified table resource. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].TestIamPermissions - - @property - def snapshot_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. - - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SnapshotTable - - @property - def get_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_snapshot`. - - Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetSnapshot - - @property - def list_snapshots(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_snapshots`. - - Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListSnapshots - - @property - def delete_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_snapshot`. - - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
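As the class docstring notes, these properties expose the raw gRPC stubs, so an advanced caller can send a request message directly and bypass the client-side retry/timeout wrapping. A minimal sketch; `GetTableRequest` comes from `bigtable_table_admin_pb2`, which is not part of this hunk, and the resource name is a placeholder:

>>> from google.cloud.bigtable_admin_v2.gapic.transports import (
...     bigtable_table_admin_grpc_transport as transports)
>>> from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2
>>>
>>> transport = transports.BigtableTableAdminGrpcTransport()
>>>
>>> # Each property is the stub method bound to the channel; it takes the
>>> # deserialized request message and returns the response message.
>>> request = bigtable_table_admin_pb2.GetTableRequest(
...     name='projects/[PROJECT]/instances/[INSTANCE]/tables/[TABLE]')
>>> table = transport.get_table(request)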
- """ - return self._stubs["bigtable_table_admin_stub"].DeleteSnapshot diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/__init__.py b/bigtable/google/cloud/bigtable_admin_v2/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto deleted file mode 100644 index ca3b663d8661..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. - enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. - OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects//zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone. - Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects//zones//clusters/[a-z][-a-z0-9]* - string name = 1; - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. Calls to these methods will be rejected if - // "current_operation" is already set. - google.longrunning.Operation current_operation = 3; - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. 
- STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; - - // Data will be stored in HDD, providing high and less predictable - // latencies. - STORAGE_HDD = 2; -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto deleted file mode 100644 index 038fcc46397f..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. - rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. 
- // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UpdateCluster(Cluster) returns (Cluster) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; - } - - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Cancels the scheduled deletion of an cluster and begins preparing it to - // resume serving. The returned operation will also be embedded as the - // cluster's "current_operation". - // Immediately upon completion of this request: - // * The cluster's "delete_time" field will be unset, protecting it from - // automatic deletion. - // Until completion of the returned operation: - // * The operation cannot be cancelled. - // Upon completion of the returned operation: - // * Billing for the cluster's resources will resume. - // * All tables within the cluster will be available. 
- // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" }; - } -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto deleted file mode 100644 index 518d14dac8e0..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Request message for BigtableClusterService.ListZones. -message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. - repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects//zones/ - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects//zones//clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. 
- Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 4; -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 1; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 2; -} - -// Metadata type for operations initiated by the V2 BigtableAdmin service. -// More complete information for such operations is available via the V2 API. -message V2OperationMetadata { - -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto deleted file mode 100644 index 80ce42470736..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
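For context on what is going away in this hunk: the three v1 cluster-admin protos above (data, service, and service messages) describe a plain gRPC surface with no generated Python in this package. The sketch below is illustrative only and shows how stubs compiled from those protos could be driven; the `*_pb2` module names, the endpoint, and the credential setup are all assumptions.

```python
# Hypothetical sketch only: driving the (long-superseded) v1
# BigtableClusterService from stubs compiled out of the protos deleted above.
# Module names and the endpoint are assumed; real calls would also need OAuth
# call credentials, which are omitted here.
import grpc

from bigtable_cluster_service_messages_pb2 import (  # assumed protoc output
    ListClustersRequest,
    ListZonesRequest,
)
from bigtable_cluster_service_pb2_grpc import (  # assumed grpc plugin output
    BigtableClusterServiceStub,
)

channel = grpc.secure_channel(
    "bigtableclusteradmin.googleapis.com:443",  # assumed v1 endpoint
    grpc.ssl_channel_credentials(),
)
stub = BigtableClusterServiceStub(channel)

# Both RPCs take the project resource name, per the proto comments above.
zones = stub.ListZones(ListZonesRequest(name="projects/my-project"))
clusters = stub.ListClusters(ListClustersRequest(name="projects/my-project"))
for cluster in clusters.clusters:
    print(cluster.name, cluster.serve_nodes)
```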
-// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/instance.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableInstanceAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - -// Service for creating, configuring, and deleting Cloud Bigtable Instances and -// Clusters. Provides access to the Instance and Cluster schemas only, not the -// tables' metadata or data stored in those tables. -service BigtableInstanceAdmin { - // Create an instance within a project. - rpc CreateInstance(CreateInstanceRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*}/instances" - body: "*" - }; - } - - // Gets information about an instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*}" - }; - } - - // Lists information about instances in a project. - rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*}/instances" - }; - } - - // Updates an instance within a project. - rpc UpdateInstance(Instance) returns (Instance) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*}" - body: "*" - }; - } - - // Partially updates an instance within a project. - rpc PartialUpdateInstance(PartialUpdateInstanceRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{instance.name=projects/*/instances/*}" - body: "instance" - }; - } - - // Delete an instance from a project. - rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*}" - }; - } - - // Creates a cluster within an instance. - rpc CreateCluster(CreateClusterRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/clusters" - body: "cluster" - }; - } - - // Gets information about a cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - } - - // Lists information about clusters in an instance. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/clusters" - }; - } - - // Updates a cluster within an instance. - rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*/clusters/*}" - body: "*" - }; - } - - // Deletes a cluster from an instance. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - } - - // Creates an app profile within an instance. 
- rpc CreateAppProfile(CreateAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/appProfiles" - body: "app_profile" - }; - } - - // Gets information about an app profile. - rpc GetAppProfile(GetAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - } - - // Lists information about app profiles in an instance. - rpc ListAppProfiles(ListAppProfilesRequest) - returns (ListAppProfilesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/appProfiles" - }; - } - - // Updates an app profile within an instance. - rpc UpdateAppProfile(UpdateAppProfileRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" - body: "app_profile" - }; - } - - // Deletes an app profile from an instance. - rpc DeleteAppProfile(DeleteAppProfileRequest) - returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - } - - // Gets the access control policy for an instance resource. Returns an empty - // policy if an instance exists but does not have a policy set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) - returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" - body: "*" - }; - } - - // Sets the access control policy on an instance resource. Replaces any - // existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) - returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified instance resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) - returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" - body: "*" - }; - } -} - -// Request message for BigtableInstanceAdmin.CreateInstance. -message CreateInstanceRequest { - // The unique name of the project in which to create the new instance. - // Values are of the form `projects/`. - string parent = 1; - - // The ID to be used when referring to the new instance within its project, - // e.g., just `myinstance` rather than - // `projects/myproject/instances/myinstance`. - string instance_id = 2; - - // The instance to create. - // Fields marked `OutputOnly` must be left blank. - Instance instance = 3; - - // The clusters to be created within the instance, mapped by desired - // cluster ID, e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - // Fields marked `OutputOnly` must be left blank. - // Currently, at most two clusters can be specified. - map clusters = 4; -} - -// Request message for BigtableInstanceAdmin.GetInstance. -message GetInstanceRequest { - // The unique name of the requested instance. Values are of the form - // `projects//instances/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListInstances. -message ListInstancesRequest { - // The unique name of the project for which a list of instances is requested. - // Values are of the form `projects/`. - string parent = 1; - - // DEPRECATED: This field is unused and ignored. 
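The v2 `BigtableInstanceAdmin` service defined above is not going away; only this repository's copy of the protos is, now that the client lives in googleapis/python-bigtable. As a minimal, hedged sketch of the surface, assuming the `bigtable_admin_v2` generated client this package exposed at the time and placeholder project/instance IDs:

```python
# Sketch, not part of this change: listing and fetching instances through the
# generated BigtableInstanceAdmin client. Resource-name formats follow the
# proto comments above; IDs are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()

# ListInstances takes the project resource name.
response = client.list_instances("projects/my-project")
for instance in response.instances:
    print(instance.name, instance.display_name)
if response.failed_locations:
    print("partial results; unreachable locations:", response.failed_locations)

# GetInstance takes the full instance resource name.
instance = client.get_instance("projects/my-project/instances/my-instance")
```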
- string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListInstances. -message ListInstancesResponse { - // The list of requested instances. - repeated Instance instances = 1; - - // Locations from which Instance information could not be retrieved, - // due to an outage or some other transient condition. - // Instances whose Clusters are all in one of the failed locations - // may be missing from `instances`, and Instances with at least one - // Cluster in a failed location may only have partial information returned. - // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.PartialUpdateInstance. -message PartialUpdateInstanceRequest { - // The Instance which will (partially) replace the current value. - Instance instance = 1; - - // The subset of Instance fields which should be replaced. - // Must be explicitly set. - google.protobuf.FieldMask update_mask = 2; -} - -// Request message for BigtableInstanceAdmin.DeleteInstance. -message DeleteInstanceRequest { - // The unique name of the instance to be deleted. - // Values are of the form `projects//instances/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.CreateCluster. -message CreateClusterRequest { - // The unique name of the instance in which to create the new cluster. - // Values are of the form - // `projects//instances/`. - string parent = 1; - - // The ID to be used when referring to the new cluster within its instance, - // e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - string cluster_id = 2; - - // The cluster to be created. - // Fields marked `OutputOnly` must be left blank. - Cluster cluster = 3; -} - -// Request message for BigtableInstanceAdmin.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. Values are of the form - // `projects//instances//clusters/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListClusters. -message ListClustersRequest { - // The unique name of the instance for which a list of clusters is requested. - // Values are of the form `projects//instances/`. - // Use ` = '-'` to list Clusters for all Instances in a project, - // e.g., `projects/myproject/instances/-`. - string parent = 1; - - // DEPRECATED: This field is unused and ignored. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListClusters. -message ListClustersResponse { - // The list of requested clusters. - repeated Cluster clusters = 1; - - // Locations from which Cluster information could not be retrieved, - // due to an outage or some other transient condition. - // Clusters from these locations may be missing from `clusters`, - // or may only have partial information returned. - // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. Values are of the form - // `projects//instances//clusters/`. - string name = 1; -} - -// The metadata for the Operation returned by CreateInstance. -message CreateInstanceMetadata { - // The request that prompted the initiation of this CreateInstance operation. 
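The `update_mask` comment above ("Must be explicitly set") is the behavioural detail worth remembering about `PartialUpdateInstance`: only the named paths are replaced, and the call returns a long-running operation. A hedged sketch with the same assumed client, using `display_name` as the example mask path:

```python
# Sketch: a partial instance update. Only fields listed in update_mask are
# replaced; the RPC returns a long-running operation whose metadata type is
# UpdateInstanceMetadata. Names and IDs are placeholders.
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableInstanceAdminClient()

instance = bigtable_admin_v2.types.Instance(
    name="projects/my-project/instances/my-instance",
    display_name="Renamed instance",
)
update_mask = field_mask_pb2.FieldMask(paths=["display_name"])

operation = client.partial_update_instance(instance, update_mask)
updated = operation.result()  # blocks until the operation finishes
print(updated.display_name)
```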
- CreateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateInstance. -message UpdateInstanceMetadata { - // The request that prompted the initiation of this UpdateInstance operation. - PartialUpdateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateCluster. -message CreateClusterMetadata { - // The request that prompted the initiation of this CreateCluster operation. - CreateClusterRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateCluster. -message UpdateClusterMetadata { - // The request that prompted the initiation of this UpdateCluster operation. - Cluster original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Request message for BigtableInstanceAdmin.CreateAppProfile. -message CreateAppProfileRequest { - // The unique name of the instance in which to create the new app profile. - // Values are of the form - // `projects//instances/`. - string parent = 1; - - // The ID to be used when referring to the new app profile within its - // instance, e.g., just `myprofile` rather than - // `projects/myproject/instances/myinstance/appProfiles/myprofile`. - string app_profile_id = 2; - - // The app profile to be created. - // Fields marked `OutputOnly` will be ignored. - AppProfile app_profile = 3; - - // If true, ignore safety checks when creating the app profile. - bool ignore_warnings = 4; -} - -// Request message for BigtableInstanceAdmin.GetAppProfile. -message GetAppProfileRequest { - // The unique name of the requested app profile. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesRequest { - // The unique name of the instance for which a list of app profiles is - // requested. Values are of the form - // `projects//instances/`. - // Use ` = '-'` to list AppProfiles for all Instances in a project, - // e.g., `projects/myproject/instances/-`. - string parent = 1; - - // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 3; - - // The value of `next_page_token` returned by a previous call. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesResponse { - // The list of requested app profiles. - repeated AppProfile app_profiles = 1; - - // Set if not all app profiles could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. 
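The app-profile request messages above map onto client calls in the obvious way; the one subtlety is `ignore_warnings`, which bypasses the safety checks on the profile's routing configuration. A hedged sketch follows; the `MultiClusterRoutingUseAny` routing policy comes from `instance.proto`, which is not part of this hunk, so treat that field as an assumption.

```python
# Sketch: creating and listing app profiles. ignore_warnings skips the safety
# checks mentioned in CreateAppProfileRequest; the routing policy message is
# defined in instance.proto and assumed here.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
parent = "projects/my-project/instances/my-instance"

profile = bigtable_admin_v2.types.AppProfile(
    description="Batch traffic",
    multi_cluster_routing_use_any=(
        bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny()
    ),
)
created = client.create_app_profile(
    parent, "batch-profile", profile, ignore_warnings=True
)

# ListAppProfiles pages transparently via the page_token plumbing above.
for app_profile in client.list_app_profiles(parent):
    print(app_profile.name)
```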
- string next_page_token = 2; - - // Locations from which AppProfile information could not be retrieved, - // due to an outage or some other transient condition. - // AppProfiles from these locations may be missing from `app_profiles`. - // Values are of the form `projects//locations/` - repeated string failed_locations = 3; -} - -// Request message for BigtableInstanceAdmin.UpdateAppProfile. -message UpdateAppProfileRequest { - // The app profile which will (partially) replace the current value. - AppProfile app_profile = 1; - - // The subset of app profile fields which should be replaced. - // If unset, all fields will be replaced. - google.protobuf.FieldMask update_mask = 2; - - // If true, ignore safety checks when updating the app profile. - bool ignore_warnings = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteAppProfile. -message DeleteAppProfileRequest { - // The unique name of the app profile to be deleted. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; - - // If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2; -} - -// The metadata for the Operation returned by UpdateAppProfile. -message UpdateAppProfileMetadata {} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py deleted file mode 100644 index 540e8c91b83b..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ /dev/null @@ -1,2370 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - 
'\nBgoogle/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x33google/cloud/bigtable/admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01""\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x85\x01\n\x1cPartialUpdateInstanceRequest\x12\x34\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.Instance\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x95\x01\n\x17\x43reateAppProfileRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x39\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fignore_warnings\x18\x04 
\x01(\x08"$\n\x14GetAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"O\n\x16ListAppProfilesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\x9e\x01\n\x17UpdateAppProfileRequest\x12\x39\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"@\n\x17\x44\x65leteAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fignore_warnings\x18\x02 \x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\xaa\x17\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation",\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xac\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"=\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\x12\xb1\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"D\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\x12\x9e\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\x12\xaf\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"7\x82\xd3\xe4\x93\
x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\x12\xb6\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"P\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\x12\x96\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty"7\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\x12\x88\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"=\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\x12\x88\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"=\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\x12\xae\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"C\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*B\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( - name="ClustersEntry", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=570, - serialized_end=652, -) - -_CREATEINSTANCEREQUEST = _descriptor.Descriptor( - name="CreateInstanceRequest", - full_name="google.bigtable.admin.v2.CreateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, 
- enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="instance", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.clusters", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=373, - serialized_end=652, -) - - -_GETINSTANCEREQUEST = _descriptor.Descriptor( - name="GetInstanceRequest", - full_name="google.bigtable.admin.v2.GetInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=654, - serialized_end=688, -) - - -_LISTINSTANCESREQUEST = _descriptor.Descriptor( - name="ListInstancesRequest", - full_name="google.bigtable.admin.v2.ListInstancesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListInstancesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListInstancesRequest.page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - 
extension_ranges=[], - oneofs=[], - serialized_start=690, - serialized_end=748, -) - - -_LISTINSTANCESRESPONSE = _descriptor.Descriptor( - name="ListInstancesResponse", - full_name="google.bigtable.admin.v2.ListInstancesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="instances", - full_name="google.bigtable.admin.v2.ListInstancesResponse.instances", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListInstancesResponse.failed_locations", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListInstancesResponse.next_page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=751, - serialized_end=880, -) - - -_PARTIALUPDATEINSTANCEREQUEST = _descriptor.Descriptor( - name="PartialUpdateInstanceRequest", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=883, - serialized_end=1016, -) - - -_DELETEINSTANCEREQUEST = _descriptor.Descriptor( - name="DeleteInstanceRequest", - full_name="google.bigtable.admin.v2.DeleteInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1018, - serialized_end=1055, -) - - -_CREATECLUSTERREQUEST = _descriptor.Descriptor( - name="CreateClusterRequest", - full_name="google.bigtable.admin.v2.CreateClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateClusterRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cluster_id", - full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1057, - serialized_end=1167, -) - - -_GETCLUSTERREQUEST = _descriptor.Descriptor( - name="GetClusterRequest", - full_name="google.bigtable.admin.v2.GetClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetClusterRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1169, - serialized_end=1202, -) - - -_LISTCLUSTERSREQUEST = _descriptor.Descriptor( - name="ListClustersRequest", - full_name="google.bigtable.admin.v2.ListClustersRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListClustersRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListClustersRequest.page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1204, - serialized_end=1261, -) - - -_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( - name="ListClustersResponse", - full_name="google.bigtable.admin.v2.ListClustersResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.bigtable.admin.v2.ListClustersResponse.clusters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListClustersResponse.failed_locations", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListClustersResponse.next_page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1263, - serialized_end=1389, -) - - -_DELETECLUSTERREQUEST = _descriptor.Descriptor( - name="DeleteClusterRequest", - full_name="google.bigtable.admin.v2.DeleteClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteClusterRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1391, - serialized_end=1427, -) - - -_CREATEINSTANCEMETADATA = _descriptor.Descriptor( - name="CreateInstanceMetadata", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - 
name="request_time", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1430, - serialized_end=1628, -) - - -_UPDATEINSTANCEMETADATA = _descriptor.Descriptor( - name="UpdateInstanceMetadata", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1631, - serialized_end=1836, -) - - -_CREATECLUSTERMETADATA = _descriptor.Descriptor( - name="CreateClusterMetadata", - full_name="google.bigtable.admin.v2.CreateClusterMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1839, - serialized_end=2035, -) - - -_UPDATECLUSTERMETADATA = _descriptor.Descriptor( - name="UpdateClusterMetadata", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2038, - serialized_end=2221, -) - - -_CREATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="CreateAppProfileRequest", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="app_profile", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile", - index=2, - number=3, - type=11, - 
cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.ignore_warnings", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2224, - serialized_end=2373, -) - - -_GETAPPPROFILEREQUEST = _descriptor.Descriptor( - name="GetAppProfileRequest", - full_name="google.bigtable.admin.v2.GetAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetAppProfileRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2375, - serialized_end=2411, -) - - -_LISTAPPPROFILESREQUEST = _descriptor.Descriptor( - name="ListAppProfilesRequest", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_size", - index=1, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_token", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2413, - serialized_end=2492, -) - - -_LISTAPPPROFILESRESPONSE = _descriptor.Descriptor( - name="ListAppProfilesResponse", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name="app_profiles", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.failed_locations", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2495, - serialized_end=2631, -) - - -_UPDATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="UpdateAppProfileRequest", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="app_profile", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.ignore_warnings", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2634, - serialized_end=2792, -) - - -_DELETEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="DeleteAppProfileRequest", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.ignore_warnings", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2794, - serialized_end=2858, -) - - -_UPDATEAPPPROFILEMETADATA = _descriptor.Descriptor( - name="UpdateAppProfileMetadata", - full_name="google.bigtable.admin.v2.UpdateAppProfileMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2860, - serialized_end=2886, -) - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name[ - "value" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEREQUEST.fields_by_name[ - "instance" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_CREATEINSTANCEREQUEST.fields_by_name[ - "clusters" -].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY -_LISTINSTANCESRESPONSE.fields_by_name[ - "instances" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ - "instance" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATECLUSTERREQUEST.fields_by_name[ - "cluster" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_LISTCLUSTERSRESPONSE.fields_by_name[ - "clusters" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_CREATEINSTANCEMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "original_request" -].message_type = _PARTIALUPDATEINSTANCEREQUEST -_UPDATEINSTANCEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATECLUSTERREQUEST -_CREATECLUSTERMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name[ - "finish_time" -].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name[ - "original_request" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_UPDATECLUSTERMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEAPPPROFILEREQUEST.fields_by_name[ - "app_profile" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_LISTAPPPROFILESRESPONSE.fields_by_name[ - "app_profiles" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_UPDATEAPPPROFILEREQUEST.fields_by_name[ - "app_profile" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_UPDATEAPPPROFILEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -DESCRIPTOR.message_types_by_name["CreateInstanceRequest"] = _CREATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["GetInstanceRequest"] = _GETINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesRequest"] = _LISTINSTANCESREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesResponse"] = _LISTINSTANCESRESPONSE -DESCRIPTOR.message_types_by_name[ - "PartialUpdateInstanceRequest" -] = _PARTIALUPDATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["DeleteInstanceRequest"] = _DELETEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["GetClusterRequest"] = _GETCLUSTERREQUEST -DESCRIPTOR.message_types_by_name["ListClustersRequest"] = _LISTCLUSTERSREQUEST -DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteClusterRequest"] = _DELETECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["CreateInstanceMetadata"] = _CREATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["UpdateInstanceMetadata"] = _UPDATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["CreateClusterMetadata"] = _CREATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name["UpdateClusterMetadata"] = _UPDATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name["CreateAppProfileRequest"] = _CREATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["GetAppProfileRequest"] = _GETAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["ListAppProfilesRequest"] = _LISTAPPPROFILESREQUEST -DESCRIPTOR.message_types_by_name["ListAppProfilesResponse"] = _LISTAPPPROFILESRESPONSE -DESCRIPTOR.message_types_by_name["UpdateAppProfileRequest"] = _UPDATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["DeleteAppProfileRequest"] = _DELETEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["UpdateAppProfileMetadata"] = _UPDATEAPPPROFILEMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "CreateInstanceRequest", - (_message.Message,), - dict( - ClustersEntry=_reflection.GeneratedProtocolMessageType( - "ClustersEntry", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEINSTANCEREQUEST_CLUSTERSENTRY, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) - ), - ), - DESCRIPTOR=_CREATEINSTANCEREQUEST, - 
__module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.CreateInstance. - - - Attributes: - parent: - The unique name of the project in which to create the new - instance. Values are of the form ``projects/``. - instance_id: - The ID to be used when referring to the new instance within - its project, e.g., just ``myinstance`` rather than - ``projects/myproject/instances/myinstance``. - instance: - The instance to create. Fields marked ``OutputOnly`` must be - left blank. - clusters: - The clusters to be created within the instance, mapped by - desired cluster ID, e.g., just ``mycluster`` rather than ``pro - jects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. Currently, at - most two clusters can be specified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) - ), -) -_sym_db.RegisterMessage(CreateInstanceRequest) -_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) - -GetInstanceRequest = _reflection.GeneratedProtocolMessageType( - "GetInstanceRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETINSTANCEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.GetInstance. - - - Attributes: - name: - The unique name of the requested instance. Values are of the - form ``projects//instances/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) - ), -) -_sym_db.RegisterMessage(GetInstanceRequest) - -ListInstancesRequest = _reflection.GeneratedProtocolMessageType( - "ListInstancesRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINSTANCESREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.ListInstances. - - - Attributes: - parent: - The unique name of the project for which a list of instances - is requested. Values are of the form ``projects/``. - page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) - ), -) -_sym_db.RegisterMessage(ListInstancesRequest) - -ListInstancesResponse = _reflection.GeneratedProtocolMessageType( - "ListInstancesResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINSTANCESRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Response message for BigtableInstanceAdmin.ListInstances. - - - Attributes: - instances: - The list of requested instances. - failed_locations: - Locations from which Instance information could not be - retrieved, due to an outage or some other transient condition. - Instances whose Clusters are all in one of the failed - locations may be missing from ``instances``, and Instances - with at least one Cluster in a failed location may only have - partial information returned. Values are of the form - ``projects//locations/`` - next_page_token: - DEPRECATED: This field is unused and ignored. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) - ), -) -_sym_db.RegisterMessage(ListInstancesResponse) - -PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "PartialUpdateInstanceRequest", - (_message.Message,), - dict( - DESCRIPTOR=_PARTIALUPDATEINSTANCEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - - - Attributes: - instance: - The Instance which will (partially) replace the current value. - update_mask: - The subset of Instance fields which should be replaced. Must - be explicitly set. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) - ), -) -_sym_db.RegisterMessage(PartialUpdateInstanceRequest) - -DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( - "DeleteInstanceRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETEINSTANCEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.DeleteInstance. - - - Attributes: - name: - The unique name of the instance to be deleted. Values are of - the form ``projects//instances/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) - ), -) -_sym_db.RegisterMessage(DeleteInstanceRequest) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType( - "CreateClusterRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATECLUSTERREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.CreateCluster. - - - Attributes: - parent: - The unique name of the instance in which to create the new - cluster. Values are of the form - ``projects//instances/``. - cluster_id: - The ID to be used when referring to the new cluster within its - instance, e.g., just ``mycluster`` rather than ``projects/mypr - oject/instances/myinstance/clusters/mycluster``. - cluster: - The cluster to be created. Fields marked ``OutputOnly`` must - be left blank. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) - ), -) -_sym_db.RegisterMessage(CreateClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType( - "GetClusterRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETCLUSTERREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.GetCluster. - - - Attributes: - name: - The unique name of the requested cluster. Values are of the - form ``projects//instances//clusters/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) - ), -) -_sym_db.RegisterMessage(GetClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType( - "ListClustersRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTCLUSTERSREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.ListClusters. - - - Attributes: - parent: - The unique name of the instance for which a list of clusters - is requested. Values are of the form - ``projects//instances/``. Use `` - = '-'`` to list Clusters for all Instances in a project, e.g., - ``projects/myproject/instances/-``. 
- page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) - ), -) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType( - "ListClustersResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTCLUSTERSRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Response message for BigtableInstanceAdmin.ListClusters. - - - Attributes: - clusters: - The list of requested clusters. - failed_locations: - Locations from which Cluster information could not be - retrieved, due to an outage or some other transient condition. - Clusters from these locations may be missing from - ``clusters``, or may only have partial information returned. - Values are of the form - ``projects//locations/`` - next_page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) - ), -) -_sym_db.RegisterMessage(ListClustersResponse) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( - "DeleteClusterRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETECLUSTERREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.DeleteCluster. - - - Attributes: - name: - The unique name of the cluster to be deleted. Values are of - the form ``projects//instances//clusters/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) - ), -) -_sym_db.RegisterMessage(DeleteClusterRequest) - -CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "CreateInstanceMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEINSTANCEMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by CreateInstance. - - - Attributes: - original_request: - The request that prompted the initiation of this - CreateInstance operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) - ), -) -_sym_db.RegisterMessage(CreateInstanceMetadata) - -UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateInstanceMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEINSTANCEMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by UpdateInstance. - - - Attributes: - original_request: - The request that prompted the initiation of this - UpdateInstance operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) - ), -) -_sym_db.RegisterMessage(UpdateInstanceMetadata) - -CreateClusterMetadata = _reflection.GeneratedProtocolMessageType( - "CreateClusterMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_CREATECLUSTERMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by CreateCluster. 
- - - Attributes: - original_request: - The request that prompted the initiation of this CreateCluster - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) - ), -) -_sym_db.RegisterMessage(CreateClusterMetadata) - -UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateClusterMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATECLUSTERMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by UpdateCluster. - - - Attributes: - original_request: - The request that prompted the initiation of this UpdateCluster - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) - ), -) -_sym_db.RegisterMessage(UpdateClusterMetadata) - -CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "CreateAppProfileRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEAPPPROFILEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for - BigtableInstanceAdmin.CreateAppProfile. - - - Attributes: - parent: - The unique name of the instance in which to create the new app - profile. Values are of the form - ``projects//instances/``. - app_profile_id: - The ID to be used when referring to the new app profile within - its instance, e.g., just ``myprofile`` rather than ``projects/ - myproject/instances/myinstance/appProfiles/myprofile``. - app_profile: - The app profile to be created. Fields marked ``OutputOnly`` - will be ignored. - ignore_warnings: - If true, ignore safety checks when creating the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) - ), -) -_sym_db.RegisterMessage(CreateAppProfileRequest) - -GetAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "GetAppProfileRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETAPPPROFILEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.GetAppProfile. - - - Attributes: - name: - The unique name of the requested app profile. Values are of - the form ``projects//instances//appProfiles - /``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) - ), -) -_sym_db.RegisterMessage(GetAppProfileRequest) - -ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType( - "ListAppProfilesRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTAPPPROFILESREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.ListAppProfiles. - - - Attributes: - parent: - The unique name of the instance for which a list of app - profiles is requested. Values are of the form - ``projects//instances/``. Use `` - = '-'`` to list AppProfiles for all Instances in a project, - e.g., ``projects/myproject/instances/-``. - page_size: - Maximum number of results per page. CURRENTLY UNIMPLEMENTED - AND IGNORED. 
- page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) - ), -) -_sym_db.RegisterMessage(ListAppProfilesRequest) - -ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType( - "ListAppProfilesResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTAPPPROFILESRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Response message for - BigtableInstanceAdmin.ListAppProfiles. - - - Attributes: - app_profiles: - The list of requested app profiles. - next_page_token: - Set if not all app profiles could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. - failed_locations: - Locations from which AppProfile information could not be - retrieved, due to an outage or some other transient condition. - AppProfiles from these locations may be missing from - ``app_profiles``. Values are of the form - ``projects//locations/`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) - ), -) -_sym_db.RegisterMessage(ListAppProfilesResponse) - -UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "UpdateAppProfileRequest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEAPPPROFILEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for - BigtableInstanceAdmin.UpdateAppProfile. - - - Attributes: - app_profile: - The app profile which will (partially) replace the current - value. - update_mask: - The subset of app profile fields which should be replaced. If - unset, all fields will be replaced. - ignore_warnings: - If true, ignore safety checks when updating the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) - ), -) -_sym_db.RegisterMessage(UpdateAppProfileRequest) - -DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "DeleteAppProfileRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETEAPPPROFILEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for - BigtableInstanceAdmin.DeleteAppProfile. - - - Attributes: - name: - The unique name of the app profile to be deleted. Values are - of the form ``projects//instances//appProfi - les/``. - ignore_warnings: - If true, ignore safety checks when deleting the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) - ), -) -_sym_db.RegisterMessage(DeleteAppProfileRequest) - -UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateAppProfileMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEAPPPROFILEMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by - UpdateAppProfile. 
- - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) - ), -) -_sym_db.RegisterMessage(UpdateAppProfileMetadata) - - -DESCRIPTOR._options = None -_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = None - -_BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( - name="BigtableInstanceAdmin", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=None, - serialized_start=2889, - serialized_end=5875, - methods=[ - _descriptor.MethodDescriptor( - name="CreateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", - index=0, - containing_service=None, - input_type=_CREATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="GetInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", - index=1, - containing_service=None, - input_type=_GETINSTANCEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=_b( - "\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}" - ), - ), - _descriptor.MethodDescriptor( - name="ListInstances", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", - index=2, - containing_service=None, - input_type=_LISTINSTANCESREQUEST, - output_type=_LISTINSTANCESRESPONSE, - serialized_options=_b( - "\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances" - ), - ), - _descriptor.MethodDescriptor( - name="UpdateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", - index=3, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=_b( - "\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*" - ), - ), - _descriptor.MethodDescriptor( - name="PartialUpdateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", - index=4, - containing_service=None, - input_type=_PARTIALUPDATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", - index=5, - containing_service=None, - input_type=_DELETEINSTANCEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}" - ), - ), - _descriptor.MethodDescriptor( - name="CreateCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", - index=6, - containing_service=None, - input_type=_CREATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster' - ), - ), - _descriptor.MethodDescriptor( - name="GetCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", - index=7, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - 
output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - serialized_options=_b( - "\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}" - ), - ), - _descriptor.MethodDescriptor( - name="ListClusters", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", - index=8, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters" - ), - ), - _descriptor.MethodDescriptor( - name="UpdateCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", - index=9, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", - index=10, - containing_service=None, - input_type=_DELETECLUSTERREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}" - ), - ), - _descriptor.MethodDescriptor( - name="CreateAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", - index=11, - containing_service=None, - input_type=_CREATEAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=_b( - '\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile' - ), - ), - _descriptor.MethodDescriptor( - name="GetAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", - index=12, - containing_service=None, - input_type=_GETAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=_b( - "\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}" - ), - ), - _descriptor.MethodDescriptor( - name="ListAppProfiles", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", - index=13, - containing_service=None, - input_type=_LISTAPPPROFILESREQUEST, - output_type=_LISTAPPPROFILESRESPONSE, - serialized_options=_b( - "\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles" - ), - ), - _descriptor.MethodDescriptor( - name="UpdateAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", - index=14, - containing_service=None, - input_type=_UPDATEAPPPROFILEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", - index=15, - containing_service=None, - input_type=_DELETEAPPPROFILEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}" - ), - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", - index=16, - 
containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", - index=17, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", - index=18, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*' - ), - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLEINSTANCEADMIN) - -DESCRIPTOR.services_by_name["BigtableInstanceAdmin"] = _BIGTABLEINSTANCEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py deleted file mode 100644 index 0ca0445e22db..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ /dev/null @@ -1,370 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class BigtableInstanceAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - ) - self.ListInstances = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, - ) - self.UpdateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - ) - self.PartialUpdateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - ) - self.ListClusters = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, - ) - self.UpdateCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - 
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - ) - self.GetAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - ) - self.ListAppProfiles = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, - ) - self.UpdateAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class BigtableInstanceAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. 
Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - def CreateInstance(self, request, context): - """Create an instance within a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetInstance(self, request, context): - """Gets information about an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListInstances(self, request, context): - """Lists information about instances in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateInstance(self, request, context): - """Updates an instance within a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def PartialUpdateInstance(self, request, context): - """Partially updates an instance within a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteInstance(self, request, context): - """Delete an instance from a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateCluster(self, request, context): - """Creates a cluster within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetCluster(self, request, context): - """Gets information about a cluster. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListClusters(self, request, context): - """Lists information about clusters in an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateCluster(self, request, context): - """Updates a cluster within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteCluster(self, request, context): - """Deletes a cluster from an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateAppProfile(self, request, context): - """Creates an app profile within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetAppProfile(self, request, context): - """Gets information about an app profile. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListAppProfiles(self, request, context): - """Lists information about app profiles in an instance. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateAppProfile(self, request, context): - """Updates an app profile within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteAppProfile(self, request, context): - """Deletes an app profile from an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on an instance resource. Replaces any - existing policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified instance resource. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableInstanceAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateInstance": grpc.unary_unary_rpc_method_handler( - servicer.CreateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetInstance": grpc.unary_unary_rpc_method_handler( - servicer.GetInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - "ListInstances": grpc.unary_unary_rpc_method_handler( - servicer.ListInstances, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, - ), - "UpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.UpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - "PartialUpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.PartialUpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteInstance": grpc.unary_unary_rpc_method_handler( - servicer.DeleteInstance, - 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateCluster": grpc.unary_unary_rpc_method_handler( - servicer.CreateCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetCluster": grpc.unary_unary_rpc_method_handler( - servicer.GetCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - ), - "ListClusters": grpc.unary_unary_rpc_method_handler( - servicer.ListClusters, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, - ), - "UpdateCluster": grpc.unary_unary_rpc_method_handler( - servicer.UpdateCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteCluster": grpc.unary_unary_rpc_method_handler( - servicer.DeleteCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.CreateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - "GetAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.GetAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - "ListAppProfiles": grpc.unary_unary_rpc_method_handler( - servicer.ListAppProfiles, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, - ), - "UpdateAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.UpdateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.DeleteAppProfile, - 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.admin.v2.BigtableInstanceAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto deleted file mode 100644 index 812022295950..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/table.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// -// -// Provides access to the table schemas only, not the data stored within -// the tables. -service BigtableTableAdmin { - // Creates a new table in the specified instance. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables" - body: "*" - }; - } - - // Creates a new table from the specified snapshot. 
The target table must - // not exist. The snapshot and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" - body: "*" - }; - } - - // Lists all tables served from a specified instance. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/tables" - }; - } - - // Gets metadata information about the specified table. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/tables/*}" - }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/tables/*}" - }; - } - - // Performs a series of column family modifications on the specified table. - // Either all or none of the modifications will occur before this method - // returns, but data requests received prior to that point may see a table - // where only some modifications have taken effect. - rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" - body: "*" - }; - } - - // Permanently drop/delete a row range from a specified table. The request can - // specify whether to delete all rows in a table, or only those that match a - // particular prefix. - rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" - body: "*" - }; - } - - // Generates a consistency token for a Table, which can be used in - // CheckConsistency to check whether mutations to the table that finished - // before this call started have been replicated. The tokens will be available - // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) - returns (GenerateConsistencyTokenResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" - body: "*" - }; - } - - // Checks replication consistency based on a consistency token, that is, if - // replication has caught up based on the conditions specified in the token - // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) - returns (CheckConsistencyResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" - body: "*" - }; - } - - // Creates a new snapshot in the specified cluster from the specified - // source table. The cluster and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. 
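The add_BigtableInstanceAdminServicer_to_server helper removed above wired a servicer subclass into a grpc.Server by mapping each RPC name to its request deserializer and response serializer. As a minimal sketch of how that helper was typically used — assuming the now-deleted generated module were still importable from google.cloud.bigtable_admin_v2.proto, and with the servicer subclass and port purely illustrative:

    import grpc
    from concurrent import futures

    # Module deleted in this change; imported here only to illustrate prior usage.
    from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc

    class FakeInstanceAdmin(bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminServicer):
        # Override individual methods (e.g. GetInstance) to build a local fake;
        # anything left unoverridden keeps the UNIMPLEMENTED behavior shown above.
        pass

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    bigtable_instance_admin_pb2_grpc.add_BigtableInstanceAdminServicer_to_server(
        FakeInstanceAdmin(), server
    )
    server.add_insecure_port("[::]:8086")  # arbitrary example port
    server.start()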
- rpc SnapshotTable(SnapshotTableRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" - body: "*" - }; - } - - // Gets metadata information about the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - } - - // Lists all snapshots associated with the specified cluster. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - }; - } - - // Permanently deletes the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - } - - // Gets the access control policy for a table resource. Returns an empty - // policy if an table exists but does not have a policy set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" - body: "*" - }; - } - - // Sets the access control policy on a table resource. Replaces any existing - // policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified table resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" - body: "*" - }; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] -message CreateTableRequest { - // An initial split point for a newly created table. - message Split { - // Row key to use as an initial tablet boundary. - bytes key = 1; - } - - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The Table to create. 
- Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (tablets are similar to HBase regions). - // Given two split keys, `s1` and `s2`, three tablets will be created, - // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. - // - // Example: - // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // `"other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` - repeated Split initial_splits = 4; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotRequest { - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The unique name of the snapshot from which to restore the table. The - // snapshot and the table must be in the same instance. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string source_snapshot = 3; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] -message DropRowRangeRequest { - // The unique name of the table on which to drop a range of rows. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Delete all rows or by prefix. - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesRequest { - // The unique name of the instance for which tables should be listed. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The view to be applied to the returned tables' fields. - // Defaults to `NAME_ONLY` if unspecified; no others are currently supported. - Table.View view = 2; - - // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 4; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesResponse { - // The tables present in the requested instance. - repeated Table tables = 1; - - // Set if not all tables could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] -message GetTableRequest { - // The unique name of the requested table. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The view to be applied to the returned table's fields. - // Defaults to `SCHEMA_VIEW` if unspecified. - Table.View view = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] -message DeleteTableRequest { - // The unique name of the table to be deleted. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] -message ModifyColumnFamiliesRequest { - // A create, update, or delete of a particular column family. - message Modification { - // The ID of the column family to be modified. - string id = 1; - - // Column familiy modifications. - oneof mod { - // Create a new column family with the specified schema, or fail if - // one already exists with the given ID. - ColumnFamily create = 2; - - // Update an existing column family to the specified schema, or fail - // if no column family exists with the given ID. - ColumnFamily update = 3; - - // Drop (delete) the column family with the given ID, or fail if no such - // family exists. - bool drop = 4; - } - } - - // The unique name of the table whose families should be modified. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). - repeated Modification modifications = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenRequest { - // The unique name of the Table for which to create a consistency token. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenResponse { - // The generated consistency token. - string consistency_token = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyRequest { - // The unique name of the Table for which to check replication consistency. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The token created using GenerateConsistencyToken for the Table. - string consistency_token = 2; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyResponse { - // True only if the token is consistent. A token is consistent if replication - // has caught up with the restrictions specified in the request. - bool consistent = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableRequest { - // The unique name of the table to have the snapshot taken. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The name of the cluster where the snapshot will be created in. - // Values are of the form - // `projects//instances//clusters/`. - string cluster = 2; - - // The ID by which the new snapshot should be referred to within the parent - // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // rather than - // `projects//instances//clusters//snapshots/mysnapshot`. - string snapshot_id = 3; - - // The amount of time that the new snapshot can stay active after it is - // created. Once 'ttl' expires, the snapshot will get deleted. The maximum - // amount of time a snapshot can stay active is 7 days. If 'ttl' is not - // specified, the default value of 24 hours will be used. - google.protobuf.Duration ttl = 4; - - // Description of the snapshot. - string description = 5; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message GetSnapshotRequest { - // The unique name of the requested snapshot. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsRequest { - // The unique name of the cluster for which snapshots should be listed. - // Values are of the form - // `projects//instances//clusters/`. - // Use ` = '-'` to list snapshots for all clusters in an instance, - // e.g., `projects//instances//clusters/-`. - string parent = 1; - - // The maximum number of snapshots to return per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 2; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsResponse { - // The snapshots present in the requested cluster. - repeated Snapshot snapshots = 1; - - // Set if not all snapshots could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. 
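The GenerateConsistencyToken / CheckConsistency pair defined above is meant as a write-then-poll workflow: generate a token after the writes of interest, then poll CheckConsistency until it reports consistent. A rough sketch against the generated stub — the module path, channel target, and table name are illustrative, and BigtableTableAdminStub is assumed to follow the standard protoc-grpc naming for the BigtableTableAdmin service:

    import time
    import grpc
    from google.cloud.bigtable_admin_v2.proto import (
        bigtable_table_admin_pb2,
        bigtable_table_admin_pb2_grpc,
    )

    channel = grpc.insecure_channel("localhost:8086")  # illustrative target
    stub = bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)

    table = "projects/my-project/instances/my-instance/tables/my-table"  # placeholder
    token = stub.GenerateConsistencyToken(
        bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=table)
    ).consistency_token

    # Poll until replication has caught up with writes issued before the token was created.
    while not stub.CheckConsistency(
        bigtable_table_admin_pb2.CheckConsistencyRequest(
            name=table, consistency_token=token
        )
    ).consistent:
        time.sleep(1)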
This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message DeleteSnapshotRequest { - // The unique name of the snapshot to be deleted. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; -} - -// The metadata for the Operation returned by SnapshotTable. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableMetadata { - // The request that prompted the initiation of this SnapshotTable operation. - SnapshotTableRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateTableFromSnapshot. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotMetadata { - // The request that prompted the initiation of this CreateTableFromSnapshot - // operation. - CreateTableFromSnapshotRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py deleted file mode 100644 index 6852607952f3..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ /dev/null @@ -1,2291 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
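The generated bigtable_table_admin_pb2 module being removed below contains descriptor machinery plus the concrete message classes built from it. For orientation, the request messages declared in the proto above were constructed like any other protobuf messages; a small sketch, with the project, instance, and table identifiers as placeholders:

    from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2, table_pb2

    request = bigtable_table_admin_pb2.CreateTableRequest(
        parent="projects/my-project/instances/my-instance",  # placeholder resource name
        table_id="my-table",
        table=table_pb2.Table(),
        initial_splits=[
            # Each split key becomes an initial tablet boundary, as documented above.
            bigtable_table_admin_pb2.CreateTableRequest.Split(key=b"customer_1"),
            bigtable_table_admin_pb2.CreateTableRequest.Split(key=b"customer_2"),
        ],
    )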
-# source: google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - '\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"~\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View""\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 
\x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t""\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\x99\x15\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"5\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"H\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"J\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"F\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\x91\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"F\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\x12\x91\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"F\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*\x12\xb7\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"L\x82\xd3\xe4\x93\x02\x46"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googl
eapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( - name="Split", - full_name="google.bigtable.admin.v2.CreateTableRequest.Split", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.CreateTableRequest.Split.key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=545, - serialized_end=565, -) - -_CREATETABLEREQUEST = _descriptor.Descriptor( - name="CreateTableRequest", - full_name="google.bigtable.admin.v2.CreateTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateTableRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.CreateTableRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="table", - full_name="google.bigtable.admin.v2.CreateTableRequest.table", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="initial_splits", - full_name="google.bigtable.admin.v2.CreateTableRequest.initial_splits", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_CREATETABLEREQUEST_SPLIT,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=365, - serialized_end=565, -) - - -_CREATETABLEFROMSNAPSHOTREQUEST = _descriptor.Descriptor( - name="CreateTableFromSnapshotRequest", - 
full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="source_snapshot", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.source_snapshot", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=567, - serialized_end=658, -) - - -_DROPROWRANGEREQUEST = _descriptor.Descriptor( - name="DropRowRangeRequest", - full_name="google.bigtable.admin.v2.DropRowRangeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_key_prefix", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete_all_data_from_table", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="target", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.target", - index=0, - containing_type=None, - fields=[], - ), - ], - serialized_start=660, - serialized_end=769, -) - - -_LISTTABLESREQUEST = _descriptor.Descriptor( - name="ListTablesRequest", - 
full_name="google.bigtable.admin.v2.ListTablesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListTablesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.bigtable.admin.v2.ListTablesRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListTablesRequest.page_size", - index=2, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListTablesRequest.page_token", - index=3, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=771, - serialized_end=897, -) - - -_LISTTABLESRESPONSE = _descriptor.Descriptor( - name="ListTablesResponse", - full_name="google.bigtable.admin.v2.ListTablesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="tables", - full_name="google.bigtable.admin.v2.ListTablesResponse.tables", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListTablesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=899, - serialized_end=993, -) - - -_GETTABLEREQUEST = _descriptor.Descriptor( - name="GetTableRequest", - full_name="google.bigtable.admin.v2.GetTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.bigtable.admin.v2.GetTableRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=995, - serialized_end=1078, -) - - -_DELETETABLEREQUEST = _descriptor.Descriptor( - name="DeleteTableRequest", - full_name="google.bigtable.admin.v2.DeleteTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1080, - serialized_end=1114, -) - - -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( - name="Modification", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="id", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="create", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="drop", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - 
syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mod", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod", - index=0, - containing_type=None, - fields=[], - ), - ], - serialized_start=1254, - serialized_end=1419, -) - -_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( - name="ModifyColumnFamiliesRequest", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="modifications", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1117, - serialized_end=1419, -) - - -_GENERATECONSISTENCYTOKENREQUEST = _descriptor.Descriptor( - name="GenerateConsistencyTokenRequest", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1421, - serialized_end=1468, -) - - -_GENERATECONSISTENCYTOKENRESPONSE = _descriptor.Descriptor( - name="GenerateConsistencyTokenResponse", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="consistency_token", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse.consistency_token", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1470, - serialized_end=1531, -) - - -_CHECKCONSISTENCYREQUEST = _descriptor.Descriptor( - name="CheckConsistencyRequest", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="consistency_token", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest.consistency_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1533, - serialized_end=1599, -) - - -_CHECKCONSISTENCYRESPONSE = _descriptor.Descriptor( - name="CheckConsistencyResponse", - full_name="google.bigtable.admin.v2.CheckConsistencyResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="consistent", - full_name="google.bigtable.admin.v2.CheckConsistencyResponse.consistent", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1601, - serialized_end=1647, -) - - -_SNAPSHOTTABLEREQUEST = _descriptor.Descriptor( - name="SnapshotTableRequest", - full_name="google.bigtable.admin.v2.SnapshotTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.cluster", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="snapshot_id", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.snapshot_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="ttl", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.ttl", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - 
default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.description", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1650, - serialized_end=1785, -) - - -_GETSNAPSHOTREQUEST = _descriptor.Descriptor( - name="GetSnapshotRequest", - full_name="google.bigtable.admin.v2.GetSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetSnapshotRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1787, - serialized_end=1821, -) - - -_LISTSNAPSHOTSREQUEST = _descriptor.Descriptor( - name="ListSnapshotsRequest", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1823, - serialized_end=1900, -) - - -_LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor( - name="ListSnapshotsResponse", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="snapshots", - 
full_name="google.bigtable.admin.v2.ListSnapshotsResponse.snapshots", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1902, - serialized_end=2005, -) - - -_DELETESNAPSHOTREQUEST = _descriptor.Descriptor( - name="DeleteSnapshotRequest", - full_name="google.bigtable.admin.v2.DeleteSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteSnapshotRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2007, - serialized_end=2044, -) - - -_SNAPSHOTTABLEMETADATA = _descriptor.Descriptor( - name="SnapshotTableMetadata", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2047, - serialized_end=2243, -) - - -_CREATETABLEFROMSNAPSHOTMETADATA = _descriptor.Descriptor( - name="CreateTableFromSnapshotMetadata", - 
full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2246, - serialized_end=2462, -) - -_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST -_CREATETABLEREQUEST.fields_by_name[ - "table" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE -) -_CREATETABLEREQUEST.fields_by_name[ - "initial_splits" -].message_type = _CREATETABLEREQUEST_SPLIT -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["row_key_prefix"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "row_key_prefix" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["delete_all_data_from_table"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "delete_all_data_from_table" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_LISTTABLESREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_LISTTABLESRESPONSE.fields_by_name[ - "tables" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE -) -_GETTABLEREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["create"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].containing_oneof = 
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["update"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["drop"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "drop" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name[ - "modifications" -].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION -_SNAPSHOTTABLEREQUEST.fields_by_name[ - "ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LISTSNAPSHOTSRESPONSE.fields_by_name[ - "snapshots" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT -) -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "original_request" -].message_type = _SNAPSHOTTABLEREQUEST -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATETABLEFROMSNAPSHOTREQUEST -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotRequest" -] = _CREATETABLEFROMSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["DropRowRangeRequest"] = _DROPROWRANGEREQUEST -DESCRIPTOR.message_types_by_name["ListTablesRequest"] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name["ListTablesResponse"] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name["GetTableRequest"] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name["DeleteTableRequest"] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "ModifyColumnFamiliesRequest" -] = _MODIFYCOLUMNFAMILIESREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenRequest" -] = _GENERATECONSISTENCYTOKENREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenResponse" -] = _GENERATECONSISTENCYTOKENRESPONSE -DESCRIPTOR.message_types_by_name["CheckConsistencyRequest"] = _CHECKCONSISTENCYREQUEST -DESCRIPTOR.message_types_by_name["CheckConsistencyResponse"] = _CHECKCONSISTENCYRESPONSE -DESCRIPTOR.message_types_by_name["SnapshotTableRequest"] = _SNAPSHOTTABLEREQUEST -DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["SnapshotTableMetadata"] = _SNAPSHOTTABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotMetadata" -] = _CREATETABLEFROMSNAPSHOTMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateTableRequest = 
_reflection.GeneratedProtocolMessageType( - "CreateTableRequest", - (_message.Message,), - dict( - Split=_reflection.GeneratedProtocolMessageType( - "Split", - (_message.Message,), - dict( - DESCRIPTOR=_CREATETABLEREQUEST_SPLIT, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""An initial split point for a newly created table. - - - Attributes: - key: - Row key to use as an initial tablet boundary. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) - ), - ), - DESCRIPTOR=_CREATETABLEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - - - Attributes: - parent: - The unique name of the instance in which to create the table. - Values are of the form - ``projects//instances/``. - table_id: - The name by which the new table should be referred to within - the parent instance, e.g., ``foobar`` rather than - ``/tables/foobar``. - table: - The Table to create. - initial_splits: - The optional list of row keys that will be used to initially - split the table into several tablets (tablets are similar to - HBase regions). Given two split keys, ``s1`` and ``s2``, three - tablets will be created, spanning the key ranges: ``[, s1), - [s1, s2), [s2, )``. Example: - Row keys := ``["a", "apple", - "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - - initial\_split\_keys := ``["apple", "customer_1", - "customer_2", "other"]`` - Key assignment: - Tablet 1 - ``[, apple) => {"a"}.`` - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) - ), -) -_sym_db.RegisterMessage(CreateTableRequest) -_sym_db.RegisterMessage(CreateTableRequest.Split) - -CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATETABLEFROMSNAPSHOTREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - parent: - The unique name of the instance in which to create the table. - Values are of the form - ``projects//instances/``. - table_id: - The name by which the new table should be referred to within - the parent instance, e.g., ``foobar`` rather than - ``/tables/foobar``. - source_snapshot: - The unique name of the snapshot from which to restore the - table. The snapshot and the table must be in the same - instance. Values are of the form ``projects//instance - s//clusters//snapshots/``. 
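The CreateTableRequest docstring deleted above explains how initial split keys pre-shard a new table into tablets. A minimal sketch of driving the generated classes removed in this diff, assuming the stub can reach a Bigtable admin endpoint (an emulator here) and using hypothetical project, instance, and table names:

# Illustrative only: exercises the generated classes deleted in this diff.
# Endpoint, project, and instance names are hypothetical.
import grpc

from google.cloud.bigtable_admin_v2.proto import (
    bigtable_table_admin_pb2,
    bigtable_table_admin_pb2_grpc,
    table_pb2,
)

channel = grpc.insecure_channel("localhost:8086")  # e.g. a local Bigtable emulator
stub = bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)

request = bigtable_table_admin_pb2.CreateTableRequest(
    parent="projects/my-project/instances/my-instance",
    table_id="foobar",
    table=table_pb2.Table(),
    # Three split keys yield four tablets: [, apple), [apple, customer_1),
    # [customer_1, customer_2), [customer_2, ).
    initial_splits=[
        bigtable_table_admin_pb2.CreateTableRequest.Split(key=b"apple"),
        bigtable_table_admin_pb2.CreateTableRequest.Split(key=b"customer_1"),
        bigtable_table_admin_pb2.CreateTableRequest.Split(key=b"customer_2"),
    ],
)
created = stub.CreateTable(request)  # returns a table_pb2.Table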
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) - ), -) -_sym_db.RegisterMessage(CreateTableFromSnapshotRequest) - -DropRowRangeRequest = _reflection.GeneratedProtocolMessageType( - "DropRowRangeRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DROPROWRANGEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - - - Attributes: - name: - The unique name of the table on which to drop a range of rows. - Values are of the form - ``projects//instances//tables/
``. - target: - Delete all rows or by prefix. - row_key_prefix: - Delete all rows that start with this row key prefix. Prefix - cannot be zero length. - delete_all_data_from_table: - Delete all rows in the table. Setting this to false is a no- - op. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) - ), -) -_sym_db.RegisterMessage(DropRowRangeRequest) - -ListTablesRequest = _reflection.GeneratedProtocolMessageType( - "ListTablesRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTTABLESREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - - Attributes: - parent: - The unique name of the instance for which tables should be - listed. Values are of the form - ``projects//instances/``. - view: - The view to be applied to the returned tables' fields. - Defaults to ``NAME_ONLY`` if unspecified; no others are - currently supported. - page_size: - Maximum number of results per page. CURRENTLY UNIMPLEMENTED - AND IGNORED. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) - ), -) -_sym_db.RegisterMessage(ListTablesRequest) - -ListTablesResponse = _reflection.GeneratedProtocolMessageType( - "ListTablesResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTTABLESRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - - Attributes: - tables: - The tables present in the requested instance. - next_page_token: - Set if not all tables could be returned in a single response. - Pass this value to ``page_token`` in another request to get - the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) - ), -) -_sym_db.RegisterMessage(ListTablesResponse) - -GetTableRequest = _reflection.GeneratedProtocolMessageType( - "GetTableRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETTABLEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - - - Attributes: - name: - The unique name of the requested table. Values are of the form - ``projects//instances//tables/
``. - view: - The view to be applied to the returned table's fields. - Defaults to ``SCHEMA_VIEW`` if unspecified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) - ), -) -_sym_db.RegisterMessage(GetTableRequest) - -DeleteTableRequest = _reflection.GeneratedProtocolMessageType( - "DeleteTableRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETETABLEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - - - Attributes: - name: - The unique name of the table to be deleted. Values are of the - form - ``projects//instances//tables/
``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) - ), -) -_sym_db.RegisterMessage(DeleteTableRequest) - -ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType( - "ModifyColumnFamiliesRequest", - (_message.Message,), - dict( - Modification=_reflection.GeneratedProtocolMessageType( - "Modification", - (_message.Message,), - dict( - DESCRIPTOR=_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""A create, update, or delete of a particular column family. - - - Attributes: - id: - The ID of the column family to be modified. - mod: - Column familiy modifications. - create: - Create a new column family with the specified schema, or fail - if one already exists with the given ID. - update: - Update an existing column family to the specified schema, or - fail if no column family exists with the given ID. - drop: - Drop (delete) the column family with the given ID, or fail if - no such family exists. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) - ), - ), - DESCRIPTOR=_MODIFYCOLUMNFAMILIESREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - - - Attributes: - name: - The unique name of the table whose families should be - modified. Values are of the form - ``projects//instances//tables/
``. - modifications: - Modifications to be atomically applied to the specified - table's families. Entries are applied in order, meaning that - earlier modifications can be masked by later ones (in the case - of repeated updates to the same family, for example). - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) - ), -) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) - -GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType( - "GenerateConsistencyTokenRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GENERATECONSISTENCYTOKENREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - - Attributes: - name: - The unique name of the Table for which to create a consistency - token. Values are of the form - ``projects//instances//tables/
``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) - ), -) -_sym_db.RegisterMessage(GenerateConsistencyTokenRequest) - -GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType( - "GenerateConsistencyTokenResponse", - (_message.Message,), - dict( - DESCRIPTOR=_GENERATECONSISTENCYTOKENRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - - Attributes: - consistency_token: - The generated consistency token. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) - ), -) -_sym_db.RegisterMessage(GenerateConsistencyTokenResponse) - -CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType( - "CheckConsistencyRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CHECKCONSISTENCYREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - - Attributes: - name: - The unique name of the Table for which to check replication - consistency. Values are of the form - ``projects//instances//tables/
``. - consistency_token: - The token created using GenerateConsistencyToken for the - Table. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) - ), -) -_sym_db.RegisterMessage(CheckConsistencyRequest) - -CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType( - "CheckConsistencyResponse", - (_message.Message,), - dict( - DESCRIPTOR=_CHECKCONSISTENCYRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - - Attributes: - consistent: - True only if the token is consistent. A token is consistent if - replication has caught up with the restrictions specified in - the request. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) - ), -) -_sym_db.RegisterMessage(CheckConsistencyResponse) - -SnapshotTableRequest = _reflection.GeneratedProtocolMessageType( - "SnapshotTableRequest", - (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOTTABLEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - name: - The unique name of the table to have the snapshot taken. - Values are of the form - ``projects//instances//tables/
``. - cluster: - The name of the cluster where the snapshot will be created in. - Values are of the form ``projects//instances//clusters/``. - snapshot_id: - The ID by which the new snapshot should be referred to within - the parent cluster, e.g., ``mysnapshot`` of the form: ``[_a- - zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects//ins - tances//clusters//snapshots/mysnapshot``. - ttl: - The amount of time that the new snapshot can stay active after - it is created. Once 'ttl' expires, the snapshot will get - deleted. The maximum amount of time a snapshot can stay active - is 7 days. If 'ttl' is not specified, the default value of 24 - hours will be used. - description: - Description of the snapshot. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) - ), -) -_sym_db.RegisterMessage(SnapshotTableRequest) - -GetSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "GetSnapshotRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETSNAPSHOTREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - name: - The unique name of the requested snapshot. Values are of the - form ``projects//instances//clusters//snapshots/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) - ), -) -_sym_db.RegisterMessage(GetSnapshotRequest) - -ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType( - "ListSnapshotsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTSNAPSHOTSREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - parent: - The unique name of the cluster for which snapshots should be - listed. Values are of the form ``projects//instances/ - /clusters/``. Use `` = '-'`` to - list snapshots for all clusters in an instance, e.g., - ``projects//instances//clusters/-``. - page_size: - The maximum number of snapshots to return per page. CURRENTLY - UNIMPLEMENTED AND IGNORED. - page_token: - The value of ``next_page_token`` returned by a previous call. 
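The consistency-token messages deleted above describe a generate-then-poll workflow: GenerateConsistencyToken captures a point in time, and CheckConsistency reports whether replication has caught up to it. A hedged sketch against the removed generated stub, with hypothetical endpoint and resource names:

# Sketch of the replication-consistency workflow described in the deleted
# docstrings; endpoint and resource names are hypothetical.
import time

import grpc

from google.cloud.bigtable_admin_v2.proto import (
    bigtable_table_admin_pb2,
    bigtable_table_admin_pb2_grpc,
)

channel = grpc.insecure_channel("bigtableadmin.example.internal:443")
stub = bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)
table_name = "projects/my-project/instances/my-instance/tables/foobar"

# 1. Generate a token after the writes you care about have finished.
token = stub.GenerateConsistencyToken(
    bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=table_name)
).consistency_token

# 2. Poll CheckConsistency until replication has caught up.
while not stub.CheckConsistency(
    bigtable_table_admin_pb2.CheckConsistencyRequest(
        name=table_name, consistency_token=token
    )
).consistent:
    time.sleep(1)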
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) - ), -) -_sym_db.RegisterMessage(ListSnapshotsRequest) - -ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType( - "ListSnapshotsResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTSNAPSHOTSRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - snapshots: - The snapshots present in the requested cluster. - next_page_token: - Set if not all snapshots could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) - ), -) -_sym_db.RegisterMessage(ListSnapshotsResponse) - -DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "DeleteSnapshotRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETESNAPSHOTREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - name: - The unique name of the snapshot to be deleted. Values are of - the form ``projects//instances//clusters//snapshots/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) - ), -) -_sym_db.RegisterMessage(DeleteSnapshotRequest) - -SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType( - "SnapshotTableMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOTTABLEMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""The metadata for the Operation returned by SnapshotTable. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - original_request: - The request that prompted the initiation of this SnapshotTable - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) - ), -) -_sym_db.RegisterMessage(SnapshotTableMetadata) - -CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_CREATETABLEFROMSNAPSHOTMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""The metadata for the Operation returned by - CreateTableFromSnapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - original_request: - The request that prompted the initiation of this - CreateTableFromSnapshot operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) - ), -) -_sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) - - -DESCRIPTOR._options = None - -_BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( - name="BigtableTableAdmin", - full_name="google.bigtable.admin.v2.BigtableTableAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=None, - serialized_start=2465, - serialized_end=5178, - methods=[ - _descriptor.MethodDescriptor( - name="CreateTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", - index=0, - containing_service=None, - input_type=_CREATETABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=_b( - '\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="CreateTableFromSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", - index=1, - containing_service=None, - input_type=_CREATETABLEFROMSNAPSHOTREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="ListTables", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListTables", - index=2, - containing_service=None, - input_type=_LISTTABLESREQUEST, - output_type=_LISTTABLESRESPONSE, - serialized_options=_b( - "\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables" - ), - ), - _descriptor.MethodDescriptor( - name="GetTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetTable", - index=3, - containing_service=None, - input_type=_GETTABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=_b( - "\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", - index=4, - containing_service=None, - input_type=_DELETETABLEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}" - ), - ), - _descriptor.MethodDescriptor( - name="ModifyColumnFamilies", - 
full_name="google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", - index=5, - containing_service=None, - input_type=_MODIFYCOLUMNFAMILIESREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=_b( - '\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="DropRowRange", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", - index=6, - containing_service=None, - input_type=_DROPROWRANGEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - '\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="GenerateConsistencyToken", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", - index=7, - containing_service=None, - input_type=_GENERATECONSISTENCYTOKENREQUEST, - output_type=_GENERATECONSISTENCYTOKENRESPONSE, - serialized_options=_b( - '\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="CheckConsistency", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", - index=8, - containing_service=None, - input_type=_CHECKCONSISTENCYREQUEST, - output_type=_CHECKCONSISTENCYRESPONSE, - serialized_options=_b( - '\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="SnapshotTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", - index=9, - containing_service=None, - input_type=_SNAPSHOTTABLEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="GetSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", - index=10, - containing_service=None, - input_type=_GETSNAPSHOTREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, - serialized_options=_b( - "\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - ), - ), - _descriptor.MethodDescriptor( - name="ListSnapshots", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", - index=11, - containing_service=None, - input_type=_LISTSNAPSHOTSREQUEST, - output_type=_LISTSNAPSHOTSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", - index=12, - containing_service=None, - input_type=_DELETESNAPSHOTREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - ), - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", - index=13, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*' - ), - ), - 
_descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", - index=14, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", - index=15, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002F"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\001*' - ), - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLETABLEADMIN) - -DESCRIPTOR.services_by_name["BigtableTableAdmin"] = _BIGTABLETABLEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py deleted file mode 100644 index f152581fe0a2..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ /dev/null @@ -1,367 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class BigtableTableAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
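As the deleted constructor docstring above notes, the stub only needs a grpc.Channel. A sketch over a TLS channel to the public admin endpoint; real calls additionally need OAuth credentials, which the hand-written google-cloud-bigtable client normally attaches on top of this layer:

import grpc

from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc

# TLS-only channel; production traffic also needs per-call OAuth credentials,
# which the higher-level client library handles for you.
channel = grpc.secure_channel(
    "bigtableadmin.googleapis.com:443", grpc.ssl_channel_credentials()
)
stub = bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)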
- """ - self.CreateTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.CreateTableFromSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ListTables = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, - ) - self.GetTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.DeleteTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ModifyColumnFamilies = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.DropRowRange = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GenerateConsistencyToken = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, - ) - self.CheckConsistency = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, - ) - self.SnapshotTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - 
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, - ) - self.ListSnapshots = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, - ) - self.DeleteSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class BigtableTableAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def CreateTable(self, request, context): - """Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateTableFromSnapshot(self, request, context): - """Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
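The GetIamPolicy, SetIamPolicy, and TestIamPermissions stubs wired above take their request types from google.iam.v1. A sketch with a hypothetical table resource and an example permission string:

# Sketch of the IAM methods wired above; the resource name is hypothetical.
import grpc

from google.iam.v1 import iam_policy_pb2
from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc

channel = grpc.secure_channel(
    "bigtableadmin.googleapis.com:443", grpc.ssl_channel_credentials()
)
stub = bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)
resource = "projects/my-project/instances/my-instance/tables/foobar"

policy = stub.GetIamPolicy(iam_policy_pb2.GetIamPolicyRequest(resource=resource))
allowed = stub.TestIamPermissions(
    iam_policy_pb2.TestIamPermissionsRequest(
        resource=resource, permissions=["bigtable.tables.readRows"]
    )
).permissions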
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListTables(self, request, context): - """Lists all tables served from a specified instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetTable(self, request, context): - """Gets metadata information about the specified table. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteTable(self, request, context): - """Permanently deletes a specified table and all of its data. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ModifyColumnFamilies(self, request, context): - """Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DropRowRange(self, request, context): - """Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GenerateConsistencyToken(self, request, context): - """Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CheckConsistency(self, request, context): - """Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SnapshotTable(self, request, context): - """Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetSnapshot(self, request, context): - """Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. 
This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListSnapshots(self, request, context): - """Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteSnapshot(self, request, context): - """Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for a table resource. Returns an empty - policy if an table exists but does not have a policy set. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on a table resource. Replaces any existing - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified table resource. 
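The servicer base class deleted here answers every method with UNIMPLEMENTED, which makes it a convenient base for test fakes: override only what a test needs and register the fake with the add_BigtableTableAdminServicer_to_server helper defined just below. A sketch that overrides ListTables only:

# A minimal fake servicer, e.g. for unit tests.
from concurrent import futures

import grpc

from google.cloud.bigtable_admin_v2.proto import (
    bigtable_table_admin_pb2,
    bigtable_table_admin_pb2_grpc,
    table_pb2,
)


class FakeTableAdmin(bigtable_table_admin_pb2_grpc.BigtableTableAdminServicer):
    def ListTables(self, request, context):
        # Return a single canned table; every other method keeps the
        # UNIMPLEMENTED behaviour inherited from the base servicer.
        return bigtable_table_admin_pb2.ListTablesResponse(
            tables=[table_pb2.Table(name=request.parent + "/tables/foobar")]
        )


server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
bigtable_table_admin_pb2_grpc.add_BigtableTableAdminServicer_to_server(
    FakeTableAdmin(), server
)
port = server.add_insecure_port("localhost:0")
server.start()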
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableTableAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateTable": grpc.unary_unary_rpc_method_handler( - servicer.CreateTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "CreateTableFromSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.CreateTableFromSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListTables": grpc.unary_unary_rpc_method_handler( - servicer.ListTables, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, - ), - "GetTable": grpc.unary_unary_rpc_method_handler( - servicer.GetTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "DeleteTable": grpc.unary_unary_rpc_method_handler( - servicer.DeleteTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ModifyColumnFamilies": grpc.unary_unary_rpc_method_handler( - servicer.ModifyColumnFamilies, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "DropRowRange": grpc.unary_unary_rpc_method_handler( - servicer.DropRowRange, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GenerateConsistencyToken": grpc.unary_unary_rpc_method_handler( - servicer.GenerateConsistencyToken, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, - ), - "CheckConsistency": grpc.unary_unary_rpc_method_handler( - servicer.CheckConsistency, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, - ), - "SnapshotTable": grpc.unary_unary_rpc_method_handler( - servicer.SnapshotTable, - 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.GetSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, - ), - "ListSnapshots": grpc.unary_unary_rpc_method_handler( - servicer.ListSnapshots, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, - ), - "DeleteSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.DeleteSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto deleted file mode 100644 index e4efb74f560e..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - google.longrunning.Operation current_operation = 2; - - // The column families configured for this table, mapped by column family id. - map column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family. - string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. 
- message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto deleted file mode 100644 index 6e968fee17c1..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. 
- rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; - } - - // Creates a new column family within a specified table. - rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; - } - - // Changes the configuration of a specified column family. - rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; - } - - // Permanently deletes a specified column family and all of its data. - rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; - } - - // Delete all rows in a table corresponding to a particular prefix - rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" }; - } -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto deleted file mode 100644 index 617ede65592f..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). 
- // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. - string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. - repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. - string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted. - string name = 1; -} - -message BulkDeleteRowsRequest { - // The unique name of the table on which to perform the bulk delete - string table_name = 1; - - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/common.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/common.proto deleted file mode 100644 index ad4d735994f3..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/common.proto +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - -// Storage media types for persisting Bigtable data. -enum StorageType { - // The user did not specify a storage type. - STORAGE_TYPE_UNSPECIFIED = 0; - - // Flash (SSD) storage should be used. - SSD = 1; - - // Magnetic drive (HDD) storage should be used. - HDD = 2; -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py deleted file mode 100644 index 7d40f043d05c..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable/admin_v2/proto/common.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/common.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - "\n1google/cloud/bigtable/admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3" - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - -_STORAGETYPE = _descriptor.EnumDescriptor( - name="StorageType", - full_name="google.bigtable.admin.v2.StorageType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="STORAGE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="SSD", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="HDD", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=142, - serialized_end=203, -) 
-_sym_db.RegisterEnumDescriptor(_STORAGETYPE) - -StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE) -STORAGE_TYPE_UNSPECIFIED = 0 -SSD = 1 -HDD = 2 - - -DESCRIPTOR.enum_types_by_name["StorageType"] = _STORAGETYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py b/bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto deleted file mode 100644 index ef8599bfe349..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/common.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "InstanceProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - -// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and -// the resources that serve them. -// All tables in an instance are served from a single -// [Cluster][google.bigtable.admin.v2.Cluster]. -message Instance { - // Possible states of an instance. - enum State { - // The state of the instance could not be determined. - STATE_NOT_KNOWN = 0; - - // The instance has been successfully created and can serve requests - // to its tables. - READY = 1; - - // The instance is currently being created, and may be destroyed - // if the creation process encounters an error. - CREATING = 2; - } - - // The type of the instance. - enum Type { - // The type of the instance is unspecified. If set when creating an - // instance, a `PRODUCTION` instance will be created. If set when updating - // an instance, the type will be left unchanged. - TYPE_UNSPECIFIED = 0; - - // An instance meant for production use. `serve_nodes` must be set - // on the cluster. - PRODUCTION = 1; - - // The instance is meant for development and testing purposes only; it has - // no performance or uptime guarantees and is not covered by SLA. - // After a development instance is created, it can be upgraded by - // updating the instance to type `PRODUCTION`. An instance created - // as a production instance cannot be changed to a development instance. 
- // When creating a development instance, `serve_nodes` on the cluster must - // not be set. - DEVELOPMENT = 2; - } - - // (`OutputOnly`) - // The unique name of the instance. Values are of the form - // `projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]`. - string name = 1; - - // The descriptive name for this instance as it appears in UIs. - // Can be changed at any time, but should be kept globally unique - // to avoid confusion. - string display_name = 2; - - // (`OutputOnly`) - // The current state of the instance. - State state = 3; - - // The type of the instance. Defaults to `PRODUCTION`. - Type type = 4; - - // Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. They can be used to filter resources and aggregate - // metrics. - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. - // * Label values must be between 0 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. - // * No more than 64 labels can be associated with a given resource. - // * Keys and values must both be under 128 bytes. - map labels = 5; -} - -// A resizable group of nodes in a particular cloud location, capable -// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent -// [Instance][google.bigtable.admin.v2.Instance]. -message Cluster { - // Possible states of a cluster. - enum State { - // The state of the cluster could not be determined. - STATE_NOT_KNOWN = 0; - - // The cluster has been successfully created and is ready to serve requests. - READY = 1; - - // The cluster is currently being created, and may be destroyed - // if the creation process encounters an error. - // A cluster may not be able to serve requests while being created. - CREATING = 2; - - // The cluster is currently being resized, and may revert to its previous - // node count if the process encounters an error. - // A cluster is still capable of serving requests while being resized, - // but may exhibit performance as if its number of allocated nodes is - // between the starting and requested states. - RESIZING = 3; - - // The cluster has no backing nodes. The data (tables) still - // exist, but no operations can be performed on the cluster. - DISABLED = 4; - } - - // (`OutputOnly`) - // The unique name of the cluster. Values are of the form - // `projects//instances//clusters/[a-z][-a-z0-9]*`. - string name = 1; - - // (`CreationOnly`) - // The location where this cluster's nodes and storage reside. For best - // performance, clients should be located as close as possible to this - // cluster. Currently only zones are supported, so values should be of the - // form `projects//locations/`. - string location = 2; - - // (`OutputOnly`) - // The current state of the cluster. - State state = 3; - - // The number of nodes allocated to this cluster. More nodes enable higher - // throughput and more consistent performance. - int32 serve_nodes = 4; - - // (`CreationOnly`) - // The type of storage used by this cluster to serve its - // parent instance's tables, unless explicitly overridden. - StorageType default_storage_type = 5; -} - -// A configuration object describing how Cloud Bigtable should treat traffic -// from a particular end user application. 
-message AppProfile { - // Read/write requests may be routed to any cluster in the instance, and will - // fail over to another cluster in the event of transient errors or delays. - // Choosing this option sacrifices read-your-writes consistency to improve - // availability. - message MultiClusterRoutingUseAny {} - - // Unconditionally routes all read/write requests to a specific cluster. - // This option preserves read-your-writes consistency, but does not improve - // availability. - message SingleClusterRouting { - // The cluster to which read/write requests should be routed. - string cluster_id = 1; - - // Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are - // allowed by this app profile. It is unsafe to send these requests to - // the same table/row/column in multiple clusters. - bool allow_transactional_writes = 2; - } - - // (`OutputOnly`) - // The unique name of the app profile. Values are of the form - // `projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - string name = 1; - - // Strongly validated etag for optimistic concurrency control. Preserve the - // value returned from `GetAppProfile` when calling `UpdateAppProfile` to - // fail the request if there has been a modification in the mean time. The - // `update_mask` of the request need not include `etag` for this protection - // to apply. - // See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and - // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more - // details. - string etag = 2; - - // Optional long form description of the use case for this AppProfile. - string description = 3; - - // The routing policy for all read/write requests which use this app profile. - // A value must be explicitly set. - oneof routing_policy { - // Use a multi-cluster routing policy that may pick any cluster. - MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5; - - // Use a single-cluster routing policy. - SingleClusterRouting single_cluster_routing = 6; - } -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py deleted file mode 100644 index 5f45909fc6a1..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ /dev/null @@ -1,816 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable/admin_v2/proto/instance.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - common_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/instance.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - '\n3google/cloud/bigtable/admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/bigtable/admin_v2/proto/common.proto"\x83\x03\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04"\x82\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08\x42\x10\n\x0erouting_policyB\xb0\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, - ], -) - - -_INSTANCE_STATE = _descriptor.EnumDescriptor( - name="State", - 
full_name="google.bigtable.admin.v2.Instance.State", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=434, - serialized_end=487, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) - -_INSTANCE_TYPE = _descriptor.EnumDescriptor( - name="Type", - full_name="google.bigtable.admin.v2.Instance.Type", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="PRODUCTION", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="DEVELOPMENT", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=489, - serialized_end=550, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_TYPE) - -_CLUSTER_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Cluster.State", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="RESIZING", index=3, number=3, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="DISABLED", index=4, number=4, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=742, - serialized_end=823, -) -_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) - - -_INSTANCE_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=387, - serialized_end=432, -) - -_INSTANCE = _descriptor.Descriptor( - name="Instance", - full_name="google.bigtable.admin.v2.Instance", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Instance.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.bigtable.admin.v2.Instance.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Instance.state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.bigtable.admin.v2.Instance.type", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.admin.v2.Instance.labels", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_INSTANCE_LABELSENTRY,], - enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=163, - serialized_end=550, -) - - -_CLUSTER = _descriptor.Descriptor( - name="Cluster", - full_name="google.bigtable.admin.v2.Cluster", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Cluster.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="location", - full_name="google.bigtable.admin.v2.Cluster.location", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Cluster.state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - 
name="serve_nodes", - full_name="google.bigtable.admin.v2.Cluster.serve_nodes", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="default_storage_type", - full_name="google.bigtable.admin.v2.Cluster.default_storage_type", - index=4, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_CLUSTER_STATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=553, - serialized_end=823, -) - - -_APPPROFILE_MULTICLUSTERROUTINGUSEANY = _descriptor.Descriptor( - name="MultiClusterRoutingUseAny", - full_name="google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1087, - serialized_end=1114, -) - -_APPPROFILE_SINGLECLUSTERROUTING = _descriptor.Descriptor( - name="SingleClusterRouting", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_id", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.cluster_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="allow_transactional_writes", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.allow_transactional_writes", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1116, - serialized_end=1194, -) - -_APPPROFILE = _descriptor.Descriptor( - name="AppProfile", - full_name="google.bigtable.admin.v2.AppProfile", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.AppProfile.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="etag", - full_name="google.bigtable.admin.v2.AppProfile.etag", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, 
- enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.AppProfile.description", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="multi_cluster_routing_use_any", - full_name="google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any", - index=3, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="single_cluster_routing", - full_name="google.bigtable.admin.v2.AppProfile.single_cluster_routing", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _APPPROFILE_MULTICLUSTERROUTINGUSEANY, - _APPPROFILE_SINGLECLUSTERROUTING, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="routing_policy", - full_name="google.bigtable.admin.v2.AppProfile.routing_policy", - index=0, - containing_type=None, - fields=[], - ), - ], - serialized_start=826, - serialized_end=1212, -) - -_INSTANCE_LABELSENTRY.containing_type = _INSTANCE -_INSTANCE.fields_by_name["state"].enum_type = _INSTANCE_STATE -_INSTANCE.fields_by_name["type"].enum_type = _INSTANCE_TYPE -_INSTANCE.fields_by_name["labels"].message_type = _INSTANCE_LABELSENTRY -_INSTANCE_STATE.containing_type = _INSTANCE -_INSTANCE_TYPE.containing_type = _INSTANCE -_CLUSTER.fields_by_name["state"].enum_type = _CLUSTER_STATE -_CLUSTER.fields_by_name[ - "default_storage_type" -].enum_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2._STORAGETYPE -) -_CLUSTER_STATE.containing_type = _CLUSTER -_APPPROFILE_MULTICLUSTERROUTINGUSEANY.containing_type = _APPPROFILE -_APPPROFILE_SINGLECLUSTERROUTING.containing_type = _APPPROFILE -_APPPROFILE.fields_by_name[ - "multi_cluster_routing_use_any" -].message_type = _APPPROFILE_MULTICLUSTERROUTINGUSEANY -_APPPROFILE.fields_by_name[ - "single_cluster_routing" -].message_type = _APPPROFILE_SINGLECLUSTERROUTING -_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( - _APPPROFILE.fields_by_name["multi_cluster_routing_use_any"] -) -_APPPROFILE.fields_by_name[ - "multi_cluster_routing_use_any" -].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] -_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( - _APPPROFILE.fields_by_name["single_cluster_routing"] -) -_APPPROFILE.fields_by_name[ - "single_cluster_routing" -].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] -DESCRIPTOR.message_types_by_name["Instance"] = _INSTANCE -DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER -DESCRIPTOR.message_types_by_name["AppProfile"] = _APPPROFILE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Instance = 
_reflection.GeneratedProtocolMessageType( - "Instance", - (_message.Message,), - dict( - LabelsEntry=_reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - dict( - DESCRIPTOR=_INSTANCE_LABELSENTRY, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) - ), - ), - DESCRIPTOR=_INSTANCE, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the resources that serve - them. All tables in an instance are served from a single - [Cluster][google.bigtable.admin.v2.Cluster]. - - - Attributes: - name: - (\ ``OutputOnly``) The unique name of the instance. Values are - of the form - ``projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - display_name: - The descriptive name for this instance as it appears in UIs. - Can be changed at any time, but should be kept globally unique - to avoid confusion. - state: - (\ ``OutputOnly``) The current state of the instance. - type: - The type of the instance. Defaults to ``PRODUCTION``. - labels: - Labels are a flexible and lightweight mechanism for organizing - cloud resources into groups that reflect a customer's - organizational needs and deployment strategies. They can be - used to filter resources and aggregate metrics. - Label keys - must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - Label values - must be between 0 and 63 characters long and must conform - to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given - resource. - Keys and values must both be under 128 bytes. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) - ), -) -_sym_db.RegisterMessage(Instance) -_sym_db.RegisterMessage(Instance.LabelsEntry) - -Cluster = _reflection.GeneratedProtocolMessageType( - "Cluster", - (_message.Message,), - dict( - DESCRIPTOR=_CLUSTER, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A resizable group of nodes in a particular cloud location, - capable of serving all [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - - Attributes: - name: - (\ ``OutputOnly``) The unique name of the cluster. Values are - of the form ``projects//instances//clusters - /[a-z][-a-z0-9]*``. - location: - (\ ``CreationOnly``) The location where this cluster's nodes - and storage reside. For best performance, clients should be - located as close as possible to this cluster. Currently only - zones are supported, so values should be of the form - ``projects//locations/``. - state: - (\ ``OutputOnly``) The current state of the cluster. - serve_nodes: - The number of nodes allocated to this cluster. More nodes - enable higher throughput and more consistent performance. - default_storage_type: - (\ ``CreationOnly``) The type of storage used by this cluster - to serve its parent instance's tables, unless explicitly - overridden. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) - ), -) -_sym_db.RegisterMessage(Cluster) - -AppProfile = _reflection.GeneratedProtocolMessageType( - "AppProfile", - (_message.Message,), - dict( - MultiClusterRoutingUseAny=_reflection.GeneratedProtocolMessageType( - "MultiClusterRoutingUseAny", - (_message.Message,), - dict( - DESCRIPTOR=_APPPROFILE_MULTICLUSTERROUTINGUSEANY, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""Read/write requests may be routed to any cluster in the - instance, and will fail over to another cluster in the event of - transient errors or delays. Choosing this option sacrifices - read-your-writes consistency to improve availability. - - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) - ), - ), - SingleClusterRouting=_reflection.GeneratedProtocolMessageType( - "SingleClusterRouting", - (_message.Message,), - dict( - DESCRIPTOR=_APPPROFILE_SINGLECLUSTERROUTING, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""Unconditionally routes all read/write requests to a - specific cluster. This option preserves read-your-writes consistency, - but does not improve availability. - - - Attributes: - cluster_id: - The cluster to which read/write requests should be routed. - allow_transactional_writes: - Whether or not ``CheckAndMutateRow`` and - ``ReadModifyWriteRow`` requests are allowed by this app - profile. It is unsafe to send these requests to the same - table/row/column in multiple clusters. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) - ), - ), - DESCRIPTOR=_APPPROFILE, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A configuration object describing how Cloud Bigtable - should treat traffic from a particular end user application. - - - Attributes: - name: - (\ ``OutputOnly``) The unique name of the app profile. Values - are of the form - ``projects//instances//appProfiles/[_a- - zA-Z0-9][-_.a-zA-Z0-9]*``. - etag: - Strongly validated etag for optimistic concurrency control. - Preserve the value returned from ``GetAppProfile`` when - calling ``UpdateAppProfile`` to fail the request if there has - been a modification in the mean time. The ``update_mask`` of - the request need not include ``etag`` for this protection to - apply. See `Wikipedia - `__ and `RFC 7232 - `__ for more - details. - description: - Optional long form description of the use case for this - AppProfile. - routing_policy: - The routing policy for all read/write requests which use this - app profile. A value must be explicitly set. - multi_cluster_routing_use_any: - Use a multi-cluster routing policy that may pick any cluster. - single_cluster_routing: - Use a single-cluster routing policy. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) - ), -) -_sym_db.RegisterMessage(AppProfile) -_sym_db.RegisterMessage(AppProfile.MultiClusterRoutingUseAny) -_sym_db.RegisterMessage(AppProfile.SingleClusterRouting) - - -DESCRIPTOR._options = None -_INSTANCE_LABELSENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py b/bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/table.proto b/bigtable/google/cloud/bigtable_admin_v2/proto/table.proto deleted file mode 100644 index 5019d8b86448..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/table.proto +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "TableProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - // The state of a table's data in a particular cluster. - message ClusterState { - // Table replication states. - enum ReplicationState { - // The replication state of the table is unknown in this cluster. - STATE_NOT_KNOWN = 0; - - // The cluster was recently created, and the table must finish copying - // over pre-existing data from other clusters before it can begin - // receiving live replication updates and serving Data API requests. - INITIALIZING = 1; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to planned internal maintenance. - PLANNED_MAINTENANCE = 2; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to unplanned or emergency maintenance. - UNPLANNED_MAINTENANCE = 3; - - // The table can serve Data API requests from this cluster. Depending on - // replication delay, reads may not immediately reflect the state of the - // table in other clusters. - READY = 4; - } - - // (`OutputOnly`) - // The state of replication for the table in this cluster. - ReplicationState replication_state = 1; - } - - // Possible timestamp granularities to use when keeping multiple versions - // of data in a table. 
- enum TimestampGranularity { - // The user did not specify a granularity. Should not be returned. - // When specified during table creation, MILLIS will be used. - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; - - // The table keeps data versioned at a granularity of 1ms. - MILLIS = 1; - } - - // Defines a view over a table's fields. - enum View { - // Uses the default view for each method as documented in its request. - VIEW_UNSPECIFIED = 0; - - // Only populates `name`. - NAME_ONLY = 1; - - // Only populates `name` and fields related to the table's schema. - SCHEMA_VIEW = 2; - - // Only populates `name` and fields related to the table's - // replication state. - REPLICATION_VIEW = 3; - - // Populates all fields. - FULL = 4; - } - - // (`OutputOnly`) - // The unique name of the table. Values are of the form - // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` - string name = 1; - - // (`OutputOnly`) - // Map from cluster ID to per-cluster table state. - // If it could not be determined whether or not the table has data in a - // particular cluster (for example, if its zone is unavailable), then - // there will be an entry for the cluster with UNKNOWN `replication_status`. - // Views: `REPLICATION_VIEW`, `FULL` - map cluster_states = 2; - - // (`CreationOnly`) - // The column families configured for this table, mapped by column family ID. - // Views: `SCHEMA_VIEW`, `FULL` - map column_families = 3; - - // (`CreationOnly`) - // The granularity (i.e. `MILLIS`) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // If unspecified at creation time, the value will be set to `MILLIS`. - // Views: `SCHEMA_VIEW`, `FULL` - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // Garbage collection rule specified as a protobuf. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 1; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - // Garbage collection rules. - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} - -// A snapshot of a table at a particular time. A snapshot can be used as a -// checkpoint for data restoration or a data source for a new table. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. 
This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message Snapshot { - // Possible states of a snapshot. - enum State { - // The state of the snapshot could not be determined. - STATE_NOT_KNOWN = 0; - - // The snapshot has been successfully created and can serve all requests. - READY = 1; - - // The snapshot is currently being created, and may be destroyed if the - // creation process encounters an error. A snapshot may not be restored to a - // table while it is being created. - CREATING = 2; - } - - // (`OutputOnly`) - // The unique name of the snapshot. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; - - // (`OutputOnly`) - // The source table at the time the snapshot was taken. - Table source_table = 2; - - // (`OutputOnly`) - // The size of the data in the source table at the time the snapshot was - // taken. In some cases, this value may be computed asynchronously via a - // background process and a placeholder of 0 will be used in the meantime. - int64 data_size_bytes = 3; - - // (`OutputOnly`) - // The time when the snapshot is created. - google.protobuf.Timestamp create_time = 4; - - // (`OutputOnly`) - // The time when the snapshot will be deleted. The maximum amount of time a - // snapshot can stay active is 365 days. If 'ttl' is not specified, - // the default maximum of 365 days will be used. - google.protobuf.Timestamp delete_time = 5; - - // (`OutputOnly`) - // The current state of the snapshot. - State state = 6; - - // (`OutputOnly`) - // Description of the snapshot. - string description = 7; -} diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py deleted file mode 100644 index b026dff95f39..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ /dev/null @@ -1,1048 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable/admin_v2/proto/table.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/table.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - '\n0google/cloud/bigtable/admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcb\x06\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a\xe2\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"x\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xcf\x02\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x42\xad\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( - name="ReplicationState", - full_name="google.bigtable.admin.v2.Table.ClusterState.ReplicationState", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="INITIALIZING", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="PLANNED_MAINTENANCE", - index=2, - number=2, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="UNPLANNED_MAINTENANCE", - index=3, - number=3, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="READY", index=4, number=4, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=533, - serialized_end=653, -) -_sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) - -_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( - name="TimestampGranularity", - full_name="google.bigtable.admin.v2.Table.TimestampGranularity", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="TIMESTAMP_GRANULARITY_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="MILLIS", index=1, number=1, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=850, - serialized_end=923, -) -_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) - -_TABLE_VIEW = _descriptor.EnumDescriptor( - name="View", - full_name="google.bigtable.admin.v2.Table.View", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="VIEW_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="NAME_ONLY", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="SCHEMA_VIEW", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REPLICATION_VIEW", - index=3, - number=3, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="FULL", index=4, number=4, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=925, - serialized_end=1017, -) -_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) - -_SNAPSHOT_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Snapshot.State", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - 
name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1713, - serialized_end=1766, -) -_sym_db.RegisterEnumDescriptor(_SNAPSHOT_STATE) - - -_TABLE_CLUSTERSTATE = _descriptor.Descriptor( - name="ClusterState", - full_name="google.bigtable.admin.v2.Table.ClusterState", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="replication_state", - full_name="google.bigtable.admin.v2.Table.ClusterState.replication_state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_TABLE_CLUSTERSTATE_REPLICATIONSTATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=427, - serialized_end=653, -) - -_TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( - name="ClusterStatesEntry", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=655, - serialized_end=753, -) - -_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( - name="ColumnFamiliesEntry", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=755, - serialized_end=848, -) - -_TABLE = _descriptor.Descriptor( - name="Table", - full_name="google.bigtable.admin.v2.Table", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Table.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cluster_states", - full_name="google.bigtable.admin.v2.Table.cluster_states", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="column_families", - full_name="google.bigtable.admin.v2.Table.column_families", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="granularity", - full_name="google.bigtable.admin.v2.Table.granularity", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _TABLE_CLUSTERSTATE, - _TABLE_CLUSTERSTATESENTRY, - _TABLE_COLUMNFAMILIESENTRY, - ], - enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=174, - serialized_end=1017, -) - - -_COLUMNFAMILY = _descriptor.Descriptor( - name="ColumnFamily", - full_name="google.bigtable.admin.v2.ColumnFamily", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="gc_rule", - full_name="google.bigtable.admin.v2.ColumnFamily.gc_rule", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1019, - serialized_end=1084, -) - - -_GCRULE_INTERSECTION = _descriptor.Descriptor( - name="Intersection", - full_name="google.bigtable.admin.v2.GcRule.Intersection", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.admin.v2.GcRule.Intersection.rules", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1299, - serialized_end=1362, -) - -_GCRULE_UNION = _descriptor.Descriptor( - name="Union", - full_name="google.bigtable.admin.v2.GcRule.Union", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.admin.v2.GcRule.Union.rules", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1364, - serialized_end=1420, -) - -_GCRULE = _descriptor.Descriptor( - name="GcRule", - full_name="google.bigtable.admin.v2.GcRule", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="max_num_versions", - full_name="google.bigtable.admin.v2.GcRule.max_num_versions", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="max_age", - full_name="google.bigtable.admin.v2.GcRule.max_age", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="intersection", - full_name="google.bigtable.admin.v2.GcRule.intersection", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="union", - full_name="google.bigtable.admin.v2.GcRule.union", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="rule", - full_name="google.bigtable.admin.v2.GcRule.rule", - index=0, - containing_type=None, - fields=[], - ), - ], - serialized_start=1087, - serialized_end=1428, -) - - -_SNAPSHOT = _descriptor.Descriptor( - name="Snapshot", - full_name="google.bigtable.admin.v2.Snapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Snapshot.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.Snapshot.source_table", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="data_size_bytes", - full_name="google.bigtable.admin.v2.Snapshot.data_size_bytes", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.bigtable.admin.v2.Snapshot.create_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete_time", - full_name="google.bigtable.admin.v2.Snapshot.delete_time", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Snapshot.state", - index=5, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.Snapshot.description", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_SNAPSHOT_STATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1431, - serialized_end=1766, -) - -_TABLE_CLUSTERSTATE.fields_by_name[ - "replication_state" -].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE -_TABLE_CLUSTERSTATE.containing_type = _TABLE -_TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.fields_by_name["value"].message_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE -_TABLE_COLUMNFAMILIESENTRY.fields_by_name["value"].message_type = _COLUMNFAMILY -_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name["cluster_states"].message_type = _TABLE_CLUSTERSTATESENTRY -_TABLE.fields_by_name["column_families"].message_type = _TABLE_COLUMNFAMILIESENTRY -_TABLE.fields_by_name["granularity"].enum_type = _TABLE_TIMESTAMPGRANULARITY -_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE 
-_TABLE_VIEW.containing_type = _TABLE -_COLUMNFAMILY.fields_by_name["gc_rule"].message_type = _GCRULE -_GCRULE_INTERSECTION.fields_by_name["rules"].message_type = _GCRULE -_GCRULE_INTERSECTION.containing_type = _GCRULE -_GCRULE_UNION.fields_by_name["rules"].message_type = _GCRULE -_GCRULE_UNION.containing_type = _GCRULE -_GCRULE.fields_by_name[ - "max_age" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_GCRULE.fields_by_name["intersection"].message_type = _GCRULE_INTERSECTION -_GCRULE.fields_by_name["union"].message_type = _GCRULE_UNION -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_num_versions"]) -_GCRULE.fields_by_name["max_num_versions"].containing_oneof = _GCRULE.oneofs_by_name[ - "rule" -] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_age"]) -_GCRULE.fields_by_name["max_age"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["intersection"]) -_GCRULE.fields_by_name["intersection"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["union"]) -_GCRULE.fields_by_name["union"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_SNAPSHOT.fields_by_name["source_table"].message_type = _TABLE -_SNAPSHOT.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name[ - "delete_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name["state"].enum_type = _SNAPSHOT_STATE -_SNAPSHOT_STATE.containing_type = _SNAPSHOT -DESCRIPTOR.message_types_by_name["Table"] = _TABLE -DESCRIPTOR.message_types_by_name["ColumnFamily"] = _COLUMNFAMILY -DESCRIPTOR.message_types_by_name["GcRule"] = _GCRULE -DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Table = _reflection.GeneratedProtocolMessageType( - "Table", - (_message.Message,), - dict( - ClusterState=_reflection.GeneratedProtocolMessageType( - "ClusterState", - (_message.Message,), - dict( - DESCRIPTOR=_TABLE_CLUSTERSTATE, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""The state of a table's data in a particular cluster. - - - Attributes: - replication_state: - (\ ``OutputOnly``) The state of replication for the table in - this cluster. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) - ), - ), - ClusterStatesEntry=_reflection.GeneratedProtocolMessageType( - "ClusterStatesEntry", - (_message.Message,), - dict( - DESCRIPTOR=_TABLE_CLUSTERSTATESENTRY, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) - ), - ), - ColumnFamiliesEntry=_reflection.GeneratedProtocolMessageType( - "ColumnFamiliesEntry", - (_message.Message,), - dict( - DESCRIPTOR=_TABLE_COLUMNFAMILIESENTRY, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) - ), - ), - DESCRIPTOR=_TABLE, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A collection of user data indexed by row, column, and - timestamp. Each table is served using the resources of its parent - cluster. - - - Attributes: - name: - (\ ``OutputOnly``) The unique name of the table. Values are of - the form ``projects//instances//tables/[_a- - zA-Z0-9][-_.a-zA-Z0-9]*``. 
Views: ``NAME_ONLY``, - ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` - cluster_states: - (\ ``OutputOnly``) Map from cluster ID to per-cluster table - state. If it could not be determined whether or not the table - has data in a particular cluster (for example, if its zone is - unavailable), then there will be an entry for the cluster with - UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, - ``FULL`` - column_families: - (\ ``CreationOnly``) The column families configured for this - table, mapped by column family ID. Views: ``SCHEMA_VIEW``, - ``FULL`` - granularity: - (\ ``CreationOnly``) The granularity (i.e. ``MILLIS``) at - which timestamps are stored in this table. Timestamps not - matching the granularity will be rejected. If unspecified at - creation time, the value will be set to ``MILLIS``. Views: - ``SCHEMA_VIEW``, ``FULL`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) - ), -) -_sym_db.RegisterMessage(Table) -_sym_db.RegisterMessage(Table.ClusterState) -_sym_db.RegisterMessage(Table.ClusterStatesEntry) -_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) - -ColumnFamily = _reflection.GeneratedProtocolMessageType( - "ColumnFamily", - (_message.Message,), - dict( - DESCRIPTOR=_COLUMNFAMILY, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A set of columns within a table which share a common - configuration. - - - Attributes: - gc_rule: - Garbage collection rule specified as a protobuf. Must - serialize to at most 500 bytes. NOTE: Garbage collection - executes opportunistically in the background, and so it's - possible for reads to return a cell even if it matches the - active GC expression for its family. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) - ), -) -_sym_db.RegisterMessage(ColumnFamily) - -GcRule = _reflection.GeneratedProtocolMessageType( - "GcRule", - (_message.Message,), - dict( - Intersection=_reflection.GeneratedProtocolMessageType( - "Intersection", - (_message.Message,), - dict( - DESCRIPTOR=_GCRULE_INTERSECTION, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A GcRule which deletes cells matching all of the given - rules. - - - Attributes: - rules: - Only delete cells which would be deleted by every element of - ``rules``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) - ), - ), - Union=_reflection.GeneratedProtocolMessageType( - "Union", - (_message.Message,), - dict( - DESCRIPTOR=_GCRULE_UNION, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A GcRule which deletes cells matching any of the given - rules. - - - Attributes: - rules: - Delete cells which would be deleted by any element of - ``rules``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) - ), - ), - DESCRIPTOR=_GCRULE, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""Rule for determining which cells to delete during garbage - collection. - - - Attributes: - rule: - Garbage collection rules. - max_num_versions: - Delete all cells in a column except the most recent N. - max_age: - Delete cells in a column older than the given age. Values must - be at least one millisecond, and will be truncated to - microsecond granularity. - intersection: - Delete cells that would be deleted by every nested rule. - union: - Delete cells that would be deleted by any nested rule. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) - ), -) -_sym_db.RegisterMessage(GcRule) -_sym_db.RegisterMessage(GcRule.Intersection) -_sym_db.RegisterMessage(GcRule.Union) - -Snapshot = _reflection.GeneratedProtocolMessageType( - "Snapshot", - (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOT, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A snapshot of a table at a particular time. A snapshot can - be used as a checkpoint for data restoration or a data source for a new - table. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - name: - (\ ``OutputOnly``) The unique name of the snapshot. Values are - of the form ``projects//instances//clusters - //snapshots/``. - source_table: - (\ ``OutputOnly``) The source table at the time the snapshot - was taken. - data_size_bytes: - (\ ``OutputOnly``) The size of the data in the source table at - the time the snapshot was taken. In some cases, this value may - be computed asynchronously via a background process and a - placeholder of 0 will be used in the meantime. - create_time: - (\ ``OutputOnly``) The time when the snapshot is created. - delete_time: - (\ ``OutputOnly``) The time when the snapshot will be deleted. - The maximum amount of time a snapshot can stay active is 365 - days. If 'ttl' is not specified, the default maximum of 365 - days will be used. - state: - (\ ``OutputOnly``) The current state of the snapshot. - description: - (\ ``OutputOnly``) Description of the snapshot. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) - ), -) -_sym_db.RegisterMessage(Snapshot) - - -DESCRIPTOR._options = None -_TABLE_CLUSTERSTATESENTRY._options = None -_TABLE_COLUMNFAMILIESENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py b/bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/bigtable/google/cloud/bigtable_admin_v2/types.py b/bigtable/google/cloud/bigtable_admin_v2/types.py deleted file mode 100644 index 2b149637e634..000000000000 --- a/bigtable/google/cloud/bigtable_admin_v2/types.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 -from google.type import expr_pb2 - - -_shared_modules = [ - iam_policy_pb2, - options_pb2, - policy_pb2, - operations_pb2, - any_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, - expr_pb2, -] - -_local_modules = [ - bigtable_instance_admin_pb2, - bigtable_table_admin_pb2, - instance_pb2, - table_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.bigtable_admin_v2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/bigtable/google/cloud/bigtable_v2/__init__.py b/bigtable/google/cloud/bigtable_v2/__init__.py deleted file mode 100644 index a649c8cf4f59..000000000000 --- a/bigtable/google/cloud/bigtable_v2/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.bigtable_v2 import types -from google.cloud.bigtable_v2.gapic import bigtable_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7." 
- "More details about Python 2 support for Google Cloud Client Libraries" - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class BigtableClient(bigtable_client.BigtableClient): - __doc__ = bigtable_client.BigtableClient.__doc__ - - -__all__ = ( - "types", - "BigtableClient", -) diff --git a/bigtable/google/cloud/bigtable_v2/gapic/__init__.py b/bigtable/google/cloud/bigtable_v2/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py deleted file mode 100644 index abe6130df88c..000000000000 --- a/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ /dev/null @@ -1,770 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.bigtable.v2 Bigtable API.""" - -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.path_template -import grpc - -from google.cloud.bigtable_v2.gapic import bigtable_client_config -from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc -from google.cloud.bigtable_v2.proto import data_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable", -).version - - -class BigtableClient(object): - """Service for reading from and writing to existing Bigtable tables.""" - - SERVICE_ADDRESS = "bigtable.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.v2.Bigtable" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def table_path(cls, project, instance, table): - """Return a fully-qualified table string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/tables/{table}", - project=project, - instance=instance, - table=table, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_grpc_transport.BigtableGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." 
- ) - self.transport = transport - else: - self.transport = bigtable_grpc_transport.BigtableGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def read_rows( - self, - table_name, - app_profile_id=None, - rows=None, - filter_=None, - rows_limit=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> for element in client.read_rows(table_name): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table from which to read. Values are of - the form ``projects//instances//tables/
``. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowSet` - filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, - reads the entirety of each row. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowFilter` - rows_limit (long): The read will terminate after committing to N rows' worth of results. The - default (zero) is to return all results. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "read_rows" not in self._inner_api_calls: - self._inner_api_calls[ - "read_rows" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_rows, - default_retry=self._method_configs["ReadRows"].retry, - default_timeout=self._method_configs["ReadRows"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.ReadRowsRequest( - table_name=table_name, - app_profile_id=app_profile_id, - rows=rows, - filter=filter_, - rows_limit=rows_limit, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read_rows"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def sample_row_keys( - self, - table_name, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> for element in client.sample_row_keys(table_name): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table from which to sample row keys. - Values are of the form - ``projects//instances//tables/
``. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.SampleRowKeysResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "sample_row_keys" not in self._inner_api_calls: - self._inner_api_calls[ - "sample_row_keys" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.sample_row_keys, - default_retry=self._method_configs["SampleRowKeys"].retry, - default_timeout=self._method_configs["SampleRowKeys"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.SampleRowKeysRequest( - table_name=table_name, app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["sample_row_keys"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def mutate_row( - self, - table_name, - row_key, - mutations, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by ``mutation``. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> # TODO: Initialize `mutations`: - >>> mutations = [] - >>> - >>> response = client.mutate_row(table_name, row_key, mutations) - - Args: - table_name (str): Required. The unique name of the table to which the mutation should be - applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the mutation should be applied. - mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Required. Changes to be atomically applied to the specified row. Entries are applied - in order, meaning that earlier mutations can be masked by later ones. - Must contain at least one entry and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.MutateRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "mutate_row" not in self._inner_api_calls: - self._inner_api_calls[ - "mutate_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_row, - default_retry=self._method_configs["MutateRow"].retry, - default_timeout=self._method_configs["MutateRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.MutateRowRequest( - table_name=table_name, - row_key=row_key, - mutations=mutations, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["mutate_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def mutate_rows( - self, - table_name, - entries, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `entries`: - >>> entries = [] - >>> - >>> for element in client.mutate_rows(table_name, entries): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table to which the mutations should be applied. - entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): Required. The row keys and corresponding mutations to be applied in bulk. - Each entry is applied as an atomic mutation, but the entries may be - applied in arbitrary order (even between entries for the same row). 
- At least one entry must be specified, and in total the entries can - contain at most 100000 mutations. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Entry` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "mutate_rows" not in self._inner_api_calls: - self._inner_api_calls[ - "mutate_rows" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_rows, - default_retry=self._method_configs["MutateRows"].retry, - default_timeout=self._method_configs["MutateRows"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries, app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["mutate_rows"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def check_and_mutate_row( - self, - table_name, - row_key, - app_profile_id=None, - predicate_filter=None, - true_mutations=None, - false_mutations=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates a row atomically based on the output of a predicate Reader filter. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> response = client.check_and_mutate_row(table_name, row_key) - - Args: - table_name (str): Required. The unique name of the table to which the conditional mutation - should be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the conditional mutation should be applied. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending - on whether or not any results are yielded, either ``true_mutations`` or - ``false_mutations`` will be executed. If unset, checks that the row - contains any values at all. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowFilter` - true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied to - ``row_key``. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least one entry - if ``false_mutations`` is empty, and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied to - ``row_key``. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least one entry - if ``true_mutations`` is empty, and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
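
For orientation, below is a minimal, hedged sketch (not an official sample) of how the MutateRow, MutateRows and CheckAndMutateRow surfaces documented above were typically invoked. The project, instance, table, row keys and cell values are placeholder assumptions, and the dict-form messages rely on the dict-to-protobuf conversion these docstrings describe.

    # Minimal sketch of the MutateRow, MutateRows and CheckAndMutateRow calls
    # documented above. Project/instance/table/cell values are hypothetical;
    # dict-form messages use the same field names as the protobuf types.
    from google.cloud import bigtable_v2

    client = bigtable_v2.BigtableClient()
    table_name = client.table_path("my-project", "my-instance", "my-table")

    # timestamp_micros=-1 asks the server to assign the cell timestamp.
    set_cell = {
        "set_cell": {
            "family_name": "cf1",
            "column_qualifier": b"greeting",
            "timestamp_micros": -1,
            "value": b"hello world",
        }
    }

    # Atomic single-row mutation.
    client.mutate_row(table_name, b"row-1", [set_cell])

    # Bulk mutation: each entry is applied atomically, the batch as a whole is not.
    entries = [{"row_key": b"row-2", "mutations": [set_cell]}]
    for response in client.mutate_rows(table_name, entries):
        for entry in response.entries:
            print(entry.index, entry.status.code)

    # Conditional mutation: apply ``true_mutations`` only if the row already
    # contains at least one cell in family "cf1".
    result = client.check_and_mutate_row(
        table_name,
        b"row-1",
        predicate_filter={"family_name_regex_filter": "cf1"},
        true_mutations=[set_cell],
    )
    print(result.predicate_matched)
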
- if "check_and_mutate_row" not in self._inner_api_calls: - self._inner_api_calls[ - "check_and_mutate_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_and_mutate_row, - default_retry=self._method_configs["CheckAndMutateRow"].retry, - default_timeout=self._method_configs["CheckAndMutateRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, - row_key=row_key, - app_profile_id=app_profile_id, - predicate_filter=predicate_filter, - true_mutations=true_mutations, - false_mutations=false_mutations, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["check_and_mutate_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def read_modify_write_row( - self, - table_name, - row_key, - rules, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> # TODO: Initialize `rules`: - >>> rules = [] - >>> - >>> response = client.read_modify_write_row(table_name, row_key, rules) - - Args: - table_name (str): Required. The unique name of the table to which the read/modify/write - rules should be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the read/modify/write rules should be applied. - rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Required. Rules specifying how the specified row's contents are to be transformed - into writes. Entries are applied in order, meaning that earlier rules will - affect the results of later ones. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "read_modify_write_row" not in self._inner_api_calls: - self._inner_api_calls[ - "read_modify_write_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_modify_write_row, - default_retry=self._method_configs["ReadModifyWriteRow"].retry, - default_timeout=self._method_configs["ReadModifyWriteRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, - row_key=row_key, - rules=rules, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read_modify_write_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py deleted file mode 100644 index 3096f33e0c68..000000000000 --- a/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ /dev/null @@ -1,80 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.v2.Bigtable": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 20000, - }, - 
"read_rows_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 43200000, - }, - "mutate_rows_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "ReadRows": { - "timeout_millis": 43200000, - "retry_codes_name": "idempotent", - "retry_params_name": "read_rows_params", - }, - "SampleRowKeys": { - "timeout_millis": 20000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "MutateRow": { - "timeout_millis": 20000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "MutateRows": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "mutate_rows_params", - }, - "CheckAndMutateRow": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ReadModifyWriteRow": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - }, - } - } -} diff --git a/bigtable/google/cloud/bigtable_v2/gapic/transports/__init__.py b/bigtable/google/cloud/bigtable_v2/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py deleted file mode 100644 index 5b2757db2d6d..000000000000 --- a/bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ /dev/null @@ -1,207 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc - - -class BigtableGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.v2 Bigtable API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtable.googleapis.com:443" - ): - """Instantiate the transport class. 
- - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel), - } - - @classmethod - def create_channel( - cls, address="bigtable.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def read_rows(self): - """Return the gRPC stub for :meth:`BigtableClient.read_rows`. - - Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].ReadRows - - @property - def sample_row_keys(self): - """Return the gRPC stub for :meth:`BigtableClient.sample_row_keys`. - - Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].SampleRowKeys - - @property - def mutate_row(self): - """Return the gRPC stub for :meth:`BigtableClient.mutate_row`. - - Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by ``mutation``. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].MutateRow - - @property - def mutate_rows(self): - """Return the gRPC stub for :meth:`BigtableClient.mutate_rows`. - - Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].MutateRows - - @property - def check_and_mutate_row(self): - """Return the gRPC stub for :meth:`BigtableClient.check_and_mutate_row`. - - Mutates a row atomically based on the output of a predicate Reader filter. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].CheckAndMutateRow - - @property - def read_modify_write_row(self): - """Return the gRPC stub for :meth:`BigtableClient.read_modify_write_row`. - - Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].ReadModifyWriteRow diff --git a/bigtable/google/cloud/bigtable_v2/proto/__init__.py b/bigtable/google/cloud/bigtable_v2/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable.proto deleted file mode 100644 index c54225ed3fd9..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable.proto +++ /dev/null @@ -1,426 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
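
For context on the transport class removed above, the following sketch wires it up by hand using only what its own docstrings describe: create_channel() builds a gRPC channel against the default endpoint with ambient (application default) credentials and the listed OAuth scopes, and the transport then exposes one bare callable per RPC. Passing the transport to the GAPIC client, noted in the final comment, is an assumption about typical usage rather than something shown in this diff.

    # Sketch of constructing the gRPC transport above by hand; nothing here is
    # specific to any one project, and credentials come from the environment.
    from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport

    channel = bigtable_grpc_transport.BigtableGrpcTransport.create_channel()
    transport = bigtable_grpc_transport.BigtableGrpcTransport(channel=channel)

    # Each RPC is exposed as a bare gRPC callable bound to the channel:
    read_rows = transport.read_rows    # BigtableStub.ReadRows
    mutate_row = transport.mutate_row  # BigtableStub.MutateRow

    # Typically the transport was then handed to the GAPIC client rather than
    # used directly, e.g. BigtableClient(transport=transport).
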
- -syntax = "proto3"; - -package google.bigtable.v2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/bigtable/v2/data.proto"; -import "google/protobuf/wrappers.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "BigtableProto"; -option java_package = "com.google.bigtable.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\V2"; -option (google.api.resource_definition) = { - type: "bigtable.googleapis.com/Table" - pattern: "projects/{project}/instances/{instance}/tables/{table}" -}; - -// Service for reading from and writing to existing Bigtable tables. -service Bigtable { - option (google.api.default_host) = "bigtable.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigtable.data," - "https://www.googleapis.com/auth/bigtable.data.readonly," - "https://www.googleapis.com/auth/cloud-bigtable.data," - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly," - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/cloud-platform.read-only"; - - // Streams back the contents of all requested rows in key order, optionally - // applying the same Reader filter to each. Depending on their size, - // rows and cells may be broken up across multiple responses, but - // atomicity of each row will still be preserved. See the - // ReadRowsResponse documentation for details. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" - body: "*" - }; - option (google.api.method_signature) = "table_name"; - option (google.api.method_signature) = "table_name,app_profile_id"; - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - option (google.api.http) = { - get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" - }; - option (google.api.method_signature) = "table_name"; - option (google.api.method_signature) = "table_name,app_profile_id"; - } - - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by `mutation`. - rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" - body: "*" - }; - option (google.api.method_signature) = "table_name,row_key,mutations"; - option (google.api.method_signature) = "table_name,row_key,mutations,app_profile_id"; - } - - // Mutates multiple rows in a batch. Each individual row is mutated - // atomically as in MutateRow, but the entire batch is not executed - // atomically. 
- rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" - body: "*" - }; - option (google.api.method_signature) = "table_name,entries"; - option (google.api.method_signature) = "table_name,entries,app_profile_id"; - } - - // Mutates a row atomically based on the output of a predicate Reader filter. - rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" - body: "*" - }; - option (google.api.method_signature) = "table_name,row_key,predicate_filter,true_mutations,false_mutations"; - option (google.api.method_signature) = "table_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id"; - } - - // Modifies a row atomically on the server. The method reads the latest - // existing timestamp and value from the specified columns and writes a new - // entry based on pre-defined read/modify/write rules. The new value for the - // timestamp is the greater of the existing timestamp or the current server - // time. The method returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" - body: "*" - }; - option (google.api.method_signature) = "table_name,row_key,rules"; - option (google.api.method_signature) = "table_name,row_key,rules,app_profile_id"; - } -} - -// Request message for Bigtable.ReadRows. -message ReadRowsRequest { - // Required. The unique name of the table from which to read. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 5; - - // The row keys and/or ranges to read. If not specified, reads from all rows. - RowSet rows = 2; - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entirety of each row. - RowFilter filter = 3; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - int64 rows_limit = 4; -} - -// Response message for Bigtable.ReadRows. -message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message CellChunk { - // The row key for this chunk of data. If the row key is empty, - // this CellChunk is a continuation of the same row as the previous - // CellChunk in the response stream, even if that CellChunk was in a - // previous ReadRowsResponse message. - bytes row_key = 1; - - // The column family name for this chunk of data. If this message - // is not present this CellChunk is a continuation of the same column - // family as the previous CellChunk. The empty string can occur as a - // column family name in a response so clients must check - // explicitly for the presence of this message, not just for - // `family_name.value` being non-empty. - google.protobuf.StringValue family_name = 2; - - // The column qualifier for this chunk of data. If this message - // is not present, this CellChunk is a continuation of the same column - // as the previous CellChunk. Column qualifiers may be empty so - // clients must check for the presence of this message, not just - // for `qualifier.value` being non-empty. - google.protobuf.BytesValue qualifier = 3; - - // The cell's stored timestamp, which also uniquely identifies it - // within its column. Values are always expressed in - // microseconds, but individual tables may set a coarser - // granularity to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will - // only allow values of `timestamp_micros` which are multiples of - // 1000. Timestamps are only set in the first CellChunk per cell - // (for cells split into multiple chunks). - int64 timestamp_micros = 4; - - // Labels applied to the cell by a - // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set - // on the first CellChunk per cell. - repeated string labels = 5; - - // The value stored in the cell. Cell values can be split across - // multiple CellChunks. In that case only the value field will be - // set in CellChunks after the first: the timestamp and labels - // will only be present in the first CellChunk, even if the first - // CellChunk came in a previous ReadRowsResponse. - bytes value = 6; - - // If this CellChunk is part of a chunked cell value and this is - // not the final chunk of that cell, value_size will be set to the - // total length of the cell value. The client can use this size - // to pre-allocate memory to hold the full cell value. - int32 value_size = 7; - - // Signals to the client concerning previous CellChunks received. - oneof row_status { - // Indicates that the client should drop all previous chunks for - // `row_key`, as it will be re-read from the beginning. 
- bool reset_row = 8; - - // Indicates that the client can safely process all previous chunks for - // `row_key`, as its data has been fully read. - bool commit_row = 9; - } - } - - // A collection of a row's contents as part of the read request. - repeated CellChunk chunks = 1; - - // Optionally the server might return the row key of the last row it - // has scanned. The client can use this to construct a more - // efficient retry request if needed: any row keys or portions of - // ranges less than this row key can be dropped from the request. - // This is primarily useful for cases where the server has read a - // lot of data that was filtered out since the last committed row - // key, allowing the client to skip that work on a retry. - bytes last_scanned_row_key = 2; -} - -// Request message for Bigtable.SampleRowKeys. -message SampleRowKeysRequest { - // Required. The unique name of the table from which to sample row keys. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 2; -} - -// Response message for Bigtable.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // `row_key`. Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // `offset_bytes` fields. - int64 offset_bytes = 2; -} - -// Request message for Bigtable.MutateRow. -message MutateRowRequest { - // Required. The unique name of the table to which the mutation should be applied. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 4; - - // Required. The key of the row to which the mutation should be applied. - bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for Bigtable.MutateRow. -message MutateRowResponse { - -} - -// Request message for BigtableService.MutateRows. -message MutateRowsRequest { - // A mutation for a given row. - message Entry { - // The key of the row to which the `mutations` should be applied. - bytes row_key = 1; - - // Required. Changes to be atomically applied to the specified row. Mutations are - // applied in order, meaning that earlier mutations can be masked by - // later ones. - // You must specify at least one mutation. - repeated Mutation mutations = 2 [(google.api.field_behavior) = REQUIRED]; - } - - // Required. The unique name of the table to which the mutations should be applied. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 3; - - // Required. The row keys and corresponding mutations to be applied in bulk. - // Each entry is applied as an atomic mutation, but the entries may be - // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries can - // contain at most 100000 mutations. - repeated Entry entries = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for BigtableService.MutateRows. -message MutateRowsResponse { - // The result of applying a passed mutation in the original request. - message Entry { - // The index into the original request's `entries` list of the Entry - // for which a result is being reported. - int64 index = 1; - - // The result of the request Entry identified by `index`. - // Depending on how requests are batched during execution, it is possible - // for one Entry to fail due to an error with another Entry. In the event - // that this occurs, the same error will be reported for both entries. - google.rpc.Status status = 2; - } - - // One or more results for Entries from the batch request. - repeated Entry entries = 1; -} - -// Request message for Bigtable.CheckAndMutateRow. -message CheckAndMutateRowRequest { - // Required. The unique name of the table to which the conditional mutation should be - // applied. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 7; - - // Required. The key of the row to which the conditional mutation should be applied. - bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either `true_mutations` or - // `false_mutations` will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if `predicate_filter` - // yields at least one cell when applied to `row_key`. Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if `false_mutations` is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if `predicate_filter` - // does not yield any cells when applied to `row_key`. Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if `true_mutations` is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for Bigtable.CheckAndMutateRow. -message CheckAndMutateRowResponse { - // Whether or not the request's `predicate_filter` yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for Bigtable.ReadModifyWriteRow. -message ReadModifyWriteRowRequest { - // Required. The unique name of the table to which the read/modify/write rules should be - // applied. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 4; - - // Required. The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for Bigtable.ReadModifyWriteRow. -message ReadModifyWriteRowResponse { - // A Row containing the new contents of all cells modified by the request. - Row row = 1; -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto deleted file mode 100644 index ca3b663d8661..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. - enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. - OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects//zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone. - Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects//zones//clusters/[a-z][-a-z0-9]* - string name = 1; - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. 
Calls to these methods will be rejected if - // "current_operation" is already set. - google.longrunning.Operation current_operation = 3; - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. - STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; - - // Data will be stored in HDD, providing high and less predictable - // latencies. - STORAGE_HDD = 2; -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto deleted file mode 100644 index 038fcc46397f..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. - rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. 
- // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UpdateCluster(Cluster) returns (Cluster) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; - } - - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. 
- rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Cancels the scheduled deletion of an cluster and begins preparing it to - // resume serving. The returned operation will also be embedded as the - // cluster's "current_operation". - // Immediately upon completion of this request: - // * The cluster's "delete_time" field will be unset, protecting it from - // automatic deletion. - // Until completion of the returned operation: - // * The operation cannot be cancelled. - // Upon completion of the returned operation: - // * Billing for the cluster's resources will resume. - // * All tables within the cluster will be available. - // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" }; - } -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto deleted file mode 100644 index 518d14dac8e0..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Request message for BigtableClusterService.ListZones. -message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. 
- repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects//zones/ - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects//zones//clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 4; -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 1; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 2; -} - -// Metadata type for operations initiated by the V2 BigtableAdmin service. -// More complete information for such operations is available via the V2 API. -message V2OperationMetadata { - -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto deleted file mode 100644 index bd063a925f45..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "BigtableDataProto"; -option java_package = "com.google.bigtable.v1"; - - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -message Row { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - bytes key = 1; - - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. - repeated Family families = 2; -} - -// Specifies (some of) the contents of a single row/column family of a table. -message Family { - // The unique key which identifies this family within its row. This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - string name = 1; - - // Must not be empty. Sorted in order of increasing "qualifier". - repeated Column columns = 2; -} - -// Specifies (some of) the contents of a single row/column of a table. -message Column { - // The unique key which identifies this column within its family. This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its "column_qualifier_regex_filter" field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - bytes qualifier = 1; - - // Must not be empty. Sorted in order of decreasing "timestamp_micros". - repeated Cell cells = 2; -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -message Cell { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser "granularity" to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of "timestamp_micros" which are multiples of 1000. - int64 timestamp_micros = 1; - - // The value stored in the cell. - // May contain any byte string, including the empty string, up to 100MiB in - // length. - bytes value = 2; - - // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. - repeated string labels = 3; -} - -// Specifies a contiguous range of rows. -message RowRange { - // Inclusive lower bound. If left empty, interpreted as the empty string. - bytes start_key = 2; - - // Exclusive upper bound. If left empty, interpreted as infinity. - bytes end_key = 3; -} - -// Specifies a non-contiguous set of rows. -message RowSet { - // Single rows included in the set. 
- repeated bytes row_keys = 1; - - // Contiguous row ranges included in the set. - repeated RowRange row_ranges = 2; -} - -// Specifies a contiguous range of columns within a single column family. -// The range spans from : to -// :, where both bounds can be either inclusive or -// exclusive. -message ColumnRange { - // The name of the column family within which this range falls. - string family_name = 1; - - // The column qualifier at which to start the range (within 'column_family'). - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_qualifier { - // Used when giving an inclusive lower bound for the range. - bytes start_qualifier_inclusive = 2; - - // Used when giving an exclusive lower bound for the range. - bytes start_qualifier_exclusive = 3; - } - - // The column qualifier at which to end the range (within 'column_family'). - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_qualifier { - // Used when giving an inclusive upper bound for the range. - bytes end_qualifier_inclusive = 4; - - // Used when giving an exclusive upper bound for the range. - bytes end_qualifier_exclusive = 5; - } -} - -// Specified a contiguous range of microsecond timestamps. -message TimestampRange { - // Inclusive lower bound. If left empty, interpreted as 0. - int64 start_timestamp_micros = 1; - - // Exclusive upper bound. If left empty, interpreted as infinity. - int64 end_timestamp_micros = 2; -} - -// Specifies a contiguous range of raw byte values. -message ValueRange { - // The value at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_value { - // Used when giving an inclusive lower bound for the range. - bytes start_value_inclusive = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_value_exclusive = 2; - } - - // The value at which to end the range. - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_value { - // Used when giving an inclusive upper bound for the range. - bytes end_value_inclusive = 3; - - // Used when giving an exclusive upper bound for the range. - bytes end_value_exclusive = 4; - } -} - -// Takes a row as input and produces an alternate view of the row based on -// specified rules. For example, a RowFilter might trim down a row to include -// just the cells from columns matching a given regular expression, or might -// return all the cells of a row but not their values. More complicated filters -// can be composed out of these components to express requests such as, "within -// every column of a particular family, give just the two most recent cells -// which are older than timestamp X." -// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the "value_regex_filter", -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that RE2(.) is equivalent by default to -// RE2([^\n]), meaning that it does not match newlines. 
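
To make the regex discussion above concrete, here is a hedged sketch of a composed filter in dict form (the same shape as the RowFilter message): a Chain that first narrows rows by key, using the '\C' escape so the pattern can match arbitrary bytes, and then keeps only cells from one column family. The family name and key prefix are placeholders.

    # Dict-form RowFilter sketch: a Chain that keeps rows whose keys start with
    # "user#" (\C matches any byte, per the note above) and, within them, only
    # cells from the hypothetical column family "cf1".
    row_filter = {
        "chain": {
            "filters": [
                {"row_key_regex_filter": b"user#\\C*"},
                {"family_name_regex_filter": "cf1"},
            ]
        }
    }

    # Such a dict can be used wherever the client accepts a RowFilter, e.g. as
    # the ``predicate_filter`` of check_and_mutate_row or the ``filter`` of
    # read_rows.
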
When attempting to match -// an arbitrary byte, you should therefore use the escape sequence '\C', which -// may need to be further escaped as '\\C' in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the "strip_value_transformer", which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. - message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. - message Condition { - // If "predicate_filter" outputs any cells, then "true_filter" will be - // evaluated on the input row. Otherwise, "false_filter" will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if "predicate_filter" returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if "predicate_filter" does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. 
- Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. - // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. - // - // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. - // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary key. - bytes row_key_regex_filter = 4; - - // Matches all cells from a row with probability p, and matches no cells - // from the row with probability 1-p. - double row_sample_filter = 14; - - // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the ':' - // character, even if it is not being used as a literal. - // Note that, since column families cannot contain the new line character - // '\n', it is sufficient to use '.' as a full wildcard when matching - // column family names. - string family_name_regex_filter = 5; - - // Matches only cells from columns whose qualifiers satisfy the given RE2 - // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the '\C' - // escape sequence must be used if a true wildcard is desired. The '.' 
- // character will not match the new line character '\n', which may be - // present in a binary qualifier. - bytes column_qualifier_regex_filter = 6; - - // Matches only cells from columns within the given range. - ColumnRange column_range_filter = 7; - - // Matches only cells with timestamps within the given range. - TimestampRange timestamp_range_filter = 8; - - // Matches only cells with values that satisfy the given regular expression. - // Note that, since cell values can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary value. - bytes value_regex_filter = 9; - - // Matches only cells with values that fall within the given range. - ValueRange value_range_filter = 15; - - // Skips the first N cells of each row, matching all subsequent cells. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_offset_filter = 10; - - // Matches only the first N cells of each row. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_limit_filter = 11; - - // Matches only the most recent N cells within each column. For example, - // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, - // skip all earlier cells in "foo:bar", and then begin matching again in - // column "foo:bar2". - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_column_limit_filter = 12; - - // Replaces each cell's value with the empty string. - bool strip_value_transformer = 13; - - // Applies the given label to all cells in the output row. This allows - // the client to determine which results were produced from which part of - // the filter. - // - // Values must be at most 15 characters in length, and match the RE2 - // pattern [a-z0-9\\-]+ - // - // Due to a technical limitation, it is not currently possible to apply - // multiple labels to a cell. As a result, a Chain may have no more than - // one sub-filter which contains a apply_label_transformer. It is okay for - // an Interleave to contain multiple apply_label_transformers, as they will - // be applied to separate copies of the input. This may be relaxed in the - // future. - string apply_label_transformer = 19; - } -} - -// Specifies a particular change to be made to the contents of a row. -message Mutation { - // A Mutation which sets the value of the specified cell. - message SetCell { - // The name of the family into which new data should be written. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the "granularity" of the table (e.g. micros, millis). - int64 timestamp_micros = 3; - - // The value to be written into the specified cell. 
- bytes value = 4; - } - - // A Mutation which deletes cells from the specified column, optionally - // restricting the deletions to a given timestamp range. - message DeleteFromColumn { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The range of timestamps within which cells should be deleted. - TimestampRange time_range = 3; - } - - // A Mutation which deletes all cells from the specified column family. - message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that "append_value" be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that "increment_amount" be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto deleted file mode 100644 index ec992ea0f818..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
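The data messages removed above (RowFilter, Mutation, ReadModifyWriteRule) read more easily next to a concrete construction. The sketch below is illustrative only: it borrows the generated data_pb2 module that this same change deletes further down (google.cloud.bigtable_v2.proto.data_pb2), whose v2 field names mirror the v1 comments shown here; the family and qualifier values are made up.

    from google.cloud.bigtable_v2.proto import data_pb2  # assumed import path

    # "Within family 'stats', keep only the two most recent cells per column":
    # a Chain applies its sub-filters in sequence, as documented above.
    row_filter = data_pb2.RowFilter(
        chain=data_pb2.RowFilter.Chain(
            filters=[
                data_pb2.RowFilter(family_name_regex_filter="stats"),
                data_pb2.RowFilter(cells_per_column_limit_filter=2),
            ]
        )
    )

    # SetCell with timestamp_micros=-1 asks the server to assign the timestamp.
    mutation = data_pb2.Mutation(
        set_cell=data_pb2.Mutation.SetCell(
            family_name="stats",
            column_qualifier=b"views",
            timestamp_micros=-1,
            value=b"42",
        )
    )

    # ReadModifyWriteRule.increment_amount treats the existing cell as an
    # 8-byte big-endian signed integer and adds to it atomically.
    increment = data_pb2.ReadModifyWriteRule(
        family_name="stats",
        column_qualifier=b"views",
        increment_amount=1,
    )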
-// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/instance.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableInstanceAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Service for creating, configuring, and deleting Cloud Bigtable Instances and -// Clusters. Provides access to the Instance and Cluster schemas only, not the -// tables' metadata or data stored in those tables. -service BigtableInstanceAdmin { - // Create an instance within a project. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*}/instances" - body: "*" - }; - } - - // Gets information about an instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*}" - }; - } - - // Lists information about instances in a project. - rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*}/instances" - }; - } - - // Updates an instance within a project. - rpc UpdateInstance(Instance) returns (Instance) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*}" - body: "*" - }; - } - - // Partially updates an instance within a project. - rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{instance.name=projects/*/instances/*}" - body: "instance" - }; - } - - // Delete an instance from a project. - rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*}" - }; - } - - // Creates a cluster within an instance. - rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/clusters" - body: "cluster" - }; - } - - // Gets information about a cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - } - - // Lists information about clusters in an instance. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/clusters" - }; - } - - // Updates a cluster within an instance. - rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*/clusters/*}" - body: "*" - }; - } - - // Deletes a cluster from an instance. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - } - - // Creates an app profile within an instance. 
- rpc CreateAppProfile(CreateAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/appProfiles" - body: "app_profile" - }; - } - - // Gets information about an app profile. - rpc GetAppProfile(GetAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - } - - // Lists information about app profiles in an instance. - rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/appProfiles" - }; - } - - // Updates an app profile within an instance. - rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" - body: "app_profile" - }; - } - - // Deletes an app profile from an instance. - rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - } - - // Gets the access control policy for an instance resource. Returns an empty - // policy if an instance exists but does not have a policy set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" - body: "*" - }; - } - - // Sets the access control policy on an instance resource. Replaces any - // existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified instance resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" - body: "*" - }; - } -} - -// Request message for BigtableInstanceAdmin.CreateInstance. -message CreateInstanceRequest { - // The unique name of the project in which to create the new instance. - // Values are of the form `projects/`. - string parent = 1; - - // The ID to be used when referring to the new instance within its project, - // e.g., just `myinstance` rather than - // `projects/myproject/instances/myinstance`. - string instance_id = 2; - - // The instance to create. - // Fields marked `OutputOnly` must be left blank. - Instance instance = 3; - - // The clusters to be created within the instance, mapped by desired - // cluster ID, e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - // Fields marked `OutputOnly` must be left blank. - // Currently, at most two clusters can be specified. - map clusters = 4; -} - -// Request message for BigtableInstanceAdmin.GetInstance. -message GetInstanceRequest { - // The unique name of the requested instance. Values are of the form - // `projects//instances/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListInstances. -message ListInstancesRequest { - // The unique name of the project for which a list of instances is requested. - // Values are of the form `projects/`. - string parent = 1; - - // DEPRECATED: This field is unused and ignored. 
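For orientation, the google.api.http options attached to the RPCs above describe a REST mapping alongside the gRPC surface: GetInstance, for example, is a plain GET on /v2/{name=projects/*/instances/*}. A minimal sketch of exercising that mapping follows; the bigtableadmin.googleapis.com host, the cloud-platform scope, and the google-auth session helper are assumptions layered on top of what the proto itself states.

    import google.auth
    from google.auth.transport.requests import AuthorizedSession

    credentials, _ = google.auth.default(
        scopes=["https://www.googleapis.com/auth/cloud-platform"]  # assumed scope
    )
    session = AuthorizedSession(credentials)

    # Path taken from the GetInstance binding: GET /v2/{name=projects/*/instances/*}
    name = "projects/my-project/instances/my-instance"  # hypothetical resource name
    response = session.get("https://bigtableadmin.googleapis.com/v2/" + name)
    response.raise_for_status()
    print(response.json())  # an Instance; its fields live in the imported instance.proto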
- string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListInstances. -message ListInstancesResponse { - // The list of requested instances. - repeated Instance instances = 1; - - // Locations from which Instance information could not be retrieved, - // due to an outage or some other transient condition. - // Instances whose Clusters are all in one of the failed locations - // may be missing from `instances`, and Instances with at least one - // Cluster in a failed location may only have partial information returned. - // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.PartialUpdateInstance. -message PartialUpdateInstanceRequest { - // The Instance which will (partially) replace the current value. - Instance instance = 1; - - // The subset of Instance fields which should be replaced. - // Must be explicitly set. - google.protobuf.FieldMask update_mask = 2; -} - -// Request message for BigtableInstanceAdmin.DeleteInstance. -message DeleteInstanceRequest { - // The unique name of the instance to be deleted. - // Values are of the form `projects//instances/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.CreateCluster. -message CreateClusterRequest { - // The unique name of the instance in which to create the new cluster. - // Values are of the form - // `projects//instances/`. - string parent = 1; - - // The ID to be used when referring to the new cluster within its instance, - // e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - string cluster_id = 2; - - // The cluster to be created. - // Fields marked `OutputOnly` must be left blank. - Cluster cluster = 3; -} - -// Request message for BigtableInstanceAdmin.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. Values are of the form - // `projects//instances//clusters/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListClusters. -message ListClustersRequest { - // The unique name of the instance for which a list of clusters is requested. - // Values are of the form `projects//instances/`. - // Use ` = '-'` to list Clusters for all Instances in a project, - // e.g., `projects/myproject/instances/-`. - string parent = 1; - - // DEPRECATED: This field is unused and ignored. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListClusters. -message ListClustersResponse { - // The list of requested clusters. - repeated Cluster clusters = 1; - - // Locations from which Cluster information could not be retrieved, - // due to an outage or some other transient condition. - // Clusters from these locations may be missing from `clusters`, - // or may only have partial information returned. - // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. Values are of the form - // `projects//instances//clusters/`. - string name = 1; -} - -// The metadata for the Operation returned by CreateInstance. -message CreateInstanceMetadata { - // The request that prompted the initiation of this CreateInstance operation. 
- CreateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateInstance. -message UpdateInstanceMetadata { - // The request that prompted the initiation of this UpdateInstance operation. - PartialUpdateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateCluster. -message CreateClusterMetadata { - // The request that prompted the initiation of this CreateCluster operation. - CreateClusterRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateCluster. -message UpdateClusterMetadata { - // The request that prompted the initiation of this UpdateCluster operation. - Cluster original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Request message for BigtableInstanceAdmin.CreateAppProfile. -message CreateAppProfileRequest { - // The unique name of the instance in which to create the new app profile. - // Values are of the form - // `projects//instances/`. - string parent = 1; - - // The ID to be used when referring to the new app profile within its - // instance, e.g., just `myprofile` rather than - // `projects/myproject/instances/myinstance/appProfiles/myprofile`. - string app_profile_id = 2; - - // The app profile to be created. - // Fields marked `OutputOnly` will be ignored. - AppProfile app_profile = 3; - - // If true, ignore safety checks when creating the app profile. - bool ignore_warnings = 4; -} - -// Request message for BigtableInstanceAdmin.GetAppProfile. -message GetAppProfileRequest { - // The unique name of the requested app profile. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesRequest { - // The unique name of the instance for which a list of app profiles is - // requested. Values are of the form - // `projects//instances/`. - // Use ` = '-'` to list AppProfiles for all Instances in a project, - // e.g., `projects/myproject/instances/-`. - string parent = 1; - - // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 3; - - // The value of `next_page_token` returned by a previous call. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesResponse { - // The list of requested app profiles. - repeated AppProfile app_profiles = 1; - - // Set if not all app profiles could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. 
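ListAppProfiles is the one genuinely paginated RPC in this file (the ListInstances and ListClusters tokens are marked deprecated above), and the comment here spells out the contract: re-issue the request with the returned token until the server stops sending one. A hedged sketch of that loop, assuming the generated module path and a caller-supplied transport callable (list_app_profiles is hypothetical):

    from google.cloud.bigtable_v2.proto import (
        bigtable_instance_admin_pb2 as admin_pb2,  # assumed import path
    )

    def iter_app_profiles(list_app_profiles, parent):
        """Yield every AppProfile under `parent`, following next_page_token."""
        page_token = ""
        while True:
            request = admin_pb2.ListAppProfilesRequest(
                parent=parent, page_token=page_token
            )
            response = list_app_profiles(request)  # hypothetical RPC callable
            for app_profile in response.app_profiles:
                yield app_profile
            if not response.next_page_token:
                return
            page_token = response.next_page_token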
- string next_page_token = 2; - - // Locations from which AppProfile information could not be retrieved, - // due to an outage or some other transient condition. - // AppProfiles from these locations may be missing from `app_profiles`. - // Values are of the form `projects//locations/` - repeated string failed_locations = 3; -} - -// Request message for BigtableInstanceAdmin.UpdateAppProfile. -message UpdateAppProfileRequest { - // The app profile which will (partially) replace the current value. - AppProfile app_profile = 1; - - // The subset of app profile fields which should be replaced. - // If unset, all fields will be replaced. - google.protobuf.FieldMask update_mask = 2; - - // If true, ignore safety checks when updating the app profile. - bool ignore_warnings = 3; -} - - -// Request message for BigtableInstanceAdmin.DeleteAppProfile. -message DeleteAppProfileRequest { - // The unique name of the app profile to be deleted. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; - - // If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2; -} - -// The metadata for the Operation returned by UpdateAppProfile. -message UpdateAppProfileMetadata { - -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py deleted file mode 100644 index 59fb73a65fa7..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ /dev/null @@ -1,1780 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_v2/proto/bigtable.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_v2.proto import ( - data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2, -) -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_v2/proto/bigtable.proto", - package="google.bigtable.v2", - syntax="proto3", - serialized_options=_b( - "\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}" - ), - serialized_pb=_b( - '\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 
\x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xf5\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR, - google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - ], -) - - -_READROWSREQUEST = _descriptor.Descriptor( - name="ReadRowsRequest", - full_name="google.bigtable.v2.ReadRowsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.ReadRowsRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.ReadRowsRequest.app_profile_id", - index=1, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="rows", - full_name="google.bigtable.v2.ReadRowsRequest.rows", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.bigtable.v2.ReadRowsRequest.filter", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="rows_limit", - full_name="google.bigtable.v2.ReadRowsRequest.rows_limit", - index=4, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=285, - serialized_end=494, -) - - -_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( - name="CellChunk", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.family_name", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="qualifier", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.labels", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value", - index=5, - number=6, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value_size", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value_size", - index=6, - number=7, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="reset_row", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row", - index=7, - number=8, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="commit_row", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row", - index=8, - number=9, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="row_status", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status", - index=0, - containing_type=None, - fields=[], - ), - ], - serialized_start=612, - serialized_end=873, -) - -_READROWSRESPONSE = _descriptor.Descriptor( - name="ReadRowsResponse", - full_name="google.bigtable.v2.ReadRowsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="chunks", - full_name="google.bigtable.v2.ReadRowsResponse.chunks", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="last_scanned_row_key", - full_name="google.bigtable.v2.ReadRowsResponse.last_scanned_row_key", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_READROWSRESPONSE_CELLCHUNK,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - 
extension_ranges=[], - oneofs=[], - serialized_start=497, - serialized_end=873, -) - - -_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( - name="SampleRowKeysRequest", - full_name="google.bigtable.v2.SampleRowKeysRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.SampleRowKeysRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.SampleRowKeysRequest.app_profile_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=875, - serialized_end=980, -) - - -_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( - name="SampleRowKeysResponse", - full_name="google.bigtable.v2.SampleRowKeysResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.SampleRowKeysResponse.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="offset_bytes", - full_name="google.bigtable.v2.SampleRowKeysResponse.offset_bytes", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=982, - serialized_end=1044, -) - - -_MUTATEROWREQUEST = _descriptor.Descriptor( - name="MutateRowRequest", - full_name="google.bigtable.v2.MutateRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.MutateRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.MutateRowRequest.app_profile_id", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.MutateRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mutations", - full_name="google.bigtable.v2.MutateRowRequest.mutations", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1047, - serialized_end=1224, -) - - -_MUTATEROWRESPONSE = _descriptor.Descriptor( - name="MutateRowResponse", - full_name="google.bigtable.v2.MutateRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1226, - serialized_end=1245, -) - - -_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( - name="Entry", - full_name="google.bigtable.v2.MutateRowsRequest.Entry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.MutateRowsRequest.Entry.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mutations", - full_name="google.bigtable.v2.MutateRowsRequest.Entry.mutations", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1419, - serialized_end=1497, -) - -_MUTATEROWSREQUEST = _descriptor.Descriptor( - name="MutateRowsRequest", - full_name="google.bigtable.v2.MutateRowsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.MutateRowsRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.MutateRowsRequest.app_profile_id", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="entries", - full_name="google.bigtable.v2.MutateRowsRequest.entries", - index=2, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_MUTATEROWSREQUEST_ENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1248, - serialized_end=1497, -) - - -_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( - name="Entry", - full_name="google.bigtable.v2.MutateRowsResponse.Entry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="index", - full_name="google.bigtable.v2.MutateRowsResponse.Entry.index", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.bigtable.v2.MutateRowsResponse.Entry.status", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1585, - serialized_end=1643, -) - -_MUTATEROWSRESPONSE = _descriptor.Descriptor( - name="MutateRowsResponse", - full_name="google.bigtable.v2.MutateRowsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="entries", - full_name="google.bigtable.v2.MutateRowsResponse.entries", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_MUTATEROWSRESPONSE_ENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1500, - serialized_end=1643, -) - - -_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( - name="CheckAndMutateRowRequest", - full_name="google.bigtable.v2.CheckAndMutateRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id", - index=1, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="predicate_filter", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter", - index=3, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="true_mutations", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.true_mutations", - index=4, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="false_mutations", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.false_mutations", - index=5, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1646, - serialized_end=1943, -) - - -_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( - name="CheckAndMutateRowResponse", - full_name="google.bigtable.v2.CheckAndMutateRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="predicate_matched", - full_name="google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1945, - serialized_end=1999, -) - - -_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( - name="ReadModifyWriteRowRequest", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.rules", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2002, - serialized_end=2195, -) - - -_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( - name="ReadModifyWriteRowResponse", - full_name="google.bigtable.v2.ReadModifyWriteRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="row", - full_name="google.bigtable.v2.ReadModifyWriteRowResponse.row", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2197, - serialized_end=2263, -) - -_READROWSREQUEST.fields_by_name[ - "rows" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET -_READROWSREQUEST.fields_by_name[ - "filter" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "family_name" -].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "qualifier" -].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE -_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name["reset_row"] -) -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "reset_row" -].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name["commit_row"] -) -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "commit_row" -].containing_oneof = 
_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] -_READROWSRESPONSE.fields_by_name["chunks"].message_type = _READROWSRESPONSE_CELLCHUNK -_MUTATEROWREQUEST.fields_by_name[ - "mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.fields_by_name[ - "mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST -_MUTATEROWSREQUEST.fields_by_name["entries"].message_type = _MUTATEROWSREQUEST_ENTRY -_MUTATEROWSRESPONSE_ENTRY.fields_by_name[ - "status" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE -_MUTATEROWSRESPONSE.fields_by_name["entries"].message_type = _MUTATEROWSRESPONSE_ENTRY -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "predicate_filter" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "true_mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "false_mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_READMODIFYWRITEROWREQUEST.fields_by_name[ - "rules" -].message_type = ( - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE -) -_READMODIFYWRITEROWRESPONSE.fields_by_name[ - "row" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW -DESCRIPTOR.message_types_by_name["ReadRowsRequest"] = _READROWSREQUEST -DESCRIPTOR.message_types_by_name["ReadRowsResponse"] = _READROWSRESPONSE -DESCRIPTOR.message_types_by_name["SampleRowKeysRequest"] = _SAMPLEROWKEYSREQUEST -DESCRIPTOR.message_types_by_name["SampleRowKeysResponse"] = _SAMPLEROWKEYSRESPONSE -DESCRIPTOR.message_types_by_name["MutateRowRequest"] = _MUTATEROWREQUEST -DESCRIPTOR.message_types_by_name["MutateRowResponse"] = _MUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name["MutateRowsRequest"] = _MUTATEROWSREQUEST -DESCRIPTOR.message_types_by_name["MutateRowsResponse"] = _MUTATEROWSRESPONSE -DESCRIPTOR.message_types_by_name["CheckAndMutateRowRequest"] = _CHECKANDMUTATEROWREQUEST -DESCRIPTOR.message_types_by_name[ - "CheckAndMutateRowResponse" -] = _CHECKANDMUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name[ - "ReadModifyWriteRowRequest" -] = _READMODIFYWRITEROWREQUEST -DESCRIPTOR.message_types_by_name[ - "ReadModifyWriteRowResponse" -] = _READMODIFYWRITEROWRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ReadRowsRequest = _reflection.GeneratedProtocolMessageType( - "ReadRowsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_READROWSREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.ReadRows. - - - Attributes: - table_name: - Required. The unique name of the table from which to read. - Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the "default" application profile will be used. - rows: - The row keys and/or ranges to read. If not specified, reads - from all rows. - filter: - The filter to apply to the contents of the specified row(s). - If unset, reads the entirety of each row. - rows_limit: - The read will terminate after committing to N rows' worth of - results. The default (zero) is to return all results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) - ), -) -_sym_db.RegisterMessage(ReadRowsRequest) - -ReadRowsResponse = _reflection.GeneratedProtocolMessageType( - "ReadRowsResponse", - (_message.Message,), - dict( - CellChunk=_reflection.GeneratedProtocolMessageType( - "CellChunk", - (_message.Message,), - dict( - DESCRIPTOR=_READROWSRESPONSE_CELLCHUNK, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Specifies a piece of a row's contents returned as part of the read - response stream. - - - Attributes: - row_key: - The row key for this chunk of data. If the row key is empty, - this CellChunk is a continuation of the same row as the - previous CellChunk in the response stream, even if that - CellChunk was in a previous ReadRowsResponse message. - family_name: - The column family name for this chunk of data. If this message - is not present this CellChunk is a continuation of the same - column family as the previous CellChunk. The empty string can - occur as a column family name in a response so clients must - check explicitly for the presence of this message, not just - for ``family_name.value`` being non-empty. - qualifier: - The column qualifier for this chunk of data. If this message - is not present, this CellChunk is a continuation of the same - column as the previous CellChunk. Column qualifiers may be - empty so clients must check for the presence of this message, - not just for ``qualifier.value`` being non-empty. - timestamp_micros: - The cell's stored timestamp, which also uniquely identifies it - within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity will - only allow values of ``timestamp_micros`` which are multiples - of 1000. Timestamps are only set in the first CellChunk per - cell (for cells split into multiple chunks). - labels: - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set - on the first CellChunk per cell. - value: - The value stored in the cell. Cell values can be split across - multiple CellChunks. In that case only the value field will be - set in CellChunks after the first: the timestamp and labels - will only be present in the first CellChunk, even if the first - CellChunk came in a previous ReadRowsResponse. - value_size: - If this CellChunk is part of a chunked cell value and this is - not the final chunk of that cell, value\_size will be set to - the total length of the cell value. The client can use this - size to pre-allocate memory to hold the full cell value. - row_status: - Signals to the client concerning previous CellChunks received. - reset_row: - Indicates that the client should drop all previous chunks for - ``row_key``, as it will be re-read from the beginning. - commit_row: - Indicates that the client can safely process all previous - chunks for ``row_key``, as its data has been fully read. 
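The CellChunk contract spelled out above only needs a small amount of client-side state to honour. Below is a minimal, simplified sketch of that reassembly, assuming the generated message fields described in this docstring; the helper name merge_chunks and the flat cell tuples are illustrative and are not the library's actual row-merging code.

def merge_chunks(responses):
    """Yield (row_key, cells) for each committed row in a ReadRows response stream."""
    row_key = b""
    cells = []          # finished (family, qualifier, timestamp_micros, value) tuples
    value_parts = []    # pieces of a cell value split across chunks
    family = None
    qualifier = None
    timestamp = 0
    for response in responses:
        for chunk in response.chunks:
            if chunk.reset_row:
                # Drop everything buffered for this row; it will be re-read from the start.
                cells, value_parts = [], []
                continue
            if chunk.row_key:
                row_key = chunk.row_key          # empty row_key means "same row as before"
            if chunk.HasField("family_name"):
                family = chunk.family_name.value
            if chunk.HasField("qualifier"):
                qualifier = chunk.qualifier.value
            if chunk.timestamp_micros:
                timestamp = chunk.timestamp_micros
            value_parts.append(chunk.value)
            if chunk.value_size == 0:
                # value_size is only non-zero on non-final pieces of a split cell,
                # so zero here means the cell's value is now complete.
                cells.append((family, qualifier, timestamp, b"".join(value_parts)))
                value_parts = []
            if chunk.commit_row:
                yield row_key, cells
                cells = []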
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) - ), - ), - DESCRIPTOR=_READROWSRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.ReadRows. - - - Attributes: - chunks: - A collection of a row's contents as part of the read request. - last_scanned_row_key: - Optionally the server might return the row key of the last row - it has scanned. The client can use this to construct a more - efficient retry request if needed: any row keys or portions of - ranges less than this row key can be dropped from the request. - This is primarily useful for cases where the server has read a - lot of data that was filtered out since the last committed row - key, allowing the client to skip that work on a retry. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) - ), -) -_sym_db.RegisterMessage(ReadRowsResponse) -_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) - -SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType( - "SampleRowKeysRequest", - (_message.Message,), - dict( - DESCRIPTOR=_SAMPLEROWKEYSREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.SampleRowKeys. - - - Attributes: - table_name: - Required. The unique name of the table from which to sample - row keys. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the "default" application profile will be used. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) - ), -) -_sym_db.RegisterMessage(SampleRowKeysRequest) - -SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType( - "SampleRowKeysResponse", - (_message.Message,), - dict( - DESCRIPTOR=_SAMPLEROWKEYSRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.SampleRowKeys. - - - Attributes: - row_key: - Sorted streamed sequence of sample row keys in the table. The - table might have contents before the first row key in the list - and after the last one, but a key containing the empty string - indicates "end of table" and will be the last response given, - if present. Note that row keys in this list may not have ever - been written to or read from, and users should therefore not - make any assumptions about the row key structure that are - specific to their use case. - offset_bytes: - Approximate total storage space used by all rows in the table - which precede ``row_key``. Buffering the contents of all rows - between two subsequent samples would require space roughly - equal to the difference in their ``offset_bytes`` fields. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) - ), -) -_sym_db.RegisterMessage(SampleRowKeysResponse) - -MutateRowRequest = _reflection.GeneratedProtocolMessageType( - "MutateRowRequest", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATEROWREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.MutateRow. - - - Attributes: - table_name: - Required. The unique name of the table to which the mutation - should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the "default" application profile will be used. - row_key: - Required. The key of the row to which the mutation should be - applied. - mutations: - Required. Changes to be atomically applied to the specified - row. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least - one entry and at most 100000. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) - ), -) -_sym_db.RegisterMessage(MutateRowRequest) - -MutateRowResponse = _reflection.GeneratedProtocolMessageType( - "MutateRowResponse", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATEROWRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.MutateRow. - - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) - ), -) -_sym_db.RegisterMessage(MutateRowResponse) - -MutateRowsRequest = _reflection.GeneratedProtocolMessageType( - "MutateRowsRequest", - (_message.Message,), - dict( - Entry=_reflection.GeneratedProtocolMessageType( - "Entry", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATEROWSREQUEST_ENTRY, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""A mutation for a given row. - - - Attributes: - row_key: - The key of the row to which the ``mutations`` should be - applied. - mutations: - Required. Changes to be atomically applied to the specified - row. Mutations are applied in order, meaning that earlier - mutations can be masked by later ones. You must specify at - least one mutation. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) - ), - ), - DESCRIPTOR=_MUTATEROWSREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for BigtableService.MutateRows. - - - Attributes: - table_name: - Required. The unique name of the table to which the mutations - should be applied. - app_profile_id: - This value specifies routing for replication. If not - specified, the "default" application profile will be used. - entries: - Required. The row keys and corresponding mutations to be - applied in bulk. Each entry is applied as an atomic mutation, - but the entries may be applied in arbitrary order (even - between entries for the same row). At least one entry must be - specified, and in total the entries can contain at most 100000 - mutations. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) - ), -) -_sym_db.RegisterMessage(MutateRowsRequest) -_sym_db.RegisterMessage(MutateRowsRequest.Entry) - -MutateRowsResponse = _reflection.GeneratedProtocolMessageType( - "MutateRowsResponse", - (_message.Message,), - dict( - Entry=_reflection.GeneratedProtocolMessageType( - "Entry", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATEROWSRESPONSE_ENTRY, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""The result of applying a passed mutation in the original - request. - - - Attributes: - index: - The index into the original request's ``entries`` list of the - Entry for which a result is being reported. - status: - The result of the request Entry identified by ``index``. - Depending on how requests are batched during execution, it is - possible for one Entry to fail due to an error with another - Entry. In the event that this occurs, the same error will be - reported for both entries. 
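Because the batch is not applied atomically, a caller is expected to walk this per-entry status stream and decide which entries to retry. A short sketch of that pattern, assuming the MutateRowsRequest/MutateRowsResponse types from the bigtable_pb2 module shown in this diff and the standard google.rpc code constants; the helper name and the "collect and return failures" policy are illustrative only.

from google.cloud.bigtable_v2.proto import bigtable_pb2
from google.rpc import code_pb2

def mutate_rows_collect_failures(stub, table_name, entries):
    """Send one MutateRows batch and return indexes of entries that did not apply."""
    request = bigtable_pb2.MutateRowsRequest(table_name=table_name, entries=entries)
    failed = []
    for response in stub.MutateRows(request):      # server-streaming call
        for entry in response.entries:
            if entry.status.code != code_pb2.OK:
                # `index` points back into the original request's `entries` list,
                # so the caller can rebuild a smaller batch and retry just those rows.
                failed.append(entry.index)
    return failed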
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) - ), - ), - DESCRIPTOR=_MUTATEROWSRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for BigtableService.MutateRows. - - - Attributes: - entries: - One or more results for Entries from the batch request. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) - ), -) -_sym_db.RegisterMessage(MutateRowsResponse) -_sym_db.RegisterMessage(MutateRowsResponse.Entry) - -CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType( - "CheckAndMutateRowRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CHECKANDMUTATEROWREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.CheckAndMutateRow. - - - Attributes: - table_name: - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the "default" application profile will be used. - row_key: - Required. The key of the row to which the conditional mutation - should be applied. - predicate_filter: - The filter to be applied to the contents of the specified row. - Depending on whether or not any results are yielded, either - ``true_mutations`` or ``false_mutations`` will be executed. If - unset, checks that the row contains any values at all. - true_mutations: - Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied to - ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain at - least one entry if ``false_mutations`` is empty, and at most - 100000. - false_mutations: - Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied to - ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain at - least one entry if ``true_mutations`` is empty, and at most - 100000. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) - ), -) -_sym_db.RegisterMessage(CheckAndMutateRowRequest) - -CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType( - "CheckAndMutateRowResponse", - (_message.Message,), - dict( - DESCRIPTOR=_CHECKANDMUTATEROWRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.CheckAndMutateRow. - - - Attributes: - predicate_matched: - Whether or not the request's ``predicate_filter`` yielded any - results for the specified row. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) - ), -) -_sym_db.RegisterMessage(CheckAndMutateRowResponse) - -ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRowRequest", - (_message.Message,), - dict( - DESCRIPTOR=_READMODIFYWRITEROWREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.ReadModifyWriteRow. - - - Attributes: - table_name: - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the - form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the "default" application profile will be used. - row_key: - Required. The key of the row to which the read/modify/write - rules should be applied. - rules: - Required. Rules specifying how the specified row's contents - are to be transformed into writes. Entries are applied in - order, meaning that earlier rules will affect the results of - later ones. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) - ), -) -_sym_db.RegisterMessage(ReadModifyWriteRowRequest) - -ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRowResponse", - (_message.Message,), - dict( - DESCRIPTOR=_READMODIFYWRITEROWRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.ReadModifyWriteRow. - - - Attributes: - row: - A Row containing the new contents of all cells modified by the - request. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) - ), -) -_sym_db.RegisterMessage(ReadModifyWriteRowResponse) - - -DESCRIPTOR._options = None -_READROWSREQUEST.fields_by_name["table_name"]._options = None -_SAMPLEROWKEYSREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWREQUEST.fields_by_name["row_key"]._options = None -_MUTATEROWREQUEST.fields_by_name["mutations"]._options = None -_MUTATEROWSREQUEST_ENTRY.fields_by_name["mutations"]._options = None -_MUTATEROWSREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWSREQUEST.fields_by_name["entries"]._options = None -_CHECKANDMUTATEROWREQUEST.fields_by_name["table_name"]._options = None -_CHECKANDMUTATEROWREQUEST.fields_by_name["row_key"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["table_name"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["row_key"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["rules"]._options = None - -_BIGTABLE = _descriptor.ServiceDescriptor( - name="Bigtable", - full_name="google.bigtable.v2.Bigtable", - file=DESCRIPTOR, - index=0, - serialized_options=_b( - "\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only" - ), - serialized_start=2266, - serialized_end=4126, - methods=[ - _descriptor.MethodDescriptor( - name="ReadRows", - full_name="google.bigtable.v2.Bigtable.ReadRows", - index=0, - containing_service=None, - input_type=_READROWSREQUEST, - output_type=_READROWSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\332A\ntable_name\332A\031table_name,app_profile_id' - ), - ), - _descriptor.MethodDescriptor( - name="SampleRowKeys", - full_name="google.bigtable.v2.Bigtable.SampleRowKeys", - index=1, - containing_service=None, - input_type=_SAMPLEROWKEYSREQUEST, - output_type=_SAMPLEROWKEYSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\332A\ntable_name\332A\031table_name,app_profile_id" - ), - ), - _descriptor.MethodDescriptor( - name="MutateRow", - full_name="google.bigtable.v2.Bigtable.MutateRow", - 
index=2, - containing_service=None, - input_type=_MUTATEROWREQUEST, - output_type=_MUTATEROWRESPONSE, - serialized_options=_b( - '\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id' - ), - ), - _descriptor.MethodDescriptor( - name="MutateRows", - full_name="google.bigtable.v2.Bigtable.MutateRows", - index=3, - containing_service=None, - input_type=_MUTATEROWSREQUEST, - output_type=_MUTATEROWSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\332A\022table_name,entries\332A!table_name,entries,app_profile_id' - ), - ), - _descriptor.MethodDescriptor( - name="CheckAndMutateRow", - full_name="google.bigtable.v2.Bigtable.CheckAndMutateRow", - index=4, - containing_service=None, - input_type=_CHECKANDMUTATEROWREQUEST, - output_type=_CHECKANDMUTATEROWRESPONSE, - serialized_options=_b( - '\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id' - ), - ), - _descriptor.MethodDescriptor( - name="ReadModifyWriteRow", - full_name="google.bigtable.v2.Bigtable.ReadModifyWriteRow", - index=5, - containing_service=None, - input_type=_READMODIFYWRITEROWREQUEST, - output_type=_READMODIFYWRITEROWRESPONSE, - serialized_options=_b( - "\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\332A\030table_name,row_key,rules\332A'table_name,row_key,rules,app_profile_id" - ), - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLE) - -DESCRIPTOR.services_by_name["Bigtable"] = _BIGTABLE - -# @@protoc_insertion_point(module_scope) diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py deleted file mode 100644 index 4dd6cded9bc4..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ /dev/null @@ -1,148 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2, -) - - -class BigtableStub(object): - """Service for reading from and writing to existing Bigtable tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
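For context on how this stub was consumed: each attribute wraps one RPC on the channel (unary_stream for the streaming methods, unary_unary for the single-row ones), so calling a streaming method returns an iterator of responses. A minimal usage sketch against the module layout this diff removes; the endpoint and table name are placeholders, and real code would build an authenticated channel rather than an insecure one.

import grpc

from google.cloud.bigtable_v2.proto import bigtable_pb2, bigtable_pb2_grpc

def read_all_chunks(table_name, target="localhost:8086"):
    """Stream every CellChunk of a table through the raw generated stub."""
    channel = grpc.insecure_channel(target)        # placeholder target, e.g. an emulator
    stub = bigtable_pb2_grpc.BigtableStub(channel)
    request = bigtable_pb2.ReadRowsRequest(table_name=table_name)
    for response in stub.ReadRows(request):        # unary_stream => iterator of responses
        for chunk in response.chunks:
            yield chunk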
- """ - self.ReadRows = channel.unary_stream( - "/google.bigtable.v2.Bigtable/ReadRows", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, - ) - self.SampleRowKeys = channel.unary_stream( - "/google.bigtable.v2.Bigtable/SampleRowKeys", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - ) - self.MutateRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/MutateRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, - ) - self.MutateRows = channel.unary_stream( - "/google.bigtable.v2.Bigtable/MutateRows", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, - ) - self.CheckAndMutateRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/CheckAndMutateRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - ) - self.ReadModifyWriteRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - ) - - -class BigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables. - """ - - def ReadRows(self, request, context): - """Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. 
Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableServicer_to_server(servicer, server): - rpc_method_handlers = { - "ReadRows": grpc.unary_stream_rpc_method_handler( - servicer.ReadRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, - ), - "SampleRowKeys": grpc.unary_stream_rpc_method_handler( - servicer.SampleRowKeys, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, - ), - "MutateRow": grpc.unary_unary_rpc_method_handler( - servicer.MutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.SerializeToString, - ), - "MutateRows": grpc.unary_stream_rpc_method_handler( - servicer.MutateRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, - ), - "CheckAndMutateRow": grpc.unary_unary_rpc_method_handler( - servicer.CheckAndMutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, - ), - "ReadModifyWriteRow": grpc.unary_unary_rpc_method_handler( - servicer.ReadModifyWriteRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.v2.Bigtable", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto deleted file mode 100644 index 
b1f729517a47..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/v1/bigtable_data.proto"; -import "google/bigtable/v1/bigtable_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_generic_services = true; -option java_multiple_files = true; -option java_outer_classname = "BigtableServicesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Service for reading from and writing to existing Bigtables. -service BigtableService { - // Streams back the contents of all requested rows, optionally applying - // the same Reader filter to each. Depending on their size, rows may be - // broken up across multiple responses, but atomicity of each row will still - // be preserved. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" - body: "*" - }; - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - option (google.api.http) = { - get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" - }; - } - - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" - body: "*" - }; - } - - // Mutates multiple rows in a batch. Each individual row is mutated - // atomically as in MutateRow, but the entire batch is not executed - // atomically. - rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" - body: "*" - }; - } - - // Mutates a row atomically based on the output of a predicate Reader filter. - rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" - body: "*" - }; - } - - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. 
- rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" - body: "*" - }; - } -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto deleted file mode 100644 index d734ececaec3..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/bigtable/v1/bigtable_data.proto"; -import "google/rpc/status.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "BigtableServiceMessagesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Request message for BigtableServer.ReadRows. -message ReadRowsRequest { - // The unique name of the table from which to read. - string table_name = 1; - - // If neither row_key nor row_range is set, reads from all rows. - oneof target { - // The key of a single row from which to read. - bytes row_key = 2; - - // A range of rows from which to read. - RowRange row_range = 3; - - // A set of rows from which to read. Entries need not be in order, and will - // be deduplicated before reading. - // The total serialized size of the set must not exceed 1MB. - RowSet row_set = 8; - } - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entire table. - RowFilter filter = 5; - - // By default, rows are read sequentially, producing results which are - // guaranteed to arrive in increasing row order. Setting - // "allow_row_interleaving" to true allows multiple rows to be interleaved in - // the response stream, which increases throughput but breaks this guarantee, - // and may force the client to use more memory to buffer partially-received - // rows. Cannot be set to true when specifying "num_rows_limit". - bool allow_row_interleaving = 6; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - // Note that "allow_row_interleaving" cannot be set to true when this is set. - int64 num_rows_limit = 7; -} - -// Response message for BigtableService.ReadRows. -message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message Chunk { - oneof chunk { - // A subset of the data from a particular row. As long as no "reset_row" - // is received in between, multiple "row_contents" from the same row are - // from the same atomic view of that row, and will be received in the - // expected family/column/timestamp order. 
- Family row_contents = 1; - - // Indicates that the client should drop all previous chunks for - // "row_key", as it will be re-read from the beginning. - bool reset_row = 2; - - // Indicates that the client can safely process all previous chunks for - // "row_key", as its data has been fully read. - bool commit_row = 3; - } - } - - // The key of the row for which we're receiving data. - // Results will be received in increasing row key order, unless - // "allow_row_interleaving" was specified in the request. - bytes row_key = 1; - - // One or more chunks of the row specified by "row_key". - repeated Chunk chunks = 2; -} - -// Request message for BigtableService.SampleRowKeys. -message SampleRowKeysRequest { - // The unique name of the table from which to sample row keys. - string table_name = 1; -} - -// Response message for BigtableService.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // "row_key". Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // "offset_bytes" fields. - int64 offset_bytes = 2; -} - -// Request message for BigtableService.MutateRow. -message MutateRowRequest { - // The unique name of the table to which the mutation should be applied. - string table_name = 1; - - // The key of the row to which the mutation should be applied. - bytes row_key = 2; - - // Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3; -} - -// Request message for BigtableService.MutateRows. -message MutateRowsRequest { - message Entry { - // The key of the row to which the `mutations` should be applied. - bytes row_key = 1; - - // Changes to be atomically applied to the specified row. Mutations are - // applied in order, meaning that earlier mutations can be masked by - // later ones. - // At least one mutation must be specified. - repeated Mutation mutations = 2; - } - - // The unique name of the table to which the mutations should be applied. - string table_name = 1; - - // The row keys/mutations to be applied in bulk. - // Each entry is applied as an atomic mutation, but the entries may be - // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries may - // contain at most 100000 mutations. - repeated Entry entries = 2; -} - -// Response message for BigtableService.MutateRows. -message MutateRowsResponse { - // The results for each Entry from the request, presented in the order - // in which the entries were originally given. - // Depending on how requests are batched during execution, it is possible - // for one Entry to fail due to an error with another Entry. In the event - // that this occurs, the same error will be reported for both entries. 
- repeated google.rpc.Status statuses = 1; -} - -// Request message for BigtableService.CheckAndMutateRowRequest -message CheckAndMutateRowRequest { - // The unique name of the table to which the conditional mutation should be - // applied. - string table_name = 1; - - // The key of the row to which the conditional mutation should be applied. - bytes row_key = 2; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either "true_mutations" or - // "false_mutations" will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // yields at least one cell when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "false_mutations" is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // does not yield any cells when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "true_mutations" is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for BigtableService.CheckAndMutateRowRequest. -message CheckAndMutateRowResponse { - // Whether or not the request's "predicate_filter" yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for BigtableService.ReadModifyWriteRowRequest. -message ReadModifyWriteRowRequest { - // The unique name of the table to which the read/modify/write rules should be - // applied. - string table_name = 1; - - // The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2; - - // Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3; -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto deleted file mode 100644 index 2d5bddf302aa..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/table.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// -// -// Provides access to the table schemas only, not the data stored within -// the tables. -service BigtableTableAdmin { - // Creates a new table in the specified instance. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables" - body: "*" - }; - } - - // Creates a new table from the specified snapshot. The target table must - // not exist. The snapshot and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" - body: "*" - }; - } - - // Lists all tables served from a specified instance. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/tables" - }; - } - - // Gets metadata information about the specified table. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/tables/*}" - }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/tables/*}" - }; - } - - // Performs a series of column family modifications on the specified table. - // Either all or none of the modifications will occur before this method - // returns, but data requests received prior to that point may see a table - // where only some modifications have taken effect. - rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" - body: "*" - }; - } - - // Permanently drop/delete a row range from a specified table. The request can - // specify whether to delete all rows in a table, or only those that match a - // particular prefix. 
- rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" - body: "*" - }; - } - - // Generates a consistency token for a Table, which can be used in - // CheckConsistency to check whether mutations to the table that finished - // before this call started have been replicated. The tokens will be available - // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" - body: "*" - }; - } - - // Checks replication consistency based on a consistency token, that is, if - // replication has caught up based on the conditions specified in the token - // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" - body: "*" - }; - } - - // Creates a new snapshot in the specified cluster from the specified - // source table. The cluster and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" - body: "*" - }; - } - - // Gets metadata information about the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - } - - // Lists all snapshots associated with the specified cluster. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - }; - } - - // Permanently deletes the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. 
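The two consistency RPCs above are meant to be used as a pair: generate a token once the writes of interest have finished, then poll CheckConsistency with that token until replication has caught up. A sketch of that loop, assuming an admin stub generated from this proto in the same style as the data-plane stub; the bigtable_admin_v2 module path, the admin_stub argument, and the polling interval are assumptions rather than verified API surface.

import time

from google.cloud.bigtable_admin_v2.proto import (
    bigtable_table_admin_pb2 as admin_pb2,  # assumed module path for this proto
)

def wait_for_replication(admin_stub, table_name, poll_seconds=5.0):
    """Block until mutations that finished before this call are replicated."""
    token = admin_stub.GenerateConsistencyToken(
        admin_pb2.GenerateConsistencyTokenRequest(name=table_name)
    ).consistency_token
    while True:
        response = admin_stub.CheckConsistency(
            admin_pb2.CheckConsistencyRequest(name=table_name, consistency_token=token)
        )
        if response.consistent:
            return
        time.sleep(poll_seconds)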
- rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] -message CreateTableRequest { - // An initial split point for a newly created table. - message Split { - // Row key to use as an initial tablet boundary. - bytes key = 1; - } - - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The Table to create. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (tablets are similar to HBase regions). - // Given two split keys, `s1` and `s2`, three tablets will be created, - // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. - // - // Example: - // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // `"other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` - repeated Split initial_splits = 4; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotRequest { - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The unique name of the snapshot from which to restore the table. The - // snapshot and the table must be in the same instance. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string source_snapshot = 3; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] -message DropRowRangeRequest { - // The unique name of the table on which to drop a range of rows. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Delete all rows or by prefix. - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesRequest { - // The unique name of the instance for which tables should be listed. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The view to be applied to the returned tables' fields. - // Defaults to `NAME_ONLY` if unspecified; no others are currently supported. - Table.View view = 2; - - // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 4; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesResponse { - // The tables present in the requested instance. - repeated Table tables = 1; - - // Set if not all tables could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] -message GetTableRequest { - // The unique name of the requested table. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The view to be applied to the returned table's fields. - // Defaults to `SCHEMA_VIEW` if unspecified. - Table.View view = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] -message DeleteTableRequest { - // The unique name of the table to be deleted. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] -message ModifyColumnFamiliesRequest { - // A create, update, or delete of a particular column family. - message Modification { - // The ID of the column family to be modified. - string id = 1; - - // Column familiy modifications. - oneof mod { - // Create a new column family with the specified schema, or fail if - // one already exists with the given ID. - ColumnFamily create = 2; - - // Update an existing column family to the specified schema, or fail - // if no column family exists with the given ID. - ColumnFamily update = 3; - - // Drop (delete) the column family with the given ID, or fail if no such - // family exists. - bool drop = 4; - } - } - - // The unique name of the table whose families should be modified. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). - repeated Modification modifications = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenRequest { - // The unique name of the Table for which to create a consistency token. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenResponse { - // The generated consistency token. - string consistency_token = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyRequest { - // The unique name of the Table for which to check replication consistency. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The token created using GenerateConsistencyToken for the Table. - string consistency_token = 2; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyResponse { - // True only if the token is consistent. A token is consistent if replication - // has caught up with the restrictions specified in the request. - bool consistent = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableRequest { - // The unique name of the table to have the snapshot taken. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The name of the cluster where the snapshot will be created in. - // Values are of the form - // `projects//instances//clusters/`. - string cluster = 2; - - // The ID by which the new snapshot should be referred to within the parent - // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // rather than - // `projects//instances//clusters//snapshots/mysnapshot`. - string snapshot_id = 3; - - // The amount of time that the new snapshot can stay active after it is - // created. Once 'ttl' expires, the snapshot will get deleted. The maximum - // amount of time a snapshot can stay active is 7 days. If 'ttl' is not - // specified, the default value of 24 hours will be used. - google.protobuf.Duration ttl = 4; - - // Description of the snapshot. - string description = 5; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message GetSnapshotRequest { - // The unique name of the requested snapshot. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsRequest { - // The unique name of the cluster for which snapshots should be listed. - // Values are of the form - // `projects//instances//clusters/`. - // Use ` = '-'` to list snapshots for all clusters in an instance, - // e.g., `projects//instances//clusters/-`. - string parent = 1; - - // The maximum number of snapshots to return per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 2; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsResponse { - // The snapshots present in the requested cluster. - repeated Snapshot snapshots = 1; - - // Set if not all snapshots could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. 
This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message DeleteSnapshotRequest { - // The unique name of the snapshot to be deleted. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; -} - -// The metadata for the Operation returned by SnapshotTable. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableMetadata { - // The request that prompted the initiation of this SnapshotTable operation. - SnapshotTableRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateTableFromSnapshot. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotMetadata { - // The request that prompted the initiation of this CreateTableFromSnapshot - // operation. - CreateTableFromSnapshotRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto deleted file mode 100644 index e4efb74f560e..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. 
-message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - google.longrunning.Operation current_operation = 2; - - // The column families configured for this table, mapped by column family id. - map column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family. - string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. 
- Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto deleted file mode 100644 index 6e968fee17c1..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; - } - - // Creates a new column family within a specified table. - rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; - } - - // Changes the configuration of a specified column family. 
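For reference while reviewing the removal: the GcRule intersection/union semantics and the column-family RPCs above carry over to the v2 admin API, which the high-level Python client in this repo wraps with GC-rule helper classes. A minimal sketch, assuming the google-cloud-bigtable client; the project, instance, table, and family IDs are placeholders, and the update step simply illustrates the UpdateColumnFamily-style flow:

```python
import datetime

from google.cloud import bigtable
from google.cloud.bigtable import column_family

# Placeholder identifiers; assumes admin credentials are available.
client = bigtable.Client(project="my-project", admin=True)
table = client.instance("my-instance").table("my-table")

# Roughly "version() > 3 || (age() > 3d && version() > 1)" from the grammar
# above, expressed with the client's GC-rule helpers instead of gc_expression.
keep_recent = column_family.MaxVersionsGCRule(3)
old_and_not_latest = column_family.GCRuleIntersection(
    rules=[
        column_family.MaxAgeGCRule(datetime.timedelta(days=3)),
        column_family.MaxVersionsGCRule(1),
    ]
)
rule = column_family.GCRuleUnion(rules=[keep_recent, old_and_not_latest])

# Create the family with the composite rule, then tighten it later.
cf = table.column_family("cf1", gc_rule=rule)
cf.create()
cf.gc_rule = column_family.MaxVersionsGCRule(2)
cf.update()
```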
- rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; - } - - // Permanently deletes a specified column family and all of its data. - rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; - } - - // Delete all rows in a table corresponding to a particular prefix - rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" }; - } -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto b/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto deleted file mode 100644 index 617ede65592f..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. - string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. 
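The initial_split_keys example in the deleted CreateTableRequest maps onto Table.create() in the Python client, and ListTables onto Instance.list_tables(). A short sketch under the same key assignment described in the proto comment; project, instance, and table IDs are placeholders:

```python
from google.cloud import bigtable
from google.cloud.bigtable import column_family

client = bigtable.Client(project="my-project", admin=True)  # placeholder project
instance = client.instance("my-instance")                   # placeholder instance

# Pre-split the new table into the five tablets described above:
# [, apple), [apple, customer_1), [customer_1, customer_2),
# [customer_2, other), [other, ).
table = instance.table("pre-split-table")
table.create(
    initial_split_keys=[b"apple", b"customer_1", b"customer_2", b"other"],
    column_families={"cf1": column_family.MaxVersionsGCRule(1)},
)

# ListTables is exposed as Instance.list_tables().
for t in instance.list_tables():
    print(t.name)
```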
- // At present, only the names of the tables are populated. - repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. - string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted. - string name = 1; -} - -message BulkDeleteRowsRequest { - // The unique name of the table on which to perform the bulk delete - string table_name = 1; - - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/common.proto b/bigtable/google/cloud/bigtable_v2/proto/common.proto deleted file mode 100644 index 0ece12780eb9..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/common.proto +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Storage media types for persisting Bigtable data. -enum StorageType { - // The user did not specify a storage type. - STORAGE_TYPE_UNSPECIFIED = 0; - - // Flash (SSD) storage should be used. - SSD = 1; - - // Magnetic drive (HDD) storage should be used. - HDD = 2; -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/data.proto b/bigtable/google/cloud/bigtable_v2/proto/data.proto deleted file mode 100644 index 8fd0c15cb3e3..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/data.proto +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.v2; - -option csharp_namespace = "Google.Cloud.Bigtable.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "DataProto"; -option java_package = "com.google.bigtable.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\V2"; - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -message Row { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - bytes key = 1; - - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. - repeated Family families = 2; -} - -// Specifies (some of) the contents of a single row/column family intersection -// of a table. -message Family { - // The unique key which identifies this family within its row. This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - string name = 1; - - // Must not be empty. Sorted in order of increasing "qualifier". - repeated Column columns = 2; -} - -// Specifies (some of) the contents of a single row/column intersection of a -// table. -message Column { - // The unique key which identifies this column within its family. This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its `column_qualifier_regex_filter` field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - bytes qualifier = 1; - - // Must not be empty. Sorted in order of decreasing "timestamp_micros". - repeated Cell cells = 2; -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -message Cell { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser granularity to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of `timestamp_micros` which are multiples of 1000. - int64 timestamp_micros = 1; - - // The value stored in the cell. - // May contain any byte string, including the empty string, up to 100MiB in - // length. - bytes value = 2; - - // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. - repeated string labels = 3; -} - -// Specifies a contiguous range of rows. -message RowRange { - // The row key at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. 
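The RowSet / RowRange messages defined further down in this file are what the Python client builds when a RowSet or start/end keys are passed to Table.read_rows(). A minimal sketch, assuming the google-cloud-bigtable client; the table, family, and qualifier names are placeholders:

```python
from google.cloud import bigtable
from google.cloud.bigtable.row_set import RowSet

client = bigtable.Client(project="my-project")  # placeholder project
table = client.instance("my-instance").table("my-table")

# One explicit key plus a contiguous range [start_key_closed, end_key_open),
# mirroring the row_keys / row_ranges fields of RowSet.
row_set = RowSet()
row_set.add_row_key(b"zz")
row_set.add_row_range_from_keys(start_key=b"apple", end_key=b"customer_2")

for row in table.read_rows(row_set=row_set):
    # row.cells maps family -> qualifier -> [Cell, ...], newest cell first.
    cells = row.cells.get("cf1", {}).get(b"greeting", [])
    if cells:
        print(row.row_key, cells[0].value, cells[0].timestamp)
```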
- oneof start_key { - // Used when giving an inclusive lower bound for the range. - bytes start_key_closed = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_key_open = 2; - } - - // The row key at which to end the range. - // If neither field is set, interpreted as the infinite row key, exclusive. - oneof end_key { - // Used when giving an exclusive upper bound for the range. - bytes end_key_open = 3; - - // Used when giving an inclusive upper bound for the range. - bytes end_key_closed = 4; - } -} - -// Specifies a non-contiguous set of rows. -message RowSet { - // Single rows included in the set. - repeated bytes row_keys = 1; - - // Contiguous row ranges included in the set. - repeated RowRange row_ranges = 2; -} - -// Specifies a contiguous range of columns within a single column family. -// The range spans from <column_family>:<start_qualifier> to -// <column_family>:<end_qualifier>, where both bounds can be either -// inclusive or exclusive. -message ColumnRange { - // The name of the column family within which this range falls. - string family_name = 1; - - // The column qualifier at which to start the range (within `column_family`). - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_qualifier { - // Used when giving an inclusive lower bound for the range. - bytes start_qualifier_closed = 2; - - // Used when giving an exclusive lower bound for the range. - bytes start_qualifier_open = 3; - } - - // The column qualifier at which to end the range (within `column_family`). - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_qualifier { - // Used when giving an inclusive upper bound for the range. - bytes end_qualifier_closed = 4; - - // Used when giving an exclusive upper bound for the range. - bytes end_qualifier_open = 5; - } -} - -// Specified a contiguous range of microsecond timestamps. -message TimestampRange { - // Inclusive lower bound. If left empty, interpreted as 0. - int64 start_timestamp_micros = 1; - - // Exclusive upper bound. If left empty, interpreted as infinity. - int64 end_timestamp_micros = 2; -} - -// Specifies a contiguous range of raw byte values. -message ValueRange { - // The value at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_value { - // Used when giving an inclusive lower bound for the range. - bytes start_value_closed = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_value_open = 2; - } - - // The value at which to end the range. - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_value { - // Used when giving an inclusive upper bound for the range. - bytes end_value_closed = 3; - - // Used when giving an exclusive upper bound for the range. - bytes end_value_open = 4; - } -} - -// Takes a row as input and produces an alternate view of the row based on -// specified rules. For example, a RowFilter might trim down a row to include -// just the cells from columns matching a given regular expression, or might -// return all the cells of a row but not their values. More complicated filters -// can be composed out of these components to express requests such as, "within -// every column of a particular family, give just the two most recent cells -// which are older than timestamp X." 
-// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the `value_regex_filter`, -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that `RE2(.)` is equivalent by default to -// `RE2([^\n])`, meaning that it does not match newlines. When attempting to -// match an arbitrary byte, you should therefore use the escape sequence `\C`, -// which may need to be further escaped as `\\C` in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the `strip_value_transformer`, which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. - message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. 
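The Chain / Interleave / Condition composition described above is exposed in the Python client through the row_filters module. A hedged sketch; the specific filters, family, and qualifier names below are illustrative choices, not taken from this diff:

```python
from google.cloud import bigtable
from google.cloud.bigtable import row_filters

client = bigtable.Client(project="my-project")  # placeholder project
table = client.instance("my-instance").table("my-table")

# Chain: restrict to family "cf1", then keep only the two most recent cells
# per column, close to "within every column of a particular family, give
# just the two most recent cells".
chain = row_filters.RowFilterChain(
    filters=[
        row_filters.FamilyNameRegexFilter("cf1"),
        row_filters.CellsColumnLimitFilter(2),
    ]
)

# Condition: if any cell whose qualifier matches b"flag" survives the
# predicate, strip values from the output; otherwise pass the row through.
conditional = row_filters.ConditionalRowFilter(
    base_filter=row_filters.ColumnQualifierRegexFilter(b"flag"),
    true_filter=row_filters.StripValueTransformerFilter(True),
    false_filter=row_filters.PassAllFilter(True),
)

row = table.read_row(b"customer_1", filter_=chain)
if row is not None:
    print(row.cells)
```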
- message Condition { - // If `predicate_filter` outputs any cells, then `true_filter` will be - // evaluated on the input row. Otherwise, `false_filter` will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if `predicate_filter` returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if `predicate_filter` does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. - Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. - // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. - // - // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. - // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the `\C` escape - // sequence must be used if a true wildcard is desired. The `.` character - // will not match the new line character `\n`, which may be present in a - // binary key. 
- bytes row_key_regex_filter = 4; - - // Matches all cells from a row with probability p, and matches no cells - // from the row with probability 1-p. - double row_sample_filter = 14; - - // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the `:` - // character, even if it is not being used as a literal. - // Note that, since column families cannot contain the new line character - // `\n`, it is sufficient to use `.` as a full wildcard when matching - // column family names. - string family_name_regex_filter = 5; - - // Matches only cells from columns whose qualifiers satisfy the given RE2 - // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the `\C` - // escape sequence must be used if a true wildcard is desired. The `.` - // character will not match the new line character `\n`, which may be - // present in a binary qualifier. - bytes column_qualifier_regex_filter = 6; - - // Matches only cells from columns within the given range. - ColumnRange column_range_filter = 7; - - // Matches only cells with timestamps within the given range. - TimestampRange timestamp_range_filter = 8; - - // Matches only cells with values that satisfy the given regular expression. - // Note that, since cell values can contain arbitrary bytes, the `\C` escape - // sequence must be used if a true wildcard is desired. The `.` character - // will not match the new line character `\n`, which may be present in a - // binary value. - bytes value_regex_filter = 9; - - // Matches only cells with values that fall within the given range. - ValueRange value_range_filter = 15; - - // Skips the first N cells of each row, matching all subsequent cells. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_offset_filter = 10; - - // Matches only the first N cells of each row. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_limit_filter = 11; - - // Matches only the most recent N cells within each column. For example, - // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, - // skip all earlier cells in `foo:bar`, and then begin matching again in - // column `foo:bar2`. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_column_limit_filter = 12; - - // Replaces each cell's value with the empty string. - bool strip_value_transformer = 13; - - // Applies the given label to all cells in the output row. This allows - // the client to determine which results were produced from which part of - // the filter. - // - // Values must be at most 15 characters in length, and match the RE2 - // pattern `[a-z0-9\\-]+` - // - // Due to a technical limitation, it is not currently possible to apply - // multiple labels to a cell. As a result, a Chain may have no more than - // one sub-filter which contains a `apply_label_transformer`. It is okay for - // an Interleave to contain multiple `apply_label_transformers`, as they - // will be applied to separate copies of the input. This may be relaxed in - // the future. - string apply_label_transformer = 19; - } -} - -// Specifies a particular change to be made to the contents of a row. -message Mutation { - // A Mutation which sets the value of the specified cell. 
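The Mutation variants below (SetCell, DeleteFromColumn, DeleteFromFamily, DeleteFromRow) are what DirectRow accumulates before a MutateRow call in the Python client. A small sketch with placeholder IDs and column layout:

```python
import datetime

from google.cloud import bigtable

client = bigtable.Client(project="my-project")  # placeholder project
table = client.instance("my-instance").table("my-table")

row = table.direct_row(b"customer_1")

# SetCell: an explicit client-side timestamp keeps retries idempotent;
# leaving it unset falls back to server time for this mutation.
row.set_cell("cf1", b"greeting", b"hello", timestamp=datetime.datetime.utcnow())

# DeleteFromColumn / DeleteFromFamily equivalents.
row.delete_cell("cf1", b"obsolete")
row.delete_cells("cf2", row.ALL_COLUMNS)

# All queued mutations for this row are applied atomically.
row.commit()
```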
- message SetCell { - // The name of the family into which new data should be written. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the granularity of the table (e.g. micros, millis). - int64 timestamp_micros = 3; - - // The value to be written into the specified cell. - bytes value = 4; - } - - // A Mutation which deletes cells from the specified column, optionally - // restricting the deletions to a given timestamp range. - message DeleteFromColumn { - // The name of the family from which cells should be deleted. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The range of timestamps within which cells should be deleted. - TimestampRange time_range = 3; - } - - // A Mutation which deletes all cells from the specified column family. - message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that `append_value` be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that `increment_amount` be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/bigtable/google/cloud/bigtable_v2/proto/data_pb2.py deleted file mode 100644 index fb753be1e670..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ /dev/null @@ -1,2613 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
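The ReadModifyWriteRule append/increment behaviour defined above corresponds to AppendRow in the Python client, whose commit() returns the post-modification cells. A sketch with placeholder IDs and qualifiers:

```python
from google.cloud import bigtable

client = bigtable.Client(project="my-project")  # placeholder project
table = client.instance("my-instance").table("my-table")

row = table.append_row(b"customer_1")

# append_value rule: an unset target cell is treated as the empty string.
row.append_cell_value("cf1", b"greeting", b" world")

# increment_amount rule: an unset target cell is treated as zero; an existing
# cell must hold an 8-byte big-endian signed integer.
row.increment_cell_value("cf1", b"visit_count", 1)

# The server returns the new latest values, keyed by family and qualifier.
result = row.commit()
print(result["cf1"][b"visit_count"])
```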
-# source: google/cloud/bigtable_v2/proto/data.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_v2/proto/data.proto", - package="google.bigtable.v2", - syntax="proto3", - serialized_options=_b( - "\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" - ), - serialized_pb=_b( - '\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f 
\x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3' - ), -) - - -_ROW = _descriptor.Descriptor( - name="Row", - full_name="google.bigtable.v2.Row", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.v2.Row.key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="families", - full_name="google.bigtable.v2.Row.families", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=65, - serialized_end=129, -) - - -_FAMILY = _descriptor.Descriptor( - name="Family", - full_name="google.bigtable.v2.Family", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.v2.Family.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="columns", - full_name="google.bigtable.v2.Family.columns", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=131, - serialized_end=198, -) - - -_COLUMN = _descriptor.Descriptor( - name="Column", - full_name="google.bigtable.v2.Column", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="qualifier", - full_name="google.bigtable.v2.Column.qualifier", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cells", - full_name="google.bigtable.v2.Column.cells", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=200, - serialized_end=268, -) - - -_CELL = _descriptor.Descriptor( - name="Cell", - full_name="google.bigtable.v2.Cell", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.Cell.timestamp_micros", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.Cell.value", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.v2.Cell.labels", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=270, - serialized_end=333, -) - - -_ROWRANGE = 
_descriptor.Descriptor( - name="RowRange", - full_name="google.bigtable.v2.RowRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="start_key_closed", - full_name="google.bigtable.v2.RowRange.start_key_closed", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_key_open", - full_name="google.bigtable.v2.RowRange.start_key_open", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_key_open", - full_name="google.bigtable.v2.RowRange.end_key_open", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_key_closed", - full_name="google.bigtable.v2.RowRange.end_key_closed", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_key", - full_name="google.bigtable.v2.RowRange.start_key", - index=0, - containing_type=None, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_key", - full_name="google.bigtable.v2.RowRange.end_key", - index=1, - containing_type=None, - fields=[], - ), - ], - serialized_start=336, - serialized_end=474, -) - - -_ROWSET = _descriptor.Descriptor( - name="RowSet", - full_name="google.bigtable.v2.RowSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="row_keys", - full_name="google.bigtable.v2.RowSet.row_keys", - index=0, - number=1, - type=12, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_ranges", - full_name="google.bigtable.v2.RowSet.row_ranges", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=476, - serialized_end=552, -) - - -_COLUMNRANGE = _descriptor.Descriptor( - name="ColumnRange", - full_name="google.bigtable.v2.ColumnRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - 
name="family_name", - full_name="google.bigtable.v2.ColumnRange.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_qualifier_closed", - full_name="google.bigtable.v2.ColumnRange.start_qualifier_closed", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_qualifier_open", - full_name="google.bigtable.v2.ColumnRange.start_qualifier_open", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_qualifier_closed", - full_name="google.bigtable.v2.ColumnRange.end_qualifier_closed", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_qualifier_open", - full_name="google.bigtable.v2.ColumnRange.end_qualifier_open", - index=4, - number=5, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_qualifier", - full_name="google.bigtable.v2.ColumnRange.start_qualifier", - index=0, - containing_type=None, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_qualifier", - full_name="google.bigtable.v2.ColumnRange.end_qualifier", - index=1, - containing_type=None, - fields=[], - ), - ], - serialized_start=555, - serialized_end=753, -) - - -_TIMESTAMPRANGE = _descriptor.Descriptor( - name="TimestampRange", - full_name="google.bigtable.v2.TimestampRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="start_timestamp_micros", - full_name="google.bigtable.v2.TimestampRange.start_timestamp_micros", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_timestamp_micros", - full_name="google.bigtable.v2.TimestampRange.end_timestamp_micros", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - 
enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=755, - serialized_end=833, -) - - -_VALUERANGE = _descriptor.Descriptor( - name="ValueRange", - full_name="google.bigtable.v2.ValueRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="start_value_closed", - full_name="google.bigtable.v2.ValueRange.start_value_closed", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_value_open", - full_name="google.bigtable.v2.ValueRange.start_value_open", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_value_closed", - full_name="google.bigtable.v2.ValueRange.end_value_closed", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_value_open", - full_name="google.bigtable.v2.ValueRange.end_value_open", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_value", - full_name="google.bigtable.v2.ValueRange.start_value", - index=0, - containing_type=None, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_value", - full_name="google.bigtable.v2.ValueRange.end_value", - index=1, - containing_type=None, - fields=[], - ), - ], - serialized_start=836, - serialized_end=988, -) - - -_ROWFILTER_CHAIN = _descriptor.Descriptor( - name="Chain", - full_name="google.bigtable.v2.RowFilter.Chain", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="filters", - full_name="google.bigtable.v2.RowFilter.Chain.filters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1807, - serialized_end=1862, -) - -_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( - name="Interleave", - full_name="google.bigtable.v2.RowFilter.Interleave", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="filters", - full_name="google.bigtable.v2.RowFilter.Interleave.filters", - index=0, - number=1, - 
type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1864, - serialized_end=1924, -) - -_ROWFILTER_CONDITION = _descriptor.Descriptor( - name="Condition", - full_name="google.bigtable.v2.RowFilter.Condition", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="predicate_filter", - full_name="google.bigtable.v2.RowFilter.Condition.predicate_filter", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="true_filter", - full_name="google.bigtable.v2.RowFilter.Condition.true_filter", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="false_filter", - full_name="google.bigtable.v2.RowFilter.Condition.false_filter", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1927, - serialized_end=2100, -) - -_ROWFILTER = _descriptor.Descriptor( - name="RowFilter", - full_name="google.bigtable.v2.RowFilter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="chain", - full_name="google.bigtable.v2.RowFilter.chain", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="interleave", - full_name="google.bigtable.v2.RowFilter.interleave", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="condition", - full_name="google.bigtable.v2.RowFilter.condition", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="sink", - full_name="google.bigtable.v2.RowFilter.sink", - index=3, - number=16, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="pass_all_filter", - full_name="google.bigtable.v2.RowFilter.pass_all_filter", - index=4, - number=17, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="block_all_filter", - full_name="google.bigtable.v2.RowFilter.block_all_filter", - index=5, - number=18, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_key_regex_filter", - full_name="google.bigtable.v2.RowFilter.row_key_regex_filter", - index=6, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_sample_filter", - full_name="google.bigtable.v2.RowFilter.row_sample_filter", - index=7, - number=14, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="family_name_regex_filter", - full_name="google.bigtable.v2.RowFilter.family_name_regex_filter", - index=8, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="column_qualifier_regex_filter", - full_name="google.bigtable.v2.RowFilter.column_qualifier_regex_filter", - index=9, - number=6, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="column_range_filter", - full_name="google.bigtable.v2.RowFilter.column_range_filter", - index=10, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="timestamp_range_filter", - full_name="google.bigtable.v2.RowFilter.timestamp_range_filter", - index=11, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value_regex_filter", - full_name="google.bigtable.v2.RowFilter.value_regex_filter", - index=12, - number=9, - type=12, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value_range_filter", - full_name="google.bigtable.v2.RowFilter.value_range_filter", - index=13, - number=15, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cells_per_row_offset_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_row_offset_filter", - index=14, - number=10, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cells_per_row_limit_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_row_limit_filter", - index=15, - number=11, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cells_per_column_limit_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_column_limit_filter", - index=16, - number=12, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="strip_value_transformer", - full_name="google.bigtable.v2.RowFilter.strip_value_transformer", - index=17, - number=13, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="apply_label_transformer", - full_name="google.bigtable.v2.RowFilter.apply_label_transformer", - index=18, - number=19, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="filter", - full_name="google.bigtable.v2.RowFilter.filter", - index=0, - containing_type=None, - fields=[], - ), - ], - serialized_start=991, - serialized_end=2110, -) - - -_MUTATION_SETCELL = _descriptor.Descriptor( - name="SetCell", - full_name="google.bigtable.v2.Mutation.SetCell", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.SetCell.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.Mutation.SetCell.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.Mutation.SetCell.timestamp_micros", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.Mutation.SetCell.value", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2408, - serialized_end=2505, -) - -_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( - name="DeleteFromColumn", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="time_range", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.time_range", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2507, - serialized_end=2628, -) - -_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( - name="DeleteFromFamily", - full_name="google.bigtable.v2.Mutation.DeleteFromFamily", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.DeleteFromFamily.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2630, - serialized_end=2669, -) - -_MUTATION_DELETEFROMROW = _descriptor.Descriptor( - name="DeleteFromRow", - full_name="google.bigtable.v2.Mutation.DeleteFromRow", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2671, - serialized_end=2686, -) - -_MUTATION = _descriptor.Descriptor( - name="Mutation", - full_name="google.bigtable.v2.Mutation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="set_cell", - full_name="google.bigtable.v2.Mutation.set_cell", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete_from_column", - full_name="google.bigtable.v2.Mutation.delete_from_column", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete_from_family", - full_name="google.bigtable.v2.Mutation.delete_from_family", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete_from_row", - full_name="google.bigtable.v2.Mutation.delete_from_row", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _MUTATION_SETCELL, - _MUTATION_DELETEFROMCOLUMN, - _MUTATION_DELETEFROMFAMILY, - _MUTATION_DELETEFROMROW, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mutation", - full_name="google.bigtable.v2.Mutation.mutation", - index=0, - containing_type=None, - fields=[], - ), - ], - serialized_start=2113, - serialized_end=2698, -) - - -_READMODIFYWRITERULE = _descriptor.Descriptor( - name="ReadModifyWriteRule", - full_name="google.bigtable.v2.ReadModifyWriteRule", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ReadModifyWriteRule.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.ReadModifyWriteRule.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="append_value", - full_name="google.bigtable.v2.ReadModifyWriteRule.append_value", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="increment_amount", - full_name="google.bigtable.v2.ReadModifyWriteRule.increment_amount", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="rule", - full_name="google.bigtable.v2.ReadModifyWriteRule.rule", - index=0, - containing_type=None, - fields=[], - ), - ], - serialized_start=2701, - serialized_end=2829, -) - -_ROW.fields_by_name["families"].message_type = _FAMILY -_FAMILY.fields_by_name["columns"].message_type = _COLUMN -_COLUMN.fields_by_name["cells"].message_type = _CELL -_ROWRANGE.oneofs_by_name["start_key"].fields.append( - _ROWRANGE.fields_by_name["start_key_closed"] -) -_ROWRANGE.fields_by_name[ - "start_key_closed" -].containing_oneof = _ROWRANGE.oneofs_by_name["start_key"] -_ROWRANGE.oneofs_by_name["start_key"].fields.append( - _ROWRANGE.fields_by_name["start_key_open"] -) -_ROWRANGE.fields_by_name["start_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "start_key" -] -_ROWRANGE.oneofs_by_name["end_key"].fields.append( - _ROWRANGE.fields_by_name["end_key_open"] -) -_ROWRANGE.fields_by_name["end_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "end_key" -] -_ROWRANGE.oneofs_by_name["end_key"].fields.append( - _ROWRANGE.fields_by_name["end_key_closed"] -) -_ROWRANGE.fields_by_name["end_key_closed"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "end_key" -] -_ROWSET.fields_by_name["row_ranges"].message_type = _ROWRANGE -_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["start_qualifier_closed"] -) -_COLUMNRANGE.fields_by_name[ - "start_qualifier_closed" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] -_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["start_qualifier_open"] -) -_COLUMNRANGE.fields_by_name[ - "start_qualifier_open" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] -_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["end_qualifier_closed"] -) -_COLUMNRANGE.fields_by_name[ - "end_qualifier_closed" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] -_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["end_qualifier_open"] 
-) -_COLUMNRANGE.fields_by_name[ - "end_qualifier_open" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] -_VALUERANGE.oneofs_by_name["start_value"].fields.append( - _VALUERANGE.fields_by_name["start_value_closed"] -) -_VALUERANGE.fields_by_name[ - "start_value_closed" -].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] -_VALUERANGE.oneofs_by_name["start_value"].fields.append( - _VALUERANGE.fields_by_name["start_value_open"] -) -_VALUERANGE.fields_by_name[ - "start_value_open" -].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] -_VALUERANGE.oneofs_by_name["end_value"].fields.append( - _VALUERANGE.fields_by_name["end_value_closed"] -) -_VALUERANGE.fields_by_name[ - "end_value_closed" -].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] -_VALUERANGE.oneofs_by_name["end_value"].fields.append( - _VALUERANGE.fields_by_name["end_value_open"] -) -_VALUERANGE.fields_by_name[ - "end_value_open" -].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] -_ROWFILTER_CHAIN.fields_by_name["filters"].message_type = _ROWFILTER -_ROWFILTER_CHAIN.containing_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.fields_by_name["filters"].message_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["predicate_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["true_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["false_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.containing_type = _ROWFILTER -_ROWFILTER.fields_by_name["chain"].message_type = _ROWFILTER_CHAIN -_ROWFILTER.fields_by_name["interleave"].message_type = _ROWFILTER_INTERLEAVE -_ROWFILTER.fields_by_name["condition"].message_type = _ROWFILTER_CONDITION -_ROWFILTER.fields_by_name["column_range_filter"].message_type = _COLUMNRANGE -_ROWFILTER.fields_by_name["timestamp_range_filter"].message_type = _TIMESTAMPRANGE -_ROWFILTER.fields_by_name["value_range_filter"].message_type = _VALUERANGE -_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["chain"]) -_ROWFILTER.fields_by_name["chain"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["interleave"] -) -_ROWFILTER.fields_by_name["interleave"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["condition"] -) -_ROWFILTER.fields_by_name["condition"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["sink"]) -_ROWFILTER.fields_by_name["sink"].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["pass_all_filter"] -) -_ROWFILTER.fields_by_name[ - "pass_all_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["block_all_filter"] -) -_ROWFILTER.fields_by_name[ - "block_all_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["row_key_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "row_key_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["row_sample_filter"] -) -_ROWFILTER.fields_by_name[ - "row_sample_filter" 
-].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["family_name_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "family_name_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["column_qualifier_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "column_qualifier_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["column_range_filter"] -) -_ROWFILTER.fields_by_name[ - "column_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["timestamp_range_filter"] -) -_ROWFILTER.fields_by_name[ - "timestamp_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["value_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "value_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["value_range_filter"] -) -_ROWFILTER.fields_by_name[ - "value_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_row_offset_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_row_offset_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_row_limit_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_row_limit_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_column_limit_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_column_limit_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["strip_value_transformer"] -) -_ROWFILTER.fields_by_name[ - "strip_value_transformer" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["apply_label_transformer"] -) -_ROWFILTER.fields_by_name[ - "apply_label_transformer" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_MUTATION_SETCELL.containing_type = _MUTATION -_MUTATION_DELETEFROMCOLUMN.fields_by_name["time_range"].message_type = _TIMESTAMPRANGE -_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION -_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION -_MUTATION_DELETEFROMROW.containing_type = _MUTATION -_MUTATION.fields_by_name["set_cell"].message_type = _MUTATION_SETCELL -_MUTATION.fields_by_name["delete_from_column"].message_type = _MUTATION_DELETEFROMCOLUMN -_MUTATION.fields_by_name["delete_from_family"].message_type = _MUTATION_DELETEFROMFAMILY -_MUTATION.fields_by_name["delete_from_row"].message_type = _MUTATION_DELETEFROMROW -_MUTATION.oneofs_by_name["mutation"].fields.append(_MUTATION.fields_by_name["set_cell"]) -_MUTATION.fields_by_name["set_cell"].containing_oneof = _MUTATION.oneofs_by_name[ - "mutation" -] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_column"] -) -_MUTATION.fields_by_name[ - "delete_from_column" -].containing_oneof = 
_MUTATION.oneofs_by_name["mutation"] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_family"] -) -_MUTATION.fields_by_name[ - "delete_from_family" -].containing_oneof = _MUTATION.oneofs_by_name["mutation"] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_row"] -) -_MUTATION.fields_by_name["delete_from_row"].containing_oneof = _MUTATION.oneofs_by_name[ - "mutation" -] -_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( - _READMODIFYWRITERULE.fields_by_name["append_value"] -) -_READMODIFYWRITERULE.fields_by_name[ - "append_value" -].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] -_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( - _READMODIFYWRITERULE.fields_by_name["increment_amount"] -) -_READMODIFYWRITERULE.fields_by_name[ - "increment_amount" -].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] -DESCRIPTOR.message_types_by_name["Row"] = _ROW -DESCRIPTOR.message_types_by_name["Family"] = _FAMILY -DESCRIPTOR.message_types_by_name["Column"] = _COLUMN -DESCRIPTOR.message_types_by_name["Cell"] = _CELL -DESCRIPTOR.message_types_by_name["RowRange"] = _ROWRANGE -DESCRIPTOR.message_types_by_name["RowSet"] = _ROWSET -DESCRIPTOR.message_types_by_name["ColumnRange"] = _COLUMNRANGE -DESCRIPTOR.message_types_by_name["TimestampRange"] = _TIMESTAMPRANGE -DESCRIPTOR.message_types_by_name["ValueRange"] = _VALUERANGE -DESCRIPTOR.message_types_by_name["RowFilter"] = _ROWFILTER -DESCRIPTOR.message_types_by_name["Mutation"] = _MUTATION -DESCRIPTOR.message_types_by_name["ReadModifyWriteRule"] = _READMODIFYWRITERULE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Row = _reflection.GeneratedProtocolMessageType( - "Row", - (_message.Message,), - dict( - DESCRIPTOR=_ROW, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies the complete (requested) contents of a single - row of a table. Rows which exceed 256MiB in size cannot be read in full. - - - Attributes: - key: - The unique key which identifies this row within its table. - This is the same key that's used to identify the row in, for - example, a MutateRowRequest. May contain any non-empty byte - string up to 4KiB in length. - families: - May be empty, but only if the entire row is empty. The mutual - ordering of column families is not specified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) - ), -) -_sym_db.RegisterMessage(Row) - -Family = _reflection.GeneratedProtocolMessageType( - "Family", - (_message.Message,), - dict( - DESCRIPTOR=_FAMILY, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single row/column - family intersection of a table. - - - Attributes: - name: - The unique key which identifies this family within its row. - This is the same key that's used to identify the family in, - for example, a RowFilter which sets its - "family\_name\_regex\_filter" field. Must match - ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors may - produce cells in a sentinel family with an empty name. Must be - no greater than 64 characters in length. - columns: - Must not be empty. Sorted in order of increasing "qualifier". 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) - ), -) -_sym_db.RegisterMessage(Family) - -Column = _reflection.GeneratedProtocolMessageType( - "Column", - (_message.Message,), - dict( - DESCRIPTOR=_COLUMN, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single row/column - intersection of a table. - - - Attributes: - qualifier: - The unique key which identifies this column within its family. - This is the same key that's used to identify the column in, - for example, a RowFilter which sets its - ``column_qualifier_regex_filter`` field. May contain any byte - string, including the empty string, up to 16kiB in length. - cells: - Must not be empty. Sorted in order of decreasing - "timestamp\_micros". - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) - ), -) -_sym_db.RegisterMessage(Column) - -Cell = _reflection.GeneratedProtocolMessageType( - "Cell", - (_message.Message,), - dict( - DESCRIPTOR=_CELL, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single - row/column/timestamp of a table. - - - Attributes: - timestamp_micros: - The cell's stored timestamp, which also uniquely identifies it - within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity will - only allow values of ``timestamp_micros`` which are multiples - of 1000. - value: - The value stored in the cell. May contain any byte string, - including the empty string, up to 100MiB in length. - labels: - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) - ), -) -_sym_db.RegisterMessage(Cell) - -RowRange = _reflection.GeneratedProtocolMessageType( - "RowRange", - (_message.Message,), - dict( - DESCRIPTOR=_ROWRANGE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a contiguous range of rows. - - - Attributes: - start_key: - The row key at which to start the range. If neither field is - set, interpreted as the empty string, inclusive. - start_key_closed: - Used when giving an inclusive lower bound for the range. - start_key_open: - Used when giving an exclusive lower bound for the range. - end_key: - The row key at which to end the range. If neither field is - set, interpreted as the infinite row key, exclusive. - end_key_open: - Used when giving an exclusive upper bound for the range. - end_key_closed: - Used when giving an inclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) - ), -) -_sym_db.RegisterMessage(RowRange) - -RowSet = _reflection.GeneratedProtocolMessageType( - "RowSet", - (_message.Message,), - dict( - DESCRIPTOR=_ROWSET, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a non-contiguous set of rows. - - - Attributes: - row_keys: - Single rows included in the set. - row_ranges: - Contiguous row ranges included in the set. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) - ), -) -_sym_db.RegisterMessage(RowSet) - -ColumnRange = _reflection.GeneratedProtocolMessageType( - "ColumnRange", - (_message.Message,), - dict( - DESCRIPTOR=_COLUMNRANGE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a contiguous range of columns within a single - column family. The range spans from : - to :, where both bounds can be either - inclusive or exclusive. - - - Attributes: - family_name: - The name of the column family within which this range falls. - start_qualifier: - The column qualifier at which to start the range (within - ``column_family``). If neither field is set, interpreted as - the empty string, inclusive. - start_qualifier_closed: - Used when giving an inclusive lower bound for the range. - start_qualifier_open: - Used when giving an exclusive lower bound for the range. - end_qualifier: - The column qualifier at which to end the range (within - ``column_family``). If neither field is set, interpreted as - the infinite string, exclusive. - end_qualifier_closed: - Used when giving an inclusive upper bound for the range. - end_qualifier_open: - Used when giving an exclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) - ), -) -_sym_db.RegisterMessage(ColumnRange) - -TimestampRange = _reflection.GeneratedProtocolMessageType( - "TimestampRange", - (_message.Message,), - dict( - DESCRIPTOR=_TIMESTAMPRANGE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specified a contiguous range of microsecond timestamps. - - - Attributes: - start_timestamp_micros: - Inclusive lower bound. If left empty, interpreted as 0. - end_timestamp_micros: - Exclusive upper bound. If left empty, interpreted as infinity. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) - ), -) -_sym_db.RegisterMessage(TimestampRange) - -ValueRange = _reflection.GeneratedProtocolMessageType( - "ValueRange", - (_message.Message,), - dict( - DESCRIPTOR=_VALUERANGE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a contiguous range of raw byte values. - - - Attributes: - start_value: - The value at which to start the range. If neither field is - set, interpreted as the empty string, inclusive. - start_value_closed: - Used when giving an inclusive lower bound for the range. - start_value_open: - Used when giving an exclusive lower bound for the range. - end_value: - The value at which to end the range. If neither field is set, - interpreted as the infinite string, exclusive. - end_value_closed: - Used when giving an inclusive upper bound for the range. - end_value_open: - Used when giving an exclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) - ), -) -_sym_db.RegisterMessage(ValueRange) - -RowFilter = _reflection.GeneratedProtocolMessageType( - "RowFilter", - (_message.Message,), - dict( - Chain=_reflection.GeneratedProtocolMessageType( - "Chain", - (_message.Message,), - dict( - DESCRIPTOR=_ROWFILTER_CHAIN, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which sends rows through several RowFilters in - sequence. - - - Attributes: - filters: - The elements of "filters" are chained together to process the - input row: in row -> f(0) -> intermediate row -> f(1) -> ... - -> f(N) -> out row The full chain is executed atomically. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) - ), - ), - Interleave=_reflection.GeneratedProtocolMessageType( - "Interleave", - (_message.Message,), - dict( - DESCRIPTOR=_ROWFILTER_INTERLEAVE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which sends each row to each of several - component RowFilters and interleaves the results. - - - Attributes: - filters: - The elements of "filters" all process a copy of the input row, - and the results are pooled, sorted, and combined into a single - output row. If multiple cells are produced with the same - column and timestamp, they will all appear in the output row - in an unspecified mutual order. Consider the following - example, with three filters: :: - input row | - ----------------------------------------------------- - | | | - f(0) f(1) f(2) - | | | 1: - foo,bar,10,x foo,bar,10,z far,bar,7,a - 2: foo,blah,11,z far,blah,5,x - far,blah,5,x | | - | - ----------------------------------------------------- - | 1: foo,bar,10,z // could have - switched with #2 2: foo,bar,10,x // - could have switched with #1 3: - foo,blah,11,z 4: far,bar,7,a 5: - far,blah,5,x // identical to #6 6: - far,blah,5,x // identical to #5 All interleaved filters are - executed atomically. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) - ), - ), - Condition=_reflection.GeneratedProtocolMessageType( - "Condition", - (_message.Message,), - dict( - DESCRIPTOR=_ROWFILTER_CONDITION, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which evaluates one of two possible - RowFilters, depending on whether or not a predicate RowFilter outputs - any cells from the input row. - - IMPORTANT NOTE: The predicate filter does not execute atomically with - the true and false filters, which may lead to inconsistent or unexpected - results. Additionally, Condition filters have poor performance, - especially when filters are set for the false condition. - - - Attributes: - predicate_filter: - If ``predicate_filter`` outputs any cells, then - ``true_filter`` will be evaluated on the input row. Otherwise, - ``false_filter`` will be evaluated. - true_filter: - The filter to apply to the input row if ``predicate_filter`` - returns any results. If not provided, no results will be - returned in the true case. - false_filter: - The filter to apply to the input row if ``predicate_filter`` - does not return any results. If not provided, no results will - be returned in the false case. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) - ), - ), - DESCRIPTOR=_ROWFILTER, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Takes a row as input and produces an alternate view of the - row based on specified rules. For example, a RowFilter might trim down a - row to include just the cells from columns matching a given regular - expression, or might return all the cells of a row but not their values. - More complicated filters can be composed out of these components to - express requests such as, "within every column of a particular family, - give just the two most recent cells which are older than timestamp X." - - There are two broad categories of RowFilters (true filters and - transformers), as well as two ways to compose simple filters into more - complex ones (chains and interleaves). They work as follows: - - - True filters alter the input row by excluding some of its cells - wholesale from the output row. 
An example of a true filter is the - ``value_regex_filter``, which excludes cells whose values don't match - the specified pattern. All regex true filters use RE2 syntax - (https://github.com/google/re2/wiki/Syntax) in raw byte mode - (RE2::Latin1), and are evaluated as full matches. An important point - to keep in mind is that ``RE2(.)`` is equivalent by default to - ``RE2([^\n])``, meaning that it does not match newlines. When - attempting to match an arbitrary byte, you should therefore use the - escape sequence ``\C``, which may need to be further escaped as - ``\\C`` in your client language. - - - Transformers alter the input row by changing the values of some of - its cells in the output, without excluding them completely. - Currently, the only supported transformer is the - ``strip_value_transformer``, which replaces every cell's value with - the empty string. - - - Chains and interleaves are described in more detail in the - RowFilter.Chain and RowFilter.Interleave documentation. - - The total serialized size of a RowFilter message must not exceed 4096 - bytes, and RowFilters may not be nested within each other (in Chains or - Interleaves) to a depth of more than 20. - - - Attributes: - filter: - Which of the possible RowFilter types to apply. If none are - set, this RowFilter returns all cells in the input row. - chain: - Applies several RowFilters to the data in sequence, - progressively narrowing the results. - interleave: - Applies several RowFilters to the data in parallel and - combines the results. - condition: - Applies one of two possible RowFilters to the data based on - the output of a predicate RowFilter. - sink: - ADVANCED USE ONLY. Hook for introspection into the RowFilter. - Outputs all cells directly to the output of the read rather - than to any parent filter. Consider the following example: :: - Chain( FamilyRegex("A"), Interleave( - All(), Chain(Label("foo"), Sink()) ), - QualifierRegex("B") ) A,A,1,w - A,B,2,x B,B,4,z - | FamilyRegex("A") - | A,A,1,w - A,B,2,x | - +------------+-------------+ | - | All() Label(foo) - | | A,A,1,w - A,A,1,w,labels:[foo] A,B,2,x - A,B,2,x,labels:[foo] | - | | Sink() - --------------+ | | - | +------------+ x------+ - A,A,1,w,labels:[foo] | - A,B,2,x,labels:[foo] A,A,1,w - | A,B,2,x - | | - | QualifierRegex("B") - | | - | A,B,2,x - | | - | - +--------------------------------+ - | A,A,1,w,labels:[foo] - A,B,2,x,labels:[foo] // could be switched - A,B,2,x // could be switched Despite being - excluded by the qualifier filter, a copy of every cell that - reaches the sink is present in the final result. As with an - [Interleave][google.bigtable.v2.RowFilter.Interleave], - duplicate cells are possible, and appear in an unspecified - mutual order. In this case we have a duplicate with column - "A:B" and timestamp 2, because one copy passed through the all - filter while the other was passed through the label and sink. - Note that one copy has label "foo", while the other does not. - Cannot be used within the ``predicate_filter``, - ``true_filter``, or ``false_filter`` of a - [Condition][google.bigtable.v2.RowFilter.Condition]. - pass_all_filter: - Matches all cells, regardless of input. Functionally - equivalent to leaving ``filter`` unset, but included for - completeness. - block_all_filter: - Does not match any cells, regardless of input. Useful for - temporarily disabling just part of a filter. - row_key_regex_filter: - Matches only cells from rows whose keys satisfy the given RE2 - regex. 
In other words, passes through the entire row when the - key matches, and otherwise produces an empty row. Note that, - since row keys can contain arbitrary bytes, the ``\C`` escape - sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\n``, which - may be present in a binary key. - row_sample_filter: - Matches all cells from a row with probability p, and matches - no cells from the row with probability 1-p. - family_name_regex_filter: - Matches only cells from columns whose families satisfy the - given RE2 regex. For technical reasons, the regex must not - contain the ``:`` character, even if it is not being used as a - literal. Note that, since column families cannot contain the - new line character ``\n``, it is sufficient to use ``.`` as a - full wildcard when matching column family names. - column_qualifier_regex_filter: - Matches only cells from columns whose qualifiers satisfy the - given RE2 regex. Note that, since column qualifiers can - contain arbitrary bytes, the ``\C`` escape sequence must be - used if a true wildcard is desired. The ``.`` character will - not match the new line character ``\n``, which may be present - in a binary qualifier. - column_range_filter: - Matches only cells from columns within the given range. - timestamp_range_filter: - Matches only cells with timestamps within the given range. - value_regex_filter: - Matches only cells with values that satisfy the given regular - expression. Note that, since cell values can contain arbitrary - bytes, the ``\C`` escape sequence must be used if a true - wildcard is desired. The ``.`` character will not match the - new line character ``\n``, which may be present in a binary - value. - value_range_filter: - Matches only cells with values that fall within the given - range. - cells_per_row_offset_filter: - Skips the first N cells of each row, matching all subsequent - cells. If duplicate cells are present, as is possible when - using an Interleave, each copy of the cell is counted - separately. - cells_per_row_limit_filter: - Matches only the first N cells of each row. If duplicate cells - are present, as is possible when using an Interleave, each - copy of the cell is counted separately. - cells_per_column_limit_filter: - Matches only the most recent N cells within each column. For - example, if N=2, this filter would match column ``foo:bar`` at - timestamps 10 and 9, skip all earlier cells in ``foo:bar``, - and then begin matching again in column ``foo:bar2``. If - duplicate cells are present, as is possible when using an - Interleave, each copy of the cell is counted separately. - strip_value_transformer: - Replaces each cell's value with the empty string. - apply_label_transformer: - Applies the given label to all cells in the output row. This - allows the client to determine which results were produced - from which part of the filter. Values must be at most 15 - characters in length, and match the RE2 pattern - ``[a-z0-9\\-]+`` Due to a technical limitation, it is not - currently possible to apply multiple labels to a cell. As a - result, a Chain may have no more than one sub-filter which - contains a ``apply_label_transformer``. It is okay for an - Interleave to contain multiple ``apply_label_transformers``, - as they will be applied to separate copies of the input. This - may be relaxed in the future. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) - ), -) -_sym_db.RegisterMessage(RowFilter) -_sym_db.RegisterMessage(RowFilter.Chain) -_sym_db.RegisterMessage(RowFilter.Interleave) -_sym_db.RegisterMessage(RowFilter.Condition) - -Mutation = _reflection.GeneratedProtocolMessageType( - "Mutation", - (_message.Message,), - dict( - SetCell=_reflection.GeneratedProtocolMessageType( - "SetCell", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_SETCELL, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which sets the value of the specified cell. - - - Attributes: - family_name: - The name of the family into which new data should be written. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column into which new data should be - written. Can be any byte string, including the empty string. - timestamp_micros: - The timestamp of the cell into which new data should be - written. Use -1 for current Bigtable server time. Otherwise, - the client should set this value itself, noting that the - default value is a timestamp of zero if the field is left - unspecified. Values must match the granularity of the table - (e.g. micros, millis). - value: - The value to be written into the specified cell. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) - ), - ), - DeleteFromColumn=_reflection.GeneratedProtocolMessageType( - "DeleteFromColumn", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_DELETEFROMCOLUMN, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes cells from the specified column, - optionally restricting the deletions to a given timestamp range. - - - Attributes: - family_name: - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column from which cells should be - deleted. Can be any byte string, including the empty string. - time_range: - The range of timestamps within which cells should be deleted. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) - ), - ), - DeleteFromFamily=_reflection.GeneratedProtocolMessageType( - "DeleteFromFamily", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_DELETEFROMFAMILY, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes all cells from the specified - column family. - - - Attributes: - family_name: - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) - ), - ), - DeleteFromRow=_reflection.GeneratedProtocolMessageType( - "DeleteFromRow", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_DELETEFROMROW, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes all cells from the containing - row. - - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) - ), - ), - DESCRIPTOR=_MUTATION, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a particular change to be made to the contents - of a row. - - - Attributes: - mutation: - Which of the possible Mutation types to apply. - set_cell: - Set a cell's value. - delete_from_column: - Deletes cells from a column. - delete_from_family: - Deletes cells from a column family. - delete_from_row: - Deletes cells from the entire row. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) - ), -) -_sym_db.RegisterMessage(Mutation) -_sym_db.RegisterMessage(Mutation.SetCell) -_sym_db.RegisterMessage(Mutation.DeleteFromColumn) -_sym_db.RegisterMessage(Mutation.DeleteFromFamily) -_sym_db.RegisterMessage(Mutation.DeleteFromRow) - -ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRule", - (_message.Message,), - dict( - DESCRIPTOR=_READMODIFYWRITERULE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies an atomic read/modify/write operation on the - latest value of the specified column. - - - Attributes: - family_name: - The name of the family to which the read/modify/write should - be applied. Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column to which the read/modify/write - should be applied. Can be any byte string, including the empty - string. - rule: - The rule used to determine the column's new latest value from - its current latest value. - append_value: - Rule specifying that ``append_value`` be appended to the - existing value. If the targeted cell is unset, it will be - treated as containing the empty string. - increment_amount: - Rule specifying that ``increment_amount`` be added to the - existing value. If the targeted cell is unset, it will be - treated as containing a zero. Otherwise, the targeted cell - must contain an 8-byte value (interpreted as a 64-bit big- - endian signed integer), or the entire request will fail. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) - ), -) -_sym_db.RegisterMessage(ReadModifyWriteRule) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py b/bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/bigtable/google/cloud/bigtable_v2/proto/instance.proto b/bigtable/google/cloud/bigtable_v2/proto/instance.proto deleted file mode 100644 index bb69b1f66d42..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/instance.proto +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/common.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "InstanceProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and -// the resources that serve them. -// All tables in an instance are served from a single -// [Cluster][google.bigtable.admin.v2.Cluster]. -message Instance { - // Possible states of an instance. - enum State { - // The state of the instance could not be determined. - STATE_NOT_KNOWN = 0; - - // The instance has been successfully created and can serve requests - // to its tables. - READY = 1; - - // The instance is currently being created, and may be destroyed - // if the creation process encounters an error. - CREATING = 2; - } - - // The type of the instance. - enum Type { - // The type of the instance is unspecified. If set when creating an - // instance, a `PRODUCTION` instance will be created. If set when updating - // an instance, the type will be left unchanged. - TYPE_UNSPECIFIED = 0; - - // An instance meant for production use. `serve_nodes` must be set - // on the cluster. - PRODUCTION = 1; - - // The instance is meant for development and testing purposes only; it has - // no performance or uptime guarantees and is not covered by SLA. - // After a development instance is created, it can be upgraded by - // updating the instance to type `PRODUCTION`. An instance created - // as a production instance cannot be changed to a development instance. - // When creating a development instance, `serve_nodes` on the cluster must - // not be set. - DEVELOPMENT = 2; - } - - // (`OutputOnly`) - // The unique name of the instance. Values are of the form - // `projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]`. - string name = 1; - - // The descriptive name for this instance as it appears in UIs. - // Can be changed at any time, but should be kept globally unique - // to avoid confusion. - string display_name = 2; - - // (`OutputOnly`) - // The current state of the instance. - State state = 3; - - // The type of the instance. Defaults to `PRODUCTION`. - Type type = 4; - - // Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. They can be used to filter resources and aggregate - // metrics. - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. - // * Label values must be between 0 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. - // * No more than 64 labels can be associated with a given resource. - // * Keys and values must both be under 128 bytes. - map labels = 5; -} - -// A resizable group of nodes in a particular cloud location, capable -// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent -// [Instance][google.bigtable.admin.v2.Instance]. -message Cluster { - // Possible states of a cluster. - enum State { - // The state of the cluster could not be determined. 
- STATE_NOT_KNOWN = 0; - - // The cluster has been successfully created and is ready to serve requests. - READY = 1; - - // The cluster is currently being created, and may be destroyed - // if the creation process encounters an error. - // A cluster may not be able to serve requests while being created. - CREATING = 2; - - // The cluster is currently being resized, and may revert to its previous - // node count if the process encounters an error. - // A cluster is still capable of serving requests while being resized, - // but may exhibit performance as if its number of allocated nodes is - // between the starting and requested states. - RESIZING = 3; - - // The cluster has no backing nodes. The data (tables) still - // exist, but no operations can be performed on the cluster. - DISABLED = 4; - } - - // (`OutputOnly`) - // The unique name of the cluster. Values are of the form - // `projects//instances//clusters/[a-z][-a-z0-9]*`. - string name = 1; - - // (`CreationOnly`) - // The location where this cluster's nodes and storage reside. For best - // performance, clients should be located as close as possible to this - // cluster. Currently only zones are supported, so values should be of the - // form `projects//locations/`. - string location = 2; - - // (`OutputOnly`) - // The current state of the cluster. - State state = 3; - - // The number of nodes allocated to this cluster. More nodes enable higher - // throughput and more consistent performance. - int32 serve_nodes = 4; - - // (`CreationOnly`) - // The type of storage used by this cluster to serve its - // parent instance's tables, unless explicitly overridden. - StorageType default_storage_type = 5; -} - -// A configuration object describing how Cloud Bigtable should treat traffic -// from a particular end user application. -message AppProfile { - // Read/write requests may be routed to any cluster in the instance, and will - // fail over to another cluster in the event of transient errors or delays. - // Choosing this option sacrifices read-your-writes consistency to improve - // availability. - message MultiClusterRoutingUseAny { - - } - - // Unconditionally routes all read/write requests to a specific cluster. - // This option preserves read-your-writes consistency, but does not improve - // availability. - message SingleClusterRouting { - // The cluster to which read/write requests should be routed. - string cluster_id = 1; - - // Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are - // allowed by this app profile. It is unsafe to send these requests to - // the same table/row/column in multiple clusters. - bool allow_transactional_writes = 2; - } - - // (`OutputOnly`) - // The unique name of the app profile. Values are of the form - // `projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - string name = 1; - - // Strongly validated etag for optimistic concurrency control. Preserve the - // value returned from `GetAppProfile` when calling `UpdateAppProfile` to - // fail the request if there has been a modification in the mean time. The - // `update_mask` of the request need not include `etag` for this protection - // to apply. - // See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and - // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more - // details. - string etag = 2; - - // Optional long form description of the use case for this AppProfile. - string description = 3; - - // The routing policy for all read/write requests which use this app profile. 
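The Instance and Cluster messages being removed here map onto Client.instance and Instance.cluster in the admin client, which is also how the system tests further down in this diff provision resources. A minimal creation sketch, assuming the 1.x client with placeholder IDs and location:

    from google.cloud.bigtable import enums
    from google.cloud.bigtable.client import Client

    client = Client(project="my-project", admin=True)  # admin=True is needed for instance admin RPCs
    instance = client.instance(
        "my-instance",
        instance_type=enums.Instance.Type.PRODUCTION,
        labels={"python-system": "example"},
    )
    cluster = instance.cluster(
        "my-instance-cluster", location_id="us-central1-c", serve_nodes=3
    )
    operation = instance.create(clusters=[cluster])  # long-running operation
    operation.result(timeout=60)                     # wait until the instance is READY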
- // A value must be explicitly set. - oneof routing_policy { - // Use a multi-cluster routing policy that may pick any cluster. - MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5; - - // Use a single-cluster routing policy. - SingleClusterRouting single_cluster_routing = 6; - } -} diff --git a/bigtable/google/cloud/bigtable_v2/proto/table.proto b/bigtable/google/cloud/bigtable_v2/proto/table.proto deleted file mode 100644 index 5d4374effc59..000000000000 --- a/bigtable/google/cloud/bigtable_v2/proto/table.proto +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "TableProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - // The state of a table's data in a particular cluster. - message ClusterState { - // Table replication states. - enum ReplicationState { - // The replication state of the table is unknown in this cluster. - STATE_NOT_KNOWN = 0; - - // The cluster was recently created, and the table must finish copying - // over pre-existing data from other clusters before it can begin - // receiving live replication updates and serving Data API requests. - INITIALIZING = 1; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to planned internal maintenance. - PLANNED_MAINTENANCE = 2; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to unplanned or emergency maintenance. - UNPLANNED_MAINTENANCE = 3; - - // The table can serve Data API requests from this cluster. Depending on - // replication delay, reads may not immediately reflect the state of the - // table in other clusters. - READY = 4; - } - - // (`OutputOnly`) - // The state of replication for the table in this cluster. - ReplicationState replication_state = 1; - } - - // Possible timestamp granularities to use when keeping multiple versions - // of data in a table. - enum TimestampGranularity { - // The user did not specify a granularity. Should not be returned. - // When specified during table creation, MILLIS will be used. - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; - - // The table keeps data versioned at a granularity of 1ms. - MILLIS = 1; - } - - // Defines a view over a table's fields. - enum View { - // Uses the default view for each method as documented in its request. - VIEW_UNSPECIFIED = 0; - - // Only populates `name`. 
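An AppProfile with single-cluster routing can likewise be managed through Instance.app_profile; a sketch under the assumption that the 1.x handwritten surface is used (profile and cluster IDs are placeholders):

    from google.cloud.bigtable import enums
    from google.cloud.bigtable.client import Client

    client = Client(project="my-project", admin=True)
    instance = client.instance("my-instance")

    # SingleClusterRouting preserves read-your-writes consistency on one cluster.
    profile = instance.app_profile(
        "my-app-profile",
        routing_policy_type=enums.RoutingPolicyType.SINGLE,
        description="pinned routing for transactional callers",
        cluster_id="my-instance-cluster",
        allow_transactional_writes=False,
    )
    profile.create(ignore_warnings=True)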
- NAME_ONLY = 1; - - // Only populates `name` and fields related to the table's schema. - SCHEMA_VIEW = 2; - - // Only populates `name` and fields related to the table's - // replication state. - REPLICATION_VIEW = 3; - - // Populates all fields. - FULL = 4; - } - - // (`OutputOnly`) - // The unique name of the table. Values are of the form - // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` - string name = 1; - - // (`OutputOnly`) - // Map from cluster ID to per-cluster table state. - // If it could not be determined whether or not the table has data in a - // particular cluster (for example, if its zone is unavailable), then - // there will be an entry for the cluster with UNKNOWN `replication_status`. - // Views: `REPLICATION_VIEW`, `FULL` - map cluster_states = 2; - - // (`CreationOnly`) - // The column families configured for this table, mapped by column family ID. - // Views: `SCHEMA_VIEW`, `FULL` - map column_families = 3; - - // (`CreationOnly`) - // The granularity (i.e. `MILLIS`) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // If unspecified at creation time, the value will be set to `MILLIS`. - // Views: `SCHEMA_VIEW`, `FULL` - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // Garbage collection rule specified as a protobuf. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 1; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - // Garbage collection rules. - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} - -// A snapshot of a table at a particular time. A snapshot can be used as a -// checkpoint for data restoration or a data source for a new table. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message Snapshot { - // Possible states of a snapshot. - enum State { - // The state of the snapshot could not be determined. - STATE_NOT_KNOWN = 0; - - // The snapshot has been successfully created and can serve all requests. 
- READY = 1; - - // The snapshot is currently being created, and may be destroyed if the - // creation process encounters an error. A snapshot may not be restored to a - // table while it is being created. - CREATING = 2; - } - - // (`OutputOnly`) - // The unique name of the snapshot. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; - - // (`OutputOnly`) - // The source table at the time the snapshot was taken. - Table source_table = 2; - - // (`OutputOnly`) - // The size of the data in the source table at the time the snapshot was - // taken. In some cases, this value may be computed asynchronously via a - // background process and a placeholder of 0 will be used in the meantime. - int64 data_size_bytes = 3; - - // (`OutputOnly`) - // The time when the snapshot is created. - google.protobuf.Timestamp create_time = 4; - - // (`OutputOnly`) - // The time when the snapshot will be deleted. The maximum amount of time a - // snapshot can stay active is 365 days. If 'ttl' is not specified, - // the default maximum of 365 days will be used. - google.protobuf.Timestamp delete_time = 5; - - // (`OutputOnly`) - // The current state of the snapshot. - State state = 6; - - // (`OutputOnly`) - // Description of the snapshot. - string description = 7; -} diff --git a/bigtable/google/cloud/bigtable_v2/types.py b/bigtable/google/cloud/bigtable_v2/types.py deleted file mode 100644 index 607e1b09c5dd..000000000000 --- a/bigtable/google/cloud/bigtable_v2/types.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 -from google.protobuf import any_pb2 -from google.protobuf import wrappers_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - any_pb2, - wrappers_pb2, - status_pb2, -] - -_local_modules = [ - bigtable_pb2, - data_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.bigtable_v2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/bigtable/noxfile.py b/bigtable/noxfile.py deleted file mode 100644 index 00c2b4793529..000000000000 --- a/bigtable/noxfile.py +++ /dev/null @@ -1,185 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
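Table, ColumnFamily, and GcRule from the removed table.proto correspond to table and column-family helpers on the client side. A minimal sketch, assuming the 1.x surface (the union rule mirrors GcRule.union above; IDs are placeholders):

    import datetime

    from google.cloud.bigtable.client import Client
    from google.cloud.bigtable.column_family import (
        GCRuleUnion,
        MaxAgeGCRule,
        MaxVersionsGCRule,
    )

    client = Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")
    table.create()

    # Keep at most two versions, or any cell younger than seven days.
    gc_rule = GCRuleUnion(
        rules=[MaxVersionsGCRule(2), MaxAgeGCRule(datetime.timedelta(days=7))]
    )
    table.column_family("col-fam-id1", gc_rule=gc_rule).create()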
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -import os -import shutil - -import nox - - -LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core")) - -@nox.session(python="3.7") -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. - """ - session.install("flake8", "black", *LOCAL_DEPS) - session.run( - "black", - "--check", - "google", - "tests", - "docs", - ) - session.run("flake8", "google", "tests") - - -@nox.session(python="3.6") -def blacken(session): - """Run black. - - Format code to uniform standard. - """ - session.install("black") - session.run( - "black", - "google", - "tests", - "docs", - ) - - -@nox.session(python="3.7") -def lint_setup_py(session): - """Verify that setup.py is valid (including RST check).""" - session.install("docutils", "pygments") - session.run("python", "setup.py", "check", "--restructuredtext", "--strict") - - -def default(session): - # Install all test dependencies, then install this package in-place. - session.install("mock", "pytest", "pytest-cov") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", ".") - - # Run py.test against the unit tests. - session.run( - "py.test", - "--quiet", - "--cov=google.cloud", - "--cov=tests.unit", - "--cov-append", - "--cov-config=.coveragerc", - "--cov-report=", - "--cov-fail-under=97", - os.path.join("tests", "unit"), - *session.posargs, - ) - - -@nox.session(python=["2.7", "3.5", "3.6", "3.7"]) -def unit(session): - """Run the unit test suite.""" - default(session) - - -@nox.session(python=["2.7", "3.7"]) -def system(session): - """Run the system test suite.""" - system_test_path = os.path.join("tests", "system.py") - system_test_folder_path = os.path.join("tests", "system") - # Sanity check: Only run tests if the environment variable is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable") - - system_test_exists = os.path.exists(system_test_path) - system_test_folder_exists = os.path.exists(system_test_folder_path) - # Sanity check: only run tests if found. - if not system_test_exists and not system_test_folder_exists: - session.skip("System tests were not found") - - # Use pre-release gRPC for system tests. - session.install("--pre", "grpcio") - - # Install all test dependencies, then install this package into the - # virtualenv's dist-packages. - session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", "../test_utils/") - session.install("-e", ".") - - # Run py.test against the system tests. - if system_test_exists: - session.run("py.test", "--quiet", system_test_path, *session.posargs) - if system_test_folder_exists: - session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) - - -@nox.session(python="3.7") -def cover(session): - """Run the final coverage report. - - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=99") - - session.run("coverage", "erase") - -@nox.session(python="3.7") -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) - -@nox.session(python=['2.7', '3.7']) -def snippets(session): - """Run the documentation example snippets.""" - # Sanity check: Only run snippets system tests if the environment variable - # is set. - if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): - session.skip('Credentials must be set via environment variable.') - - # Install all test dependencies, then install local packages in place. - session.install('mock', 'pytest') - for local_dep in LOCAL_DEPS: - session.install('-e', local_dep) - session.install('-e', '../test_utils/') - session.install('-e', '.') - session.run( - 'py.test', - '--quiet', - os.path.join('docs', 'snippets.py'), - *session.posargs - ) - session.run( - 'py.test', - '--quiet', - os.path.join('docs', 'snippets_table.py'), - *session.posargs - ) diff --git a/bigtable/pylint.config.py b/bigtable/pylint.config.py deleted file mode 100644 index 074e6626a6e6..000000000000 --- a/bigtable/pylint.config.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This module is used to configure gcp-devrel-py-tools run-pylint.""" - -import copy - -from gcp_devrel.tools import pylint - -# Library configuration - -# library_additions = {} -library_replacements = copy.deepcopy(pylint.DEFAULT_LIBRARY_RC_REPLACEMENTS) -library_replacements['MASTER']['ignore'].append('_generated') - -# Test configuration - -# test_additions = copy.deepcopy(library_additions) -# test_replacements = copy.deepcopy(library_replacements) diff --git a/bigtable/releases.md b/bigtable/releases.md deleted file mode 120000 index 4c43d49320dc..000000000000 --- a/bigtable/releases.md +++ /dev/null @@ -1 +0,0 @@ -../../bigtable/CHANGELOG.md \ No newline at end of file diff --git a/bigtable/setup.cfg b/bigtable/setup.cfg deleted file mode 100644 index 3bd555500e37..000000000000 --- a/bigtable/setup.cfg +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! -[bdist_wheel] -universal = 1 diff --git a/bigtable/setup.py b/bigtable/setup.py deleted file mode 100644 index c5075bbcc61b..000000000000 --- a/bigtable/setup.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import os - -import setuptools - - -# Package metadata. - -name = 'google-cloud-bigtable' -description = 'Google Cloud Bigtable API client library' -version = '1.2.1' -# Should be one of: -# 'Development Status :: 3 - Alpha' -# 'Development Status :: 4 - Beta' -# 'Development Status :: 5 - Production/Stable' -release_status = 'Development Status :: 5 - Production/Stable' -dependencies = [ - "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", - "google-cloud-core >= 1.0.3, < 2.0dev", - "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", -] -extras = { -} - - -# Setup boilerplate below this line. - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -# Only include packages under the 'google' namespace. Do not include tests, -# benchmarks, etc. -packages = [ - package for package in setuptools.find_packages() - if package.startswith('google')] - -# Determine which namespaces are needed. -namespaces = ['google'] -if 'google.cloud' in packages: - namespaces.append('google.cloud') - - -setuptools.setup( - name=name, - version=version, - description=description, - long_description=readme, - author='Google LLC', - author_email='googleapis-packages@google.com', - license='Apache 2.0', - url='https://github.com/GoogleCloudPlatform/google-cloud-python', - classifiers=[ - release_status, - 'Intended Audience :: Developers', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Operating System :: OS Independent', - 'Topic :: Internet', - ], - platforms='Posix; MacOS X; Windows', - packages=packages, - namespace_packages=namespaces, - install_requires=dependencies, - extras_require=extras, - python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', - include_package_data=True, - zip_safe=False, -) diff --git a/bigtable/synth.metadata b/bigtable/synth.metadata deleted file mode 100644 index 1bedccbac19c..000000000000 --- a/bigtable/synth.metadata +++ /dev/null @@ -1,448 +0,0 @@ -{ - "updateTime": "2020-01-30T13:15:12.607903Z", - "sources": [ - { - "generator": { - "name": "artman", - "version": "0.44.4", - "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c1246a29e22b0f98e800a536b5b0da2d933a55f2", - "internalRef": "292310790", - "log": "c1246a29e22b0f98e800a536b5b0da2d933a55f2\nUpdating v1 protos with the latest inline documentation (in comments) and config options. 
Also adding a per-service .yaml file.\n\nPiperOrigin-RevId: 292310790\n\nb491d07cadaae7cde5608321f913e5ca1459b32d\nRevert accidental local_repository change\n\nPiperOrigin-RevId: 292245373\n\naf3400a8cb6110025198b59a0f7d018ae3cda700\nUpdate gapic-generator dependency (prebuilt PHP binary support).\n\nPiperOrigin-RevId: 292243997\n\n341fd5690fae36f36cf626ef048fbcf4bbe7cee6\ngrafeas: v1 add resource_definition for the grafeas.io/Project and change references for Project.\n\nPiperOrigin-RevId: 292221998\n\n42e915ec2ece1cd37a590fbcd10aa2c0fb0e5b06\nUpdate the gapic-generator, protoc-java-resource-name-plugin and protoc-docs-plugin to the latest commit.\n\nPiperOrigin-RevId: 292182368\n\nf035f47250675d31492a09f4a7586cfa395520a7\nFix grafeas build and update build.sh script to include gerafeas.\n\nPiperOrigin-RevId: 292168753\n\n26ccb214b7bc4a716032a6266bcb0a9ca55d6dbb\nasset: v1p1beta1 add client config annotations and retry config\n\nPiperOrigin-RevId: 292154210\n\n974ee5c0b5d03e81a50dafcedf41e0efebb5b749\nasset: v1beta1 add client config annotations\n\nPiperOrigin-RevId: 292152573\n\ncf3b61102ed5f36b827bc82ec39be09525f018c8\n Fix to protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 292034635\n\n4e1cfaa7c0fede9e65d64213ca3da1b1255816c0\nUpdate the public proto to support UTF-8 encoded id for CatalogService API, increase the ListCatalogItems deadline to 300s and some minor documentation change\n\nPiperOrigin-RevId: 292030970\n\n9c483584f8fd5a1b862ae07973f4cc7bb3e46648\nasset: add annotations to v1p1beta1\n\nPiperOrigin-RevId: 292009868\n\ne19209fac29731d0baf6d9ac23da1164f7bdca24\nAdd the google.rpc.context.AttributeContext message to the open source\ndirectories.\n\nPiperOrigin-RevId: 291999930\n\nae5662960573f279502bf98a108a35ba1175e782\noslogin API: move file level option on top of the file to avoid protobuf.js bug.\n\nPiperOrigin-RevId: 291990506\n\neba3897fff7c49ed85d3c47fc96fe96e47f6f684\nAdd cc_proto_library and cc_grpc_library targets for Spanner and IAM protos.\n\nPiperOrigin-RevId: 291988651\n\n8e981acfd9b97ea2f312f11bbaa7b6c16e412dea\nBeta launch for PersonDetection and FaceDetection features.\n\nPiperOrigin-RevId: 291821782\n\n994e067fae3b21e195f7da932b08fff806d70b5d\nasset: add annotations to v1p2beta1\n\nPiperOrigin-RevId: 291815259\n\n244e1d2c89346ca2e0701b39e65552330d68545a\nAdd Playable Locations service\n\nPiperOrigin-RevId: 291806349\n\n909f8f67963daf45dd88d020877fb9029b76788d\nasset: add annotations to v1beta2\n\nPiperOrigin-RevId: 291805301\n\n3c39a1d6e23c1ef63c7fba4019c25e76c40dfe19\nKMS: add file-level message for CryptoKeyPath, it is defined in gapic yaml but not\nin proto files.\n\nPiperOrigin-RevId: 291420695\n\nc6f3f350b8387f8d1b85ed4506f30187ebaaddc3\ncontaineranalysis: update v1beta1 and bazel build with annotations\n\nPiperOrigin-RevId: 291401900\n\n92887d74b44e4e636252b7b8477d0d2570cd82db\nfix: fix the location of grpc config file.\n\nPiperOrigin-RevId: 291396015\n\ne26cab8afd19d396b929039dac5d874cf0b5336c\nexpr: add default_host and method_signature annotations to CelService\n\nPiperOrigin-RevId: 291240093\n\n06093ae3952441c34ec176d1f7431b8765cec0be\nirm: fix v1alpha2 bazel build by adding missing proto imports\n\nPiperOrigin-RevId: 291227940\n\na8a2514af326e4673063f9a3c9d0ef1091c87e6c\nAdd proto annotation for cloud/irm API\n\nPiperOrigin-RevId: 291217859\n\n8d16f76de065f530d395a4c7eabbf766d6a120fd\nGenerate Memcache v1beta2 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 
291008516\n\n3af1dabd93df9a9f17bf3624d3b875c11235360b\ngrafeas: Add containeranalysis default_host to Grafeas service\n\nPiperOrigin-RevId: 290965849\n\nbe2663fa95e31cba67d0cd62611a6674db9f74b7\nfix(google/maps/roads): add missing opening bracket\n\nPiperOrigin-RevId: 290964086\n\nfacc26550a0af0696e0534bc9cae9df14275aa7c\nUpdating v2 protos with the latest inline documentation (in comments) and adding a per-service .yaml file.\n\nPiperOrigin-RevId: 290952261\n\ncda99c1f7dc5e4ca9b1caeae1dc330838cbc1461\nChange api_name to 'asset' for v1p1beta1\n\nPiperOrigin-RevId: 290800639\n\n94e9e90c303a820ce40643d9129e7f0d2054e8a1\nAdds Google Maps Road service\n\nPiperOrigin-RevId: 290795667\n\na3b23dcb2eaecce98c600c7d009451bdec52dbda\nrpc: new message ErrorInfo, other comment updates\n\nPiperOrigin-RevId: 290781668\n\n26420ef4e46c37f193c0fbe53d6ebac481de460e\nAdd proto definition for Org Policy v1.\n\nPiperOrigin-RevId: 290771923\n\n7f0dab8177cf371ae019a082e2512de7ac102888\nPublish Routes Preferred API v1 service definitions.\n\nPiperOrigin-RevId: 290326986\n\nad6e508d0728e1d1bca6e3f328cd562718cb772d\nFix: Qualify resource type references with \"jobs.googleapis.com/\"\n\nPiperOrigin-RevId: 290285762\n\n58e770d568a2b78168ddc19a874178fee8265a9d\ncts client library\n\nPiperOrigin-RevId: 290146169\n\naf9daa4c3b4c4a8b7133b81588dd9ffd37270af2\nAdd more programming language options to public proto\n\nPiperOrigin-RevId: 290144091\n\nd9f2bbf2df301ef84641d4cec7c828736a0bd907\ntalent: add missing resource.proto dep to Bazel build target\n\nPiperOrigin-RevId: 290143164\n\n3b3968237451d027b42471cd28884a5a1faed6c7\nAnnotate Talent API.\nAdd gRPC service config for retry.\nUpdate bazel file with google.api.resource dependency.\n\nPiperOrigin-RevId: 290125172\n\n0735b4b096872960568d1f366bfa75b7b0e1f1a3\nWeekly library update.\n\nPiperOrigin-RevId: 289939042\n\n8760d3d9a4543d7f9c0d1c7870aca08b116e4095\nWeekly library update.\n\nPiperOrigin-RevId: 289939020\n\n8607df842f782a901805187e02fff598145b0b0e\nChange Talent API timeout to 30s.\n\nPiperOrigin-RevId: 289912621\n\n908155991fe32570653bcb72ecfdcfc896642f41\nAdd Recommendations AI V1Beta1\n\nPiperOrigin-RevId: 289901914\n\n5c9a8c2bebd8b71aa66d1cc473edfaac837a2c78\nAdding no-arg method signatures for ListBillingAccounts and ListServices\n\nPiperOrigin-RevId: 289891136\n\n50b0e8286ac988b0593bd890eb31fef6ea2f5767\nlongrunning: add grpc service config and default_host annotation to operations.proto\n\nPiperOrigin-RevId: 289876944\n\n6cac27dabe51c54807b0401698c32d34998948a9\n Updating default deadline for Cloud Security Command Center's v1 APIs.\n\nPiperOrigin-RevId: 289875412\n\nd99df0d67057a233c711187e0689baa4f8e6333d\nFix: Correct spelling in C# namespace option\n\nPiperOrigin-RevId: 289709813\n\n2fa8d48165cc48e35b0c62e6f7bdade12229326c\nfeat: Publish Recommender v1 to GitHub.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289619243\n\n9118db63d1ab493a2e44a3b4973fde810a835c49\nfirestore: don't retry reads that fail with Aborted\n\nFor transaction reads that fail with ABORTED, we need to rollback and start a new transaction. Our current configuration makes it so that GAPIC retries ABORTED reads multiple times without making any progress. 
Instead, we should retry at the transaction level.\n\nPiperOrigin-RevId: 289532382\n\n1dbfd3fe4330790b1e99c0bb20beb692f1e20b8a\nFix bazel build\nAdd other langauges (Java was already there) for bigquery/storage/v1alpha2 api.\n\nPiperOrigin-RevId: 289519766\n\nc06599cdd7d11f8d3fd25f8d3249e5bb1a3d5d73\nInitial commit of google.cloud.policytroubleshooter API, The API helps in troubleshooting GCP policies. Refer https://cloud.google.com/iam/docs/troubleshooting-access for more information\n\nPiperOrigin-RevId: 289491444\n\nfce7d80fa16ea241e87f7bc33d68595422e94ecd\nDo not pass samples option for Artman config of recommender v1 API.\n\nPiperOrigin-RevId: 289477403\n\nef179e8c61436297e6bb124352e47e45c8c80cb1\nfix: Address missing Bazel dependency.\n\nBazel builds stopped working in 06ec6d5 because\nthe google/longrunning/operations.proto file took\nan import from google/api/client.proto, but that\nimport was not added to BUILD.bazel.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446074\n\n8841655b242c84fd691d77d7bcf21b61044f01ff\nMigrate Data Labeling v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446026\n\n06ec6d5d053fff299eaa6eaa38afdd36c5e2fc68\nAdd annotations to google.longrunning.v1\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289413169\n\n0480cf40be1d3cc231f4268a2fdb36a8dd60e641\nMigrate IAM Admin v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289411084\n\n1017173e9adeb858587639af61889ad970c728b1\nSpecify a C# namespace for BigQuery Connection v1beta1\n\nPiperOrigin-RevId: 289396763\n\nb08714b378e8e5b0c4ecdde73f92c36d6303b4b6\nfix: Integrate latest proto-docs-plugin fix.\nFixes dialogflow v2\n\nPiperOrigin-RevId: 289189004\n\n51217a67e79255ee1f2e70a6a3919df082513327\nCreate BUILD file for recommender v1\n\nPiperOrigin-RevId: 289183234\n\nacacd87263c0a60e458561b8b8ce9f67c760552a\nGenerate recommender v1 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 289177510\n\n9d2f7133b97720b1fa3601f6dcd30760ba6d8a1e\nFix kokoro build script\n\nPiperOrigin-RevId: 289166315\n\nc43a67530d2a47a0220cad20ca8de39b3fbaf2c5\ncloudtasks: replace missing RPC timeout config for v2beta2 and v2beta3\n\nPiperOrigin-RevId: 289162391\n\n4cefc229a9197236fc0adf02d69b71c0c5cf59de\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 289158456\n\n56f263fe959c50786dab42e3c61402d32d1417bd\nCatalog API: Adding config necessary to build client libraries\n\nPiperOrigin-RevId: 289149879\n\n4543762b23a57fc3c53d409efc3a9affd47b6ab3\nFix Bazel build\nbilling/v1 and dialogflow/v2 remain broken (not bazel-related issues).\nBilling has wrong configuration, dialogflow failure is caused by a bug in documentation plugin.\n\nPiperOrigin-RevId: 289140194\n\nc9dce519127b97e866ca133a01157f4ce27dcceb\nUpdate Bigtable docs\n\nPiperOrigin-RevId: 289114419\n\n802c5c5f2bf94c3facb011267d04e71942e0d09f\nMigrate DLP to proto annotations (but not GAPIC v2).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289102579\n\n6357f30f2ec3cff1d8239d18b707ff9d438ea5da\nRemove gRPC configuration file that was in the wrong place.\n\nPiperOrigin-RevId: 289096111\n\n360a8792ed62f944109d7e22d613a04a010665b4\n Protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 289011995\n\na79211c20c4f2807eec524d00123bf7c06ad3d6e\nRoll back containeranalysis v1 to GAPIC v1.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288999068\n\n9e60345ba603e03484a8aaa33ce5ffa19c1c652b\nPublish Routes Preferred API v1 proto definitions.\n\nPiperOrigin-RevId: 
288941399\n\nd52885b642ad2aa1f42b132ee62dbf49a73e1e24\nMigrate the service management API to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288909426\n\n6ace586805c08896fef43e28a261337fcf3f022b\ncloudtasks: replace missing RPC timeout config\n\nPiperOrigin-RevId: 288783603\n\n51d906cabee4876b12497054b15b05d4a50ad027\nImport of Grafeas from Github.\n\nUpdate BUILD.bazel accordingly.\n\nPiperOrigin-RevId: 288783426\n\n5ef42bcd363ba0440f0ee65b3c80b499e9067ede\nMigrate Recommender v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288713066\n\n94f986afd365b7d7e132315ddcd43d7af0e652fb\nMigrate Container Analysis v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288708382\n\n7a751a279184970d3b6ba90e4dd4d22a382a0747\nRemove Container Analysis v1alpha1 (nobody publishes it).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288707473\n\n3c0d9c71242e70474b2b640e15bb0a435fd06ff0\nRemove specious annotation from BigQuery Data Transfer before\nanyone accidentally does anything that uses it.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288701604\n\n1af307a4764bd415ef942ac5187fa1def043006f\nMigrate BigQuery Connection to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288698681\n\n" - } - }, - { - "template": { - "name": "python_library", - "origin": "synthtool.gcp", - "version": "2019.10.17" - } - } - ], - "destinations": [ - { - "client": { - "source": "googleapis", - "apiName": "bigtable", - "apiVersion": "v2", - "language": "python", - "generator": "gapic", - "config": "google/bigtable/artman_bigtable.yaml" - } - }, - { - "client": { - "source": "googleapis", - "apiName": "bigtable_admin", - "apiVersion": "v2", - "language": "python", - "generator": "gapic", - "config": "google/bigtable/admin/artman_bigtableadmin.yaml" - } - } - ], - "newFiles": [ - { - "path": ".coveragerc" - }, - { - "path": ".flake8" - }, - { - "path": ".repo-metadata.json" - }, - { - "path": "CHANGELOG.md" - }, - { - "path": "LICENSE" - }, - { - "path": "MANIFEST.in" - }, - { - "path": "README.rst" - }, - { - "path": "docs/README.rst" - }, - { - "path": "docs/_static/custom.css" - }, - { - "path": "docs/_templates/layout.html" - }, - { - "path": "docs/changelog.md" - }, - { - "path": "docs/client-intro.rst" - }, - { - "path": "docs/client.rst" - }, - { - "path": "docs/cluster.rst" - }, - { - "path": "docs/column-family.rst" - }, - { - "path": "docs/conf.py" - }, - { - "path": "docs/data-api.rst" - }, - { - "path": "docs/index.rst" - }, - { - "path": "docs/instance-api.rst" - }, - { - "path": "docs/instance.rst" - }, - { - "path": "docs/row-data.rst" - }, - { - "path": "docs/row-filters.rst" - }, - { - "path": "docs/row.rst" - }, - { - "path": "docs/snippets.py" - }, - { - "path": "docs/snippets_table.py" - }, - { - "path": "docs/table-api.rst" - }, - { - "path": "docs/table.rst" - }, - { - "path": "docs/usage.rst" - }, - { - "path": "google/__init__.py" - }, - { - "path": "google/cloud/__init__.py" - }, - { - "path": "google/cloud/bigtable.py" - }, - { - "path": "google/cloud/bigtable/__init__.py" - }, - { - "path": "google/cloud/bigtable/app_profile.py" - }, - { - "path": "google/cloud/bigtable/batcher.py" - }, - { - "path": "google/cloud/bigtable/client.py" - }, - { - "path": "google/cloud/bigtable/cluster.py" - }, - { - "path": "google/cloud/bigtable/column_family.py" - }, - { - "path": "google/cloud/bigtable/enums.py" - }, - { - "path": "google/cloud/bigtable/instance.py" - }, - { - "path": "google/cloud/bigtable/policy.py" - }, - { - "path": 
"google/cloud/bigtable/row.py" - }, - { - "path": "google/cloud/bigtable/row_data.py" - }, - { - "path": "google/cloud/bigtable/row_filters.py" - }, - { - "path": "google/cloud/bigtable/row_set.py" - }, - { - "path": "google/cloud/bigtable/table.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/__init__.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/__init__.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/enums.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/__init__.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/common.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/common_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/instance.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/instance_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/table.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/table_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/types.py" - }, - { - "path": "google/cloud/bigtable_v2/__init__.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/__init__.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/bigtable_client.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/bigtable_client_config.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/__init__.py" - }, - { - "path": 
"google/cloud/bigtable_v2/proto/bigtable.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_data.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_pb2.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_service.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_table_admin.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_table_data.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_table_service.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/common.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/data.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/data_pb2.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/data_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/instance.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/table.proto" - }, - { - "path": "google/cloud/bigtable_v2/types.py" - }, - { - "path": "noxfile.py" - }, - { - "path": "pylint.config.py" - }, - { - "path": "setup.cfg" - }, - { - "path": "setup.py" - }, - { - "path": "synth.metadata" - }, - { - "path": "synth.py" - }, - { - "path": "tests/__init__.py" - }, - { - "path": "tests/system.py" - }, - { - "path": "tests/unit/__init__.py" - }, - { - "path": "tests/unit/_testing.py" - }, - { - "path": "tests/unit/gapic/v2/test_bigtable_client_v2.py" - }, - { - "path": "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py" - }, - { - "path": "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" - }, - { - "path": "tests/unit/read-rows-acceptance-test.json" - }, - { - "path": "tests/unit/test_app_profile.py" - }, - { - "path": "tests/unit/test_batcher.py" - }, - { - "path": "tests/unit/test_client.py" - }, - { - "path": "tests/unit/test_cluster.py" - }, - { - "path": "tests/unit/test_column_family.py" - }, - { - "path": "tests/unit/test_instance.py" - }, - { - "path": "tests/unit/test_policy.py" - }, - { - "path": "tests/unit/test_row.py" - }, - { - "path": "tests/unit/test_row_data.py" - }, - { - "path": "tests/unit/test_row_filters.py" - }, - { - "path": "tests/unit/test_row_set.py" - }, - { - "path": "tests/unit/test_table.py" - } - ] -} \ No newline at end of file diff --git a/bigtable/synth.py b/bigtable/synth.py deleted file mode 100644 index 32ebc4af2eb1..000000000000 --- a/bigtable/synth.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""This script is used to synthesize generated parts of this library.""" - -import synthtool as s -from synthtool import gcp - -gapic = gcp.GAPICGenerator() -common = gcp.CommonTemplates() - -# ---------------------------------------------------------------------------- -# Generate bigtable and bigtable_admin GAPIC layer -# ---------------------------------------------------------------------------- -library = gapic.py_library( - "bigtable", - "v2", - config_path="/google/bigtable/artman_bigtable.yaml", - artman_output_name="bigtable-v2", - include_protos=True, -) - -s.move(library / "google/cloud/bigtable_v2") -s.move(library / "tests") - -# Generate admin client -library = gapic.py_library( - "bigtable_admin", - "v2", - config_path="/google/bigtable/admin/artman_bigtableadmin.yaml", - artman_output_name="bigtable-admin-v2", - include_protos=True, -) - -s.move(library / "google/cloud/bigtable_admin_v2") -s.move(library / "tests") - -s.replace( - [ - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", - ], - "'google-cloud-bigtable-admin'", - "'google-cloud-bigtable'", -) - -s.replace( - "google/**/*.py", - "from google\.cloud\.bigtable\.admin_v2.proto", - "from google.cloud.bigtable_admin_v2.proto", -) - -s.replace( - ["google/cloud/bigtable_admin_v2/__init__.py"], - " __doc__ = bigtable_instance_admin_client." - "BigtableInstanceAdminClient.__doc__\n", - " __doc__ = (\n" - " bigtable_instance_admin_client.BigtableInstanceAdminClient." - "__doc__)\n", -) - -s.replace( - ["google/cloud/bigtable_v2/gapic/bigtable_client.py"], - "if ``true_mutations`` is empty, and at most\n\n\s*100000.", - "if ``true_mutations`` is empty, and at most 100000.", -) - -s.replace( - ["google/cloud/bigtable_v2/gapic/bigtable_client.py"], - "if ``false_mutations`` is empty, and at most\n\n\s*100000.", - "if ``false_mutations`` is empty, and at most 100000.", -) - -# ---------------------------------------------------------------------------- -# Add templated files -# ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=99) -s.move(templated_files, excludes=['noxfile.py']) - -s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/bigtable/tests/__init__.py b/bigtable/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/bigtable/tests/system.py b/bigtable/tests/system.py deleted file mode 100644 index e9e3ab79179e..000000000000 --- a/bigtable/tests/system.py +++ /dev/null @@ -1,1138 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import operator -import os -import unittest - -from google.api_core.exceptions import TooManyRequests -from google.cloud.environment_vars import BIGTABLE_EMULATOR -from test_utils.retry import RetryErrors -from test_utils.retry import RetryResult -from test_utils.system import EmulatorCreds -from test_utils.system import unique_resource_id - -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud._helpers import _microseconds_from_datetime -from google.cloud._helpers import UTC -from google.cloud.bigtable.client import Client -from google.cloud.bigtable.column_family import MaxVersionsGCRule -from google.cloud.bigtable.policy import Policy -from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE -from google.cloud.bigtable.row_filters import ApplyLabelFilter -from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter -from google.cloud.bigtable.row_filters import RowFilterChain -from google.cloud.bigtable.row_filters import RowFilterUnion -from google.cloud.bigtable.row_data import Cell -from google.cloud.bigtable.row_data import PartialRowData -from google.cloud.bigtable.row_set import RowSet -from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client_config as table_admin_config, -) - -UNIQUE_SUFFIX = unique_resource_id("-") -LOCATION_ID = "us-central1-c" -INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX -INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX -TABLE_ID = "google-cloud-python-test-table" -CLUSTER_ID = INSTANCE_ID + "-cluster" -CLUSTER_ID_DATA = INSTANCE_ID_DATA + "-cluster" -SERVE_NODES = 3 -COLUMN_FAMILY_ID1 = u"col-fam-id1" -COLUMN_FAMILY_ID2 = u"col-fam-id2" -COL_NAME1 = b"col-name1" -COL_NAME2 = b"col-name2" -COL_NAME3 = b"col-name3-but-other-fam" -CELL_VAL1 = b"cell-val" -CELL_VAL2 = b"cell-val-newer" -CELL_VAL3 = b"altcol-cell-val" -CELL_VAL4 = b"foo" -ROW_KEY = b"row-key" -ROW_KEY_ALT = b"row-key-alt" -EXISTING_INSTANCES = [] -LABEL_KEY = u"python-system" -label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") -) -LABELS = {LABEL_KEY: str(label_stamp)} - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. 
- """ - - CLIENT = None - INSTANCE = None - INSTANCE_DATA = None - CLUSTER = None - CLUSTER_DATA = None - IN_EMULATOR = False - - -def _retry_on_unavailable(exc): - """Retry only errors whose status code is 'UNAVAILABLE'.""" - from grpc import StatusCode - - return exc.code() == StatusCode.UNAVAILABLE - - -retry_429 = RetryErrors(TooManyRequests, max_tries=9) - - -def setUpModule(): - from google.cloud.exceptions import GrpcRendezvous - from google.cloud.bigtable.enums import Instance - - # See: https://github.com/googleapis/google-cloud-python/issues/5928 - interfaces = table_admin_config.config["interfaces"] - iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] - methods = iface_config["methods"] - create_table = methods["CreateTable"] - create_table["timeout_millis"] = 90000 - - Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None - - if Config.IN_EMULATOR: - credentials = EmulatorCreds() - Config.CLIENT = Client(admin=True, credentials=credentials) - else: - Config.CLIENT = Client(admin=True) - - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) - Config.CLUSTER = Config.INSTANCE.cluster( - CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES - ) - Config.INSTANCE_DATA = Config.CLIENT.instance( - INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS - ) - Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( - CLUSTER_ID_DATA, location_id=LOCATION_ID - ) - - if not Config.IN_EMULATOR: - retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) - instances, failed_locations = retry(Config.CLIENT.list_instances)() - - if len(failed_locations) != 0: - raise ValueError("List instances failed in module set up.") - - EXISTING_INSTANCES[:] = instances - - # After listing, create the test instances. - admin_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) - admin_op.result(timeout=10) - data_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) - data_op.result(timeout=10) - - -def tearDownModule(): - if not Config.IN_EMULATOR: - retry_429(Config.INSTANCE.delete)() - retry_429(Config.INSTANCE_DATA.delete)() - - -class TestInstanceAdminAPI(unittest.TestCase): - def setUp(self): - if Config.IN_EMULATOR: - self.skipTest("Instance Admin API not supported in emulator") - self.instances_to_delete = [] - - def tearDown(self): - for instance in self.instances_to_delete: - retry_429(instance.delete)() - - def test_list_instances(self): - instances, failed_locations = Config.CLIENT.list_instances() - - self.assertEqual(failed_locations, []) - - found = set([instance.name for instance in instances]) - self.assertTrue(Config.INSTANCE.name in found) - - def test_reload(self): - from google.cloud.bigtable import enums - - # Use same arguments as Config.INSTANCE (created in `setUpModule`) - # so we can use reload() on a fresh instance. - alt_instance = Config.CLIENT.instance(INSTANCE_ID) - # Make sure metadata unset before reloading. 
- alt_instance.display_name = None - - alt_instance.reload() - self.assertEqual(alt_instance.display_name, Config.INSTANCE.display_name) - self.assertEqual(alt_instance.labels, Config.INSTANCE.labels) - self.assertEqual(alt_instance.type_, enums.Instance.Type.PRODUCTION) - - def test_create_instance_defaults(self): - from google.cloud.bigtable import enums - - ALT_INSTANCE_ID = "ndef" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) - ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" - cluster = instance.cluster( - ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES - ) - operation = instance.create(clusters=[cluster]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - # Make sure that by default a PRODUCTION type instance is created - self.assertIsNone(instance.type_) - self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION) - - def test_create_instance(self): - from google.cloud.bigtable import enums - - _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT - - ALT_INSTANCE_ID = "new" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS - ) - ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" - cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID) - operation = instance.create(clusters=[cluster]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - self.assertEqual(instance_alt.labels, LABELS) - self.assertEqual(instance_alt.state, enums.Instance.State.READY) - - def test_cluster_exists(self): - NONEXISTING_CLUSTER_ID = "cluster-id" - - cluster = Config.INSTANCE.cluster(CLUSTER_ID) - alt_cluster = Config.INSTANCE.cluster(NONEXISTING_CLUSTER_ID) - self.assertTrue(cluster.exists()) - self.assertFalse(alt_cluster.exists()) - - def test_instance_exists(self): - NONEXISTING_INSTANCE_ID = "instancer-id" - - alt_instance = Config.CLIENT.instance(NONEXISTING_INSTANCE_ID) - self.assertTrue(Config.INSTANCE.exists()) - self.assertFalse(alt_instance.exists()) - - def test_create_instance_w_two_clusters(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable.table import ClusterState - - _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "dif" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS - ) - - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" - LOCATION_ID_2 = "us-central1-f" - STORAGE_TYPE = enums.StorageType.HDD - cluster_1 = instance.cluster( - ALT_CLUSTER_ID_1, - location_id=LOCATION_ID, - serve_nodes=SERVE_NODES, - default_storage_type=STORAGE_TYPE, - ) - cluster_2 = instance.cluster( - ALT_CLUSTER_ID_2, - location_id=LOCATION_ID_2, - serve_nodes=SERVE_NODES, - default_storage_type=STORAGE_TYPE, - ) - operation = instance.create(clusters=[cluster_1, cluster_2]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - - clusters, failed_locations = instance_alt.list_clusters() - self.assertEqual(failed_locations, []) - - clusters.sort(key=lambda x: x.name) - alt_cluster_1, alt_cluster_2 = clusters - - self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) - self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) - self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) - self.assertEqual( - cluster_1.default_storage_type, alt_cluster_1.default_storage_type - ) - self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) - self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster_2.default_storage_type - ) - - # Test list clusters in project via 'client.list_clusters' - clusters, failed_locations = Config.CLIENT.list_clusters() - self.assertFalse(failed_locations) - found = set([cluster.name for cluster in clusters]) - self.assertTrue( - {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( - found - ) - ) - - temp_table_id = "test-get-cluster-states" - temp_table = instance.table(temp_table_id) - temp_table.create() - result = temp_table.get_cluster_states() - ReplicationState = enums.Table.ReplicationState - expected_results = [ - ClusterState(ReplicationState.STATE_NOT_KNOWN), - ClusterState(ReplicationState.INITIALIZING), - ClusterState(ReplicationState.PLANNED_MAINTENANCE), - ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), - ClusterState(ReplicationState.READY), - ] - cluster_id_list = result.keys() - self.assertEqual(len(cluster_id_list), 2) - self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) - self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) - for clusterstate in result.values(): - self.assertIn(clusterstate, expected_results) - - # Test create app profile with multi_cluster_routing policy - app_profiles_to_delete = [] - description = "routing policy-multy" - app_profile_id_1 = "app_profile_id_1" - routing = enums.RoutingPolicyType.ANY - self._test_create_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - ignore_warnings=True, - ) - app_profiles_to_delete.append(app_profile_id_1) - - # Test list app profiles - self._test_list_app_profiles_helper(instance, [app_profile_id_1]) - - # Test modify app profile app_profile_id_1 - # routing policy to single cluster policy, - # cluster -> ALT_CLUSTER_ID_1, - # allow_transactional_writes -> disallowed - # modify description - description = "to routing policy-single" - routing = enums.RoutingPolicyType.SINGLE - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_1, - allow_transactional_writes=False, - ) - - # Test modify app profile app_profile_id_1 - # cluster -> ALT_CLUSTER_ID_2, - # allow_transactional_writes -> allowed - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ignore_warnings=True, - ) - - # Test create app profile with single cluster routing policy - description = "routing policy-single" - 
app_profile_id_2 = "app_profile_id_2" - routing = enums.RoutingPolicyType.SINGLE - self._test_create_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=False, - ) - app_profiles_to_delete.append(app_profile_id_2) - - # Test list app profiles - self._test_list_app_profiles_helper( - instance, [app_profile_id_1, app_profile_id_2] - ) - - # Test modify app profile app_profile_id_2 to - # allow transactional writes - # Note: no need to set ``ignore_warnings`` to True - # since we are not restrictings anything with this modification. - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ) - - # Test modify app profile app_profile_id_2 routing policy - # to multi_cluster_routing policy - # modify description - description = "to routing policy-multy" - routing = enums.RoutingPolicyType.ANY - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - allow_transactional_writes=False, - ignore_warnings=True, - ) - - # Test delete app profiles - for app_profile_id in app_profiles_to_delete: - self._test_delete_app_profile_helper(app_profile_id, instance) - - def test_update_display_name_and_labels(self): - OLD_DISPLAY_NAME = Config.INSTANCE.display_name - NEW_DISPLAY_NAME = "Foo Bar Baz" - n_label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") - ) - - NEW_LABELS = {LABEL_KEY: str(n_label_stamp)} - Config.INSTANCE.display_name = NEW_DISPLAY_NAME - Config.INSTANCE.labels = NEW_LABELS - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new instance instance and reload it. - instance_alt = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) - self.assertEqual(instance_alt.display_name, OLD_DISPLAY_NAME) - self.assertEqual(instance_alt.labels, LABELS) - instance_alt.reload() - self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) - self.assertEqual(instance_alt.labels, NEW_LABELS) - - # Make sure to put the instance back the way it was for the - # other test cases. - Config.INSTANCE.display_name = OLD_DISPLAY_NAME - Config.INSTANCE.labels = LABELS - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(timeout=10) - - def test_update_type(self): - from google.cloud.bigtable.enums import Instance - - _DEVELOPMENT = Instance.Type.DEVELOPMENT - _PRODUCTION = Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "ndif" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS - ) - operation = instance.create(location_id=LOCATION_ID, serve_nodes=None) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Unset the display_name - instance.display_name = None - - instance.type_ = _PRODUCTION - operation = instance.update() - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new instance instance and reload it. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - self.assertIsNone(instance_alt.type_) - instance_alt.reload() - self.assertEqual(instance_alt.type_, _PRODUCTION) - - def test_update_cluster(self): - NEW_SERVE_NODES = 4 - - Config.CLUSTER.serve_nodes = NEW_SERVE_NODES - - operation = Config.CLUSTER.update() - - # We want to make sure the operation completes. - operation.result(timeout=10) - - # Create a new cluster instance and reload it. - alt_cluster = Config.INSTANCE.cluster(CLUSTER_ID) - alt_cluster.reload() - self.assertEqual(alt_cluster.serve_nodes, NEW_SERVE_NODES) - - # Make sure to put the cluster back the way it was for the - # other test cases. - Config.CLUSTER.serve_nodes = SERVE_NODES - operation = Config.CLUSTER.update() - operation.result(timeout=10) - - def test_create_cluster(self): - from google.cloud.bigtable.enums import StorageType - from google.cloud.bigtable.enums import Cluster - - ALT_CLUSTER_ID = INSTANCE_ID + "-c2" - ALT_LOCATION_ID = "us-central1-f" - ALT_SERVE_NODES = 4 - - cluster_2 = Config.INSTANCE.cluster( - ALT_CLUSTER_ID, - location_id=ALT_LOCATION_ID, - serve_nodes=ALT_SERVE_NODES, - default_storage_type=(StorageType.SSD), - ) - operation = cluster_2.create() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new object instance, reload and make sure it is the same. - alt_cluster = Config.INSTANCE.cluster(ALT_CLUSTER_ID) - alt_cluster.reload() - - self.assertEqual(cluster_2, alt_cluster) - self.assertEqual(cluster_2.location_id, alt_cluster.location_id) - self.assertEqual(alt_cluster.state, Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster.default_storage_type - ) - - # Delete the newly created cluster and confirm - self.assertTrue(cluster_2.exists()) - cluster_2.delete() - self.assertFalse(cluster_2.exists()) - - def _test_create_app_profile_helper( - self, - app_profile_id, - instance, - routing_policy_type, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None, - ): - - app_profile = instance.app_profile( - app_profile_id=app_profile_id, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - self.assertEqual( - app_profile.allow_transactional_writes, allow_transactional_writes - ) - - app_profile = app_profile.create(ignore_warnings=ignore_warnings) - - # Load a different app_profile objec form the server and - # verrify that it is the same - alt_app_profile = instance.app_profile(app_profile_id) - alt_app_profile.reload() - - self.assertEqual(app_profile.app_profile_id, alt_app_profile.app_profile_id) - self.assertEqual(app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(app_profile.description, alt_app_profile.description) - self.assertFalse(app_profile.allow_transactional_writes) - self.assertFalse(alt_app_profile.allow_transactional_writes) - - def _test_list_app_profiles_helper(self, instance, app_profile_ids): - app_profiles = instance.list_app_profiles() - found = [app_prof.app_profile_id for app_prof in app_profiles] - for app_profile_id in app_profile_ids: - self.assertTrue(app_profile_id in found) - - def _test_modify_app_profile_helper( - self, - app_profile_id, - instance, - routing_policy_type, - description=None, - cluster_id=None, - 
allow_transactional_writes=None, - ignore_warnings=None, - ): - app_profile = instance.app_profile( - app_profile_id=app_profile_id, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - - operation = app_profile.update(ignore_warnings) - operation.result(timeout=30) - - alt_app_profile = instance.app_profile(app_profile_id) - alt_app_profile.reload() - self.assertEqual(alt_app_profile.description, description) - self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(alt_app_profile.cluster_id, cluster_id) - self.assertEqual( - alt_app_profile.allow_transactional_writes, allow_transactional_writes - ) - - def _test_delete_app_profile_helper(self, app_profile_id, instance): - app_profile = instance.app_profile(app_profile_id) - self.assertTrue(app_profile.exists()) - app_profile.delete(ignore_warnings=True) - self.assertFalse(app_profile.exists()) - - -class TestTableAdminAPI(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls._table = Config.INSTANCE_DATA.table(TABLE_ID) - cls._table.create() - - @classmethod - def tearDownClass(cls): - cls._table.delete() - - def setUp(self): - self.tables_to_delete = [] - - def tearDown(self): - for table in self.tables_to_delete: - table.delete() - - def test_list_tables(self): - # Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the - # table created in `setUpClass` here will be the only one. - tables = Config.INSTANCE_DATA.list_tables() - self.assertEqual(tables, [self._table]) - - def test_exists(self): - retry_until_true = RetryResult(lambda result: result) - retry_until_false = RetryResult(lambda result: not result) - temp_table_id = "test-table_existence" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - self.assertFalse(temp_table.exists()) - temp_table.create() - self.assertTrue(retry_until_true(temp_table.exists)()) - temp_table.delete() - self.assertFalse(retry_until_false(temp_table.exists)()) - - def test_create_table(self): - temp_table_id = "test-create-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - # First, create a sorted version of our expected result. - name_attr = operator.attrgetter("name") - expected_tables = sorted([temp_table, self._table], key=name_attr) - - # Then query for the tables in the instance and sort them by - # name as well. 
- tables = Config.INSTANCE_DATA.list_tables() - sorted_tables = sorted(tables, key=name_attr) - self.assertEqual(sorted_tables, expected_tables) - - def test_test_iam_permissions(self): - temp_table_id = "test-test-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] - permissions_allowed = temp_table.test_iam_permissions(permissions) - self.assertEqual(permissions, permissions_allowed) - - def test_get_iam_policy(self): - temp_table_id = "test-get-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - policy = temp_table.get_iam_policy().to_api_repr() - self.assertEqual(policy["etag"], "ACAB") - self.assertEqual(policy["version"], 0) - - def test_set_iam_policy(self): - temp_table_id = "test-set-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - new_policy = Policy() - service_account_email = Config.CLIENT._credentials.service_account_email - new_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.service_account(service_account_email) - ] - policy_latest = temp_table.set_iam_policy(new_policy).to_api_repr() - - self.assertEqual(policy_latest["bindings"][0]["role"], "roles/bigtable.admin") - self.assertIn(service_account_email, policy_latest["bindings"][0]["members"][0]) - - def test_create_table_with_families(self): - temp_table_id = "test-create-table-with-failies" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - gc_rule = MaxVersionsGCRule(1) - temp_table.create(column_families={COLUMN_FAMILY_ID1: gc_rule}) - self.tables_to_delete.append(temp_table) - - col_fams = temp_table.list_column_families() - - self.assertEqual(len(col_fams), 1) - retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] - self.assertIs(retrieved_col_fam._table, temp_table) - self.assertEqual(retrieved_col_fam.column_family_id, COLUMN_FAMILY_ID1) - self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - - def test_create_table_with_split_keys(self): - temp_table_id = "foo-bar-baz-split-table" - initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create(initial_split_keys=initial_split_keys) - self.tables_to_delete.append(temp_table) - - # Read Sample Row Keys for created splits - sample_row_keys = temp_table.sample_row_keys() - actual_keys = [srk.row_key for srk in sample_row_keys] - - expected_keys = initial_split_keys - expected_keys.append(b"") - - self.assertEqual(actual_keys, expected_keys) - - def test_create_column_family(self): - temp_table_id = "test-create-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - self.assertEqual(temp_table.list_column_families(), {}) - gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) - column_family.create() - - col_fams = temp_table.list_column_families() - - self.assertEqual(len(col_fams), 1) - retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] - self.assertIs(retrieved_col_fam._table, column_family._table) - self.assertEqual( - retrieved_col_fam.column_family_id, column_family.column_family_id - ) - self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - - def test_update_column_family(self): - temp_table_id = 
"test-update-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) - column_family.create() - - # Check that our created table is as expected. - col_fams = temp_table.list_column_families() - self.assertEqual(col_fams, {COLUMN_FAMILY_ID1: column_family}) - - # Update the column family's GC rule and then try to update. - column_family.gc_rule = None - column_family.update() - - # Check that the update has propagated. - col_fams = temp_table.list_column_families() - self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) - - def test_delete_column_family(self): - temp_table_id = "test-delete-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - self.assertEqual(temp_table.list_column_families(), {}) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1) - column_family.create() - - # Make sure the family is there before deleting it. - col_fams = temp_table.list_column_families() - self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1]) - - column_family.delete() - # Make sure we have successfully deleted it. - self.assertEqual(temp_table.list_column_families(), {}) - - -class TestDataAPI(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls._table = table = Config.INSTANCE_DATA.table("test-data-api") - table.create() - table.column_family(COLUMN_FAMILY_ID1).create() - table.column_family(COLUMN_FAMILY_ID2).create() - - @classmethod - def tearDownClass(cls): - # Will also delete any data contained in the table. - cls._table.delete() - - def _maybe_emulator_skip(self, message): - # NOTE: This method is necessary because ``Config.IN_EMULATOR`` - # is set at runtime rather than import time, which means we - # can't use the @unittest.skipIf decorator. - if Config.IN_EMULATOR: - self.skipTest(message) - - def setUp(self): - self.rows_to_delete = [] - - def tearDown(self): - for row in self.rows_to_delete: - row.clear() - row.delete() - row.commit() - - def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): - timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) - timestamp1_micros = _microseconds_from_datetime(timestamp1) - # Truncate to millisecond granularity. - timestamp1_micros -= timestamp1_micros % 1000 - timestamp1 = _datetime_from_microseconds(timestamp1_micros) - # 1000 microseconds is a millisecond - timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) - timestamp2_micros = _microseconds_from_datetime(timestamp2) - timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) - timestamp3_micros = _microseconds_from_datetime(timestamp3) - timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) - timestamp4_micros = _microseconds_from_datetime(timestamp4) - - if row1 is not None: - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) - if row2 is not None: - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, timestamp=timestamp2) - if row3 is not None: - row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, timestamp=timestamp3) - if row4 is not None: - row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, timestamp=timestamp4) - - # Create the cells we will check. 
- cell1 = Cell(CELL_VAL1, timestamp1_micros) - cell2 = Cell(CELL_VAL2, timestamp2_micros) - cell3 = Cell(CELL_VAL3, timestamp3_micros) - cell4 = Cell(CELL_VAL4, timestamp4_micros) - return cell1, cell2, cell3, cell4 - - def test_timestamp_filter_millisecond_granularity(self): - from google.cloud.bigtable import row_filters - - end = datetime.datetime.now() - start = end - datetime.timedelta(minutes=60) - timestamp_range = row_filters.TimestampRange(start=start, end=end) - timefilter = row_filters.TimestampRangeFilter(timestamp_range) - row_data = self._table.read_rows(filter_=timefilter) - row_data.consume_all() - - def test_mutate_rows(self): - row1 = self._table.row(ROW_KEY) - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row1.commit() - self.rows_to_delete.append(row1) - row2 = self._table.row(ROW_KEY_ALT) - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2) - row2.commit() - self.rows_to_delete.append(row2) - - # Change the contents - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL3) - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4) - rows = [row1, row2] - statuses = self._table.mutate_rows(rows) - result = [status.code for status in statuses] - expected_result = [0, 0] - self.assertEqual(result, expected_result) - - # Check the contents - row1_data = self._table.read_row(ROW_KEY) - self.assertEqual( - row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3 - ) - row2_data = self._table.read_row(ROW_KEY_ALT) - self.assertEqual( - row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4 - ) - - def test_truncate_table(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_pr_1", - b"row_key_pr_2", - b"row_key_pr_3", - b"row_key_pr_4", - b"row_key_pr_5", - ] - - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - self.rows_to_delete.append(row) - - self._table.truncate(timeout=200) - - read_rows = self._table.yield_rows() - - for row in read_rows: - self.assertNotIn(row.row_key.decode("utf-8"), row_keys) - - def test_drop_by_prefix_table(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_pr_1", - b"row_key_pr_2", - b"row_key_pr_3", - b"row_key_pr_4", - b"row_key_pr_5", - ] - - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - self.rows_to_delete.append(row) - - self._table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) - - read_rows = self._table.yield_rows() - expected_rows_count = 5 - read_rows_count = 0 - - for row in read_rows: - if row.row_key in row_keys: - read_rows_count += 1 - - self.assertEqual(expected_rows_count, read_rows_count) - - def test_yield_rows_with_row_set(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - b"row_key_7", - b"row_key_8", - b"row_key_9", - ] - - rows = [] - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - self.rows_to_delete.append(row) - self._table.mutate_rows(rows) - - row_set = RowSet() - row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) - row_set.add_row_key(b"row_key_1") - - read_rows = self._table.yield_rows(row_set=row_set) - - expected_row_keys = [ - b"row_key_1", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - ] - 
found_row_keys = [row.row_key for row in read_rows] - self.assertEqual(found_row_keys, expected_row_keys) - - def test_read_large_cell_limit(self): - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - number_of_bytes = 10 * 1024 * 1024 - data = b"1" * number_of_bytes # 10MB of 1's. - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) - row.commit() - - # Read back the contents of the row. - partial_row_data = self._table.read_row(ROW_KEY) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - cell = partial_row_data.cells[COLUMN_FAMILY_ID1] - column = cell[COL_NAME1] - self.assertEqual(len(column), 1) - self.assertEqual(column[0].value, data) - - def test_read_row(self): - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - cell1, cell2, cell3, cell4 = self._write_to_row(row, row, row, row) - row.commit() - - # Read back the contents of the row. - partial_row_data = self._table.read_row(ROW_KEY) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - - # Check the cells match. - ts_attr = operator.attrgetter("timestamp") - expected_row_contents = { - COLUMN_FAMILY_ID1: { - COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), - COL_NAME2: [cell3], - }, - COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, - } - self.assertEqual(partial_row_data.cells, expected_row_contents) - - def test_read_rows(self): - row = self._table.row(ROW_KEY) - row_alt = self._table.row(ROW_KEY_ALT) - self.rows_to_delete.extend([row, row_alt]) - - cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt, row, row_alt) - row.commit() - row_alt.commit() - - rows_data = self._table.read_rows() - self.assertEqual(rows_data.rows, {}) - rows_data.consume_all() - - # NOTE: We should refrain from editing protected data on instances. - # Instead we should make the values public or provide factories - # for constructing objects with them. - row_data = PartialRowData(ROW_KEY) - row_data._chunks_encountered = True - row_data._committed = True - row_data._cells = {COLUMN_FAMILY_ID1: {COL_NAME1: [cell1], COL_NAME2: [cell3]}} - - row_alt_data = PartialRowData(ROW_KEY_ALT) - row_alt_data._chunks_encountered = True - row_alt_data._committed = True - row_alt_data._cells = { - COLUMN_FAMILY_ID1: {COL_NAME1: [cell2]}, - COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, - } - - expected_rows = {ROW_KEY: row_data, ROW_KEY_ALT: row_alt_data} - self.assertEqual(rows_data.rows, expected_rows) - - def test_read_with_label_applied(self): - self._maybe_emulator_skip("Labels not supported by Bigtable emulator") - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - cell1, _, cell3, _ = self._write_to_row(row, None, row) - row.commit() - - # Combine a label with column 1. - label1 = u"label-red" - label1_filter = ApplyLabelFilter(label1) - col1_filter = ColumnQualifierRegexFilter(COL_NAME1) - chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) - - # Combine a label with column 2. - label2 = u"label-blue" - label2_filter = ApplyLabelFilter(label2) - col2_filter = ColumnQualifierRegexFilter(COL_NAME2) - chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) - - # Bring our two labeled columns together. - row_filter = RowFilterUnion(filters=[chain1, chain2]) - partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - - cells_returned = partial_row_data.cells - col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1) - # Make sure COLUMN_FAMILY_ID1 was the only key. 
- self.assertEqual(len(cells_returned), 0) - - (cell1_new,) = col_fam1.pop(COL_NAME1) - (cell3_new,) = col_fam1.pop(COL_NAME2) - # Make sure COL_NAME1 and COL_NAME2 were the only keys. - self.assertEqual(len(col_fam1), 0) - - # Check that cell1 has matching values and gained a label. - self.assertEqual(cell1_new.value, cell1.value) - self.assertEqual(cell1_new.timestamp, cell1.timestamp) - self.assertEqual(cell1.labels, []) - self.assertEqual(cell1_new.labels, [label1]) - - # Check that cell3 has matching values and gained a label. - self.assertEqual(cell3_new.value, cell3.value) - self.assertEqual(cell3_new.timestamp, cell3.timestamp) - self.assertEqual(cell3.labels, []) - self.assertEqual(cell3_new.labels, [label2]) - - def test_access_with_non_admin_client(self): - client = Client(admin=False) - instance = client.instance(INSTANCE_ID_DATA) - table = instance.table(self._table.table_id) - self.assertIsNone(table.read_row("nonesuch")) diff --git a/bigtable/tests/unit/__init__.py b/bigtable/tests/unit/__init__.py deleted file mode 100644 index df379f1e9d88..000000000000 --- a/bigtable/tests/unit/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/bigtable/tests/unit/_testing.py b/bigtable/tests/unit/_testing.py deleted file mode 100644 index 302d33ac1540..000000000000 --- a/bigtable/tests/unit/_testing.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Mocks used to emulate gRPC generated objects.""" - - -import mock - - -class _FakeStub(object): - """Acts as a gPRC stub.""" - - def __init__(self, *results): - self.results = results - self.method_calls = [] - - -def _make_credentials(): - import google.auth.credentials - - class _CredentialsWithScopes( - google.auth.credentials.Credentials, google.auth.credentials.Scoped - ): - pass - - return mock.Mock(spec=_CredentialsWithScopes) diff --git a/bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py deleted file mode 100644 index 84abfecef5a0..000000000000 --- a/bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import bigtable_v2 -from google.cloud.bigtable_v2.proto import bigtable_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableClient(object): - def test_read_rows(self): - # Setup Expected Response - last_scanned_row_key = b"-126" - expected_response = {"last_scanned_row_key": last_scanned_row_key} - expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.read_rows(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.read_rows(table_name) - - def test_sample_row_keys(self): - # Setup Expected Response - row_key = b"122" - offset_bytes = 889884095 - expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} - expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup 
Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.sample_row_keys(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_sample_row_keys_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.sample_row_keys(table_name) - - def test_mutate_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - response = client.mutate_row(table_name, row_key, mutations) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowRequest( - table_name=table_name, row_key=row_key, mutations=mutations - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - with pytest.raises(CustomException): - client.mutate_row(table_name, row_key, mutations) - - def test_mutate_rows(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - response = client.mutate_rows(table_name, entries) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup 
request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - with pytest.raises(CustomException): - client.mutate_rows(table_name, entries) - - def test_check_and_mutate_row(self): - # Setup Expected Response - predicate_matched = True - expected_response = {"predicate_matched": predicate_matched} - expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - response = client.check_and_mutate_row(table_name, row_key) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, row_key=row_key - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_and_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - with pytest.raises(CustomException): - client.check_and_mutate_row(table_name, row_key) - - def test_read_modify_write_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - response = client.read_modify_write_row(table_name, row_key, rules) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, row_key=row_key, rules=rules - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_modify_write_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - with pytest.raises(CustomException): - client.read_modify_write_row(table_name, row_key, rules) diff --git a/bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py deleted file mode 100644 index e1de090542c6..000000000000 --- a/bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ /dev/null @@ -1,937 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may 
not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2 import enums -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableInstanceAdminClient(object): - def test_create_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance, clusters=clusters - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with 
patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_instance(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_instance(name) - - def test_list_instances(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - response = client.list_instances(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instances_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - with pytest.raises(CustomException): - client.list_instances(parent) - - def test_update_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name_2 = "displayName21615000987" - expected_response = {"name": name_2, "display_name": display_name_2} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - display_name = "displayName1615086568" - type_ = enums.Instance.Type.TYPE_UNSPECIFIED - labels = {} - - response = client.update_instance(name, display_name, type_, labels) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Instance( - name=name, display_name=display_name, type=type_, labels=labels - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - display_name = "displayName1615086568" - type_ = enums.Instance.Type.TYPE_UNSPECIFIED - labels = {} - - with pytest.raises(CustomException): - client.update_instance(name, display_name, type_, labels) - - def test_partial_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partial_update_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_instance(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - client.delete_instance(name) - - 
assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.delete_instance(name) - - def test_create_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_cluster(self): - # Setup Expected Response - name_2 = "name2-1052831874" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name_2, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - response = client.get_cluster(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = 
bigtable_instance_admin_pb2.GetClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.get_cluster(name) - - def test_list_clusters(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListClustersResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.list_clusters(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.list_clusters(parent) - - def test_update_cluster(self): - # Setup Expected Response - name_2 = "name2-1052831874" - location = "location1901043637" - serve_nodes_2 = 1623486220 - expected_response = { - "name": name_2, - "location": location, - "serve_nodes": serve_nodes_2, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - serve_nodes = 1288838783 - - response = client.update_cluster(name, serve_nodes) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster(name=name, serve_nodes=serve_nodes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - serve_nodes = 1288838783 - - response = client.update_cluster(name, serve_nodes) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_cluster(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - client.delete_cluster(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.delete_cluster(name) - - def test_create_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - response = client.create_app_profile(parent, app_profile_id, app_profile) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, app_profile_id=app_profile_id, app_profile=app_profile - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - with pytest.raises(CustomException): - client.create_app_profile(parent, app_profile_id, app_profile) - - def test_get_app_profile(self): - # Setup Expected Response - name_2 = "name2-1052831874" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name_2, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = 
ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - response = client.get_app_profile(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.get_app_profile(name) - - def test_list_app_profiles(self): - # Setup Expected Response - next_page_token = "" - app_profiles_element = {} - app_profiles = [app_profiles_element] - expected_response = { - "next_page_token": next_page_token, - "app_profiles": app_profiles, - } - expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.app_profiles[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_app_profiles_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_app_profile", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = 
client.update_app_profile(app_profile, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_app_profile_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_app_profile_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_app_profile(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - ignore_warnings = True - - client.delete_app_profile(name, ignore_warnings) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - ignore_warnings = True - - with pytest.raises(CustomException): - client.delete_app_profile(name, ignore_warnings) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = 
client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) diff --git a/bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py deleted file mode 100644 index d1a843164982..000000000000 --- a/bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ /dev/null @@ -1,760 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableTableAdminClient(object): - def test_create_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - response = client.create_table(parent, table_id, table) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - with pytest.raises(CustomException): - client.create_table(parent, table_id, table) - - def test_create_table_from_snapshot(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot", done=True - ) - 
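The MultiCallableStub and ChannelStub classes deleted in this file are what let every test run without a real gRPC channel: each request proto is recorded for later assertions, canned responses are replayed, and an Exception instance placed in `responses` is raised instead of returned. A condensed, self-contained restatement of that pattern with a small standalone driver follows; the method path and request strings are illustrative only.

```python
# Condensed sketch of the channel-stub pattern used by the deleted test modules.
import pytest


class MultiCallableStub(object):
    """Records the request, then returns or raises the next canned response."""

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        self.channel_stub.requests.append((self.method, request))
        response = self.channel_stub.responses.pop() if self.channel_stub.responses else None
        if isinstance(response, Exception):
            raise response
        return response


class ChannelStub(object):
    """Stands in for grpc.Channel; only unary_unary is needed by these tests."""

    def __init__(self, responses=()):
        self.responses = list(responses)
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        return MultiCallableStub(method, self)


class CustomException(Exception):
    pass


channel = ChannelStub(responses=["canned-response", CustomException()])
method = channel.unary_unary("/google.bigtable.admin.v2.BigtableTableAdmin/GetTable")

# Responses are popped from the end of the list, so the exception comes back first.
with pytest.raises(CustomException):
    method("request-1")

assert method("request-2") == "canned-response"
assert [req for _, req in channel.requests] == ["request-1", "request-2"]
```

In the deleted tests, patching google.api_core.grpc_helpers.create_channel to return such a stub is what allows a real BigtableTableAdminClient to be constructed and exercised offline.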
operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = "sourceSnapshot-947679896" - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_from_snapshot_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = "sourceSnapshot-947679896" - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - exception = response.exception() - assert exception.errors[0] == error - - def test_list_tables(self): - # Setup Expected Response - next_page_token = "" - tables_element = {} - tables = [tables_element] - expected_response = {"next_page_token": next_page_token, "tables": tables} - expected_response = bigtable_table_admin_pb2.ListTablesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.tables[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_tables_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.get_table(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.get_table(name) - - def test_delete_table(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.delete_table(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.delete_table(name) - - def test_modify_column_families(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - response = client.modify_column_families(name, modifications) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_modify_column_families_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - with pytest.raises(CustomException): - client.modify_column_families(name, modifications) - - def test_drop_row_range(self): 
- channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.drop_row_range(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_drop_row_range_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.drop_row_range(name) - - def test_generate_consistency_token(self): - # Setup Expected Response - consistency_token = "consistencyToken-1090516718" - expected_response = {"consistency_token": consistency_token} - expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.generate_consistency_token(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_generate_consistency_token_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.generate_consistency_token(name) - - def test_check_consistency(self): - # Setup Expected Response - consistent = True - expected_response = {"consistent": consistent} - expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - response = client.check_consistency(name, consistency_token) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_consistency_exception(self): - # Mock 
the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - with pytest.raises(CustomException): - client.check_consistency(name, consistency_token) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = 
ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) - - def test_snapshot_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description_2 = "description2568623279" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description_2, - } - expected_response = table_pb2.Snapshot(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_snapshot_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = "cluster872092154" - snapshot_id = "snapshotId-168585866" - description = "description-1724546052" - - response = client.snapshot_table(name, cluster, snapshot_id, description) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, cluster=cluster, snapshot_id=snapshot_id, description=description - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_snapshot_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_snapshot_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = "cluster872092154" - snapshot_id = "snapshotId-168585866" - description = "description-1724546052" - - response = client.snapshot_table(name, cluster, snapshot_id, description) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_snapshot(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 
2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.get_snapshot(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): - client.get_snapshot(name) - - def test_list_snapshots(self): - # Setup Expected Response - next_page_token = "" - snapshots_element = {} - snapshots = [snapshots_element] - expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} - expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.snapshots[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_snapshots_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_snapshot(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - client.delete_snapshot(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - 
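Further down, this diff also removes bigtable/tests/unit/read-rows-acceptance-test.json. Each entry in that file pairs text-format ReadRowsResponse.CellChunk strings ("chunks") with the flattened cells a conforming row merger must emit ("results": rk, fm, qual, ts, value, label, error), with "error": true marking chunk streams that must be rejected. A sketch of a loader for that file is below; the helper name is mine, and the proto import path reflects the 2020-era package layout, so treat both as assumptions.

```python
import json

from google.protobuf import text_format

# Assumed 2020-era import path; newer google-cloud-bigtable releases expose the
# same message under google.cloud.bigtable_v2.types.
from google.cloud.bigtable_v2.proto import bigtable_pb2


def load_acceptance_cases(path="read-rows-acceptance-test.json"):
    """Yield (name, parsed CellChunks, expected results, expects_error) per case."""
    with open(path) as handle:
        doc = json.load(handle)

    for case in doc["tests"]:
        chunks = []
        for chunk_text in case["chunks"]:
            # Each chunk string is a text-format ReadRowsResponse.CellChunk.
            chunk = bigtable_pb2.ReadRowsResponse.CellChunk()
            text_format.Merge(chunk_text, chunk)
            chunks.append(chunk)
        expects_error = any(result["error"] for result in case["results"])
        yield case["name"], chunks, case["results"], expects_error
```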
assert expected_request == actual_request - - def test_delete_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): - client.delete_snapshot(name) diff --git a/bigtable/tests/unit/read-rows-acceptance-test.json b/bigtable/tests/unit/read-rows-acceptance-test.json deleted file mode 100644 index cfa8a17f327b..000000000000 --- a/bigtable/tests/unit/read-rows-acceptance-test.json +++ /dev/null @@ -1,1205 +0,0 @@ -{ - "tests": [ - { - "name": "invalid - no commit", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - no cell key before commit", - "chunks": [ - "commit_row: true\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - no cell key before value", - "chunks": [ - "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - new col family must specify qualifier", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "bare commit implies ts=0", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "commit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false - } - ] - }, - { - "name": "simple row with timestamp", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - } - ] - }, - { - "name": "missing timestamp, implied ts=0", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "value-VAL", - "label": "", - "error": false - } - ] - }, - { - "name": "empty cell value", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: 
\"C\"\n\u003e\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false - } - ] - }, - { - "name": "two unsplit cells", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "two qualifiers", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "D", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "two families", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "B", - "qual": "E", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "with labels", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "L_1", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "L_2", - "error": false - } - ] - }, - { - "name": "split cell, bare commit", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL\"\ncommit_row: false\n", - "commit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false - } - ] - }, - { - "name": "split cell", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false 
- } - ] - }, - { - "name": "split four ways", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"ue-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "L", - "error": false - } - ] - }, - { - "name": "two split cells", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "multi-qualifier splits", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "D", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "multi-qualifier multi-split", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"lue-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"lue-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "D", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "multi-family split", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_1\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "B", - "qual": "E", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - 
"name": "invalid - no commit between rows", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - no commit after first row", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - last row missing commit", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - }, - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - duplicate row key", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - }, - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - new row missing row key", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", - "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - }, - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "two rows", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "A", - "qual": "C", - 
"ts": 100, - "value": "value-VAL", - "label": "", - "error": false - } - ] - }, - { - "name": "two rows implicit timestamp", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "value-VAL", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - } - ] - }, - { - "name": "two rows empty value", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - } - ] - }, - { - "name": "two rows, one with multiple cells", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "B", - "qual": "D", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false - } - ] - }, - { - "name": "two rows, multiple cells", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK_1", - "fm": "A", - "qual": "D", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "B", - "qual": "E", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "B", - "qual": "F", - "ts": 104, - "value": "value-VAL_4", - "label": "", - "error": false - } - ] - }, - { - "name": "two rows, multiple cells, multiple families", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: 
\u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK_1", - "fm": "B", - "qual": "E", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "M", - "qual": "O", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "N", - "qual": "P", - "ts": 104, - "value": "value-VAL_4", - "label": "", - "error": false - } - ] - }, - { - "name": "two rows, four cells, 2 labels", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", - "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "L_1", - "error": false - }, - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "B", - "qual": "D", - "ts": 103, - "value": "value-VAL_3", - "label": "L_3", - "error": false - }, - { - "rk": "RK_2", - "fm": "B", - "qual": "D", - "ts": 104, - "value": "value-VAL_4", - "label": "", - "error": false - } - ] - }, - { - "name": "two rows with splits, same timestamp", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_1\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "invalid - bare reset", - "chunks": [ - "reset_row: true\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - bad reset, no commit", - "chunks": [ - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" - ], - "results": [ - { - "rk": "", - "fm": "", - 
"qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - missing key after reset", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "reset_row: true\n", - "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "no data after reset", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "reset_row: true\n" - ], - "results": null - }, - { - "name": "simple reset", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - } - ] - }, - { - "name": "reset to new val", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "reset to new qual", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "D", - "ts": 100, - "value": "value-VAL_1", - "label": "", - "error": false - } - ] - }, - { - "name": "reset with splits", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "reset two cells", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: 
\"value-VAL_2\"\ncommit_row: false\n", - "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false - } - ] - }, - { - "name": "two resets", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_3", - "label": "", - "error": false - } - ] - }, - { - "name": "reset then two cells", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "B", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "B", - "qual": "D", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false - } - ] - }, - { - "name": "reset to new row", - "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - }, - { - "name": "reset in between chunks", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_1", - "label": "", - "error": false - } - ] - }, - { - "name": "invalid - reset with chunk", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\nreset_row: true\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - 
"ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "invalid - commit with chunk", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" - ], - "results": [ - { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", - "error": true - } - ] - }, - { - "name": "empty cell chunk", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "commit_row: false\n", - "commit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false - } - ] - }, - { - "name": "empty second qualifier", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 99\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"\"\n\u003e\ntimestamp_micros: 98\nvalue: \"value-VAL_2\"\ncommit_row: true\n" - ], - "results": [ - { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 99, - "value": "value-VAL_1", - "label": "", - "error": false - }, - { - "rk": "RK", - "fm": "A", - "qual": "", - "ts": 98, - "value": "value-VAL_2", - "label": "", - "error": false - } - ] - } - ] -} diff --git a/bigtable/tests/unit/test_app_profile.py b/bigtable/tests/unit/test_app_profile.py deleted file mode 100644 index f7ec0a85511f..000000000000 --- a/bigtable/tests/unit/test_app_profile.py +++ /dev/null @@ -1,672 +0,0 @@ -# Copyright 2018 Google LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import unittest - -import mock - -from ._testing import _make_credentials - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - return self.channel_stub.responses.pop() - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class TestAppProfile(unittest.TestCase): - - PROJECT = "project" - INSTANCE_ID = "instance-id" - APP_PROFILE_ID = "app-profile-id" - APP_PROFILE_NAME = "projects/{}/instances/{}/appProfiles/{}".format( - PROJECT, INSTANCE_ID, APP_PROFILE_ID - ) - CLUSTER_ID = "cluster-id" - OP_ID = 8765 - OP_NAME = "operations/projects/{}/instances/{}/appProfiles/{}/operations/{}".format( - PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID - ) - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.app_profile import AppProfile - - return AppProfile - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor_defaults(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - app_profile = self._make_one(self.APP_PROFILE_ID, instance) - self.assertIsInstance(app_profile, self._get_target_class()) - self.assertEqual(app_profile._instance, instance) - self.assertIsNone(app_profile.routing_policy_type) - self.assertIsNone(app_profile.description) - self.assertIsNone(app_profile.cluster_id) - self.assertIsNone(app_profile.allow_transactional_writes) - - def test_constructor_non_defaults(self): - from google.cloud.bigtable.enums import RoutingPolicyType - - ANY = RoutingPolicyType.ANY - DESCRIPTION_1 = "routing policy any" - APP_PROFILE_ID_2 = "app-profile-id-2" - SINGLE = RoutingPolicyType.SINGLE - DESCRIPTION_2 = "routing policy single" - ALLOW_WRITES = True - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - app_profile1 = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=ANY, - description=DESCRIPTION_1, - ) - app_profile2 = self._make_one( - APP_PROFILE_ID_2, - instance, - routing_policy_type=SINGLE, - description=DESCRIPTION_2, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=ALLOW_WRITES, - ) - self.assertEqual(app_profile1.app_profile_id, self.APP_PROFILE_ID) - self.assertIs(app_profile1._instance, instance) - self.assertEqual(app_profile1.routing_policy_type, ANY) - self.assertEqual(app_profile1.description, DESCRIPTION_1) - self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2) - self.assertIs(app_profile2._instance, instance) - self.assertEqual(app_profile2.routing_policy_type, SINGLE) - self.assertEqual(app_profile2.description, DESCRIPTION_2) - self.assertEqual(app_profile2.cluster_id, self.CLUSTER_ID) - self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES) - - def test_name_property(self): - credentials = _make_credentials() - client = 
self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = _Instance(self.INSTANCE_ID, client) - - app_profile = self._make_one(self.APP_PROFILE_ID, instance) - self.assertEqual(app_profile.name, self.APP_PROFILE_NAME) - - def test___eq__(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) - app_profile2 = self._make_one(self.APP_PROFILE_ID, instance) - self.assertTrue(app_profile1 == app_profile2) - - def test___eq__type_instance_differ(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - alt_instance = _Instance("other-instance", client) - other_object = _Other(self.APP_PROFILE_ID, instance) - app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) - app_profile2 = self._make_one(self.APP_PROFILE_ID, alt_instance) - self.assertFalse(app_profile1 == other_object) - self.assertFalse(app_profile1 == app_profile2) - - def test___ne__same_value(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) - app_profile2 = self._make_one(self.APP_PROFILE_ID, instance) - self.assertFalse(app_profile1 != app_profile2) - - def test___ne__(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - app_profile1 = self._make_one("app_profile_id1", instance) - app_profile2 = self._make_one("app_profile_id2", instance) - self.assertTrue(app_profile1 != app_profile2) - - def test_from_pb_success_routing_any(self): - from google.cloud.bigtable_admin_v2.types import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable.enums import RoutingPolicyType - - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - desctiption = "routing any" - routing = RoutingPolicyType.ANY - multi_cluster_routing_use_any = ( - data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() - ) - - app_profile_pb = data_v2_pb2.AppProfile( - name=self.APP_PROFILE_NAME, - description=desctiption, - multi_cluster_routing_use_any=multi_cluster_routing_use_any, - ) - - klass = self._get_target_class() - app_profile = klass.from_pb(app_profile_pb, instance) - self.assertIsInstance(app_profile, klass) - self.assertIs(app_profile._instance, instance) - self.assertEqual(app_profile.app_profile_id, self.APP_PROFILE_ID) - self.assertEqual(app_profile.description, desctiption) - self.assertEqual(app_profile.routing_policy_type, routing) - self.assertIsNone(app_profile.cluster_id) - self.assertEqual(app_profile.allow_transactional_writes, False) - - def test_from_pb_success_routing_single(self): - from google.cloud.bigtable_admin_v2.types import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable.enums import RoutingPolicyType - - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - desctiption = "routing single" - allow_transactional_writes = True - routing = RoutingPolicyType.SINGLE - single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_transactional_writes, - ) - - app_profile_pb = data_v2_pb2.AppProfile( - name=self.APP_PROFILE_NAME, - description=desctiption, - single_cluster_routing=single_cluster_routing, - ) - - klass = self._get_target_class() - app_profile = klass.from_pb(app_profile_pb, instance) - self.assertIsInstance(app_profile, klass) - self.assertIs(app_profile._instance, instance) - 
self.assertEqual(app_profile.app_profile_id, self.APP_PROFILE_ID) - self.assertEqual(app_profile.description, desctiption) - self.assertEqual(app_profile.routing_policy_type, routing) - self.assertEqual(app_profile.cluster_id, self.CLUSTER_ID) - self.assertEqual( - app_profile.allow_transactional_writes, allow_transactional_writes - ) - - def test_from_pb_bad_app_profile_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - bad_app_profile_name = "BAD_NAME" - - app_profile_pb = data_v2_pb2.AppProfile(name=bad_app_profile_name) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(app_profile_pb, None) - - def test_from_pb_instance_id_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - ALT_INSTANCE_ID = "ALT_INSTANCE_ID" - client = _Client(self.PROJECT) - instance = _Instance(ALT_INSTANCE_ID, client) - self.assertEqual(instance.instance_id, ALT_INSTANCE_ID) - - app_profile_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(app_profile_pb, instance) - - def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - ALT_PROJECT = "ALT_PROJECT" - client = _Client(project=ALT_PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - self.assertEqual(client.project, ALT_PROJECT) - - app_profile_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(app_profile_pb, instance) - - def test_reload_routing_any(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable.enums import RoutingPolicyType - - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = _Instance(self.INSTANCE_ID, client) - - routing = RoutingPolicyType.ANY - description = "routing policy any" - - app_profile = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=routing, - description=description, - ) - - # Create response_pb - description_from_server = "routing policy switched to single" - cluster_id_from_server = self.CLUSTER_ID - allow_transactional_writes = True - single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( - cluster_id=cluster_id_from_server, - allow_transactional_writes=allow_transactional_writes, - ) - - response_pb = data_v2_pb2.AppProfile( - name=app_profile.name, - single_cluster_routing=single_cluster_routing, - description=description_from_server, - ) - - # Patch the stub used by the API method. - client._instance_admin_client = api - instance_stub = client._instance_admin_client.transport - instance_stub.get_app_profile.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # reload() has no return value. - - # Check app_profile config values before. - self.assertEqual(app_profile.routing_policy_type, routing) - self.assertEqual(app_profile.description, description) - self.assertIsNone(app_profile.cluster_id) - self.assertIsNone(app_profile.allow_transactional_writes) - - # Perform the method and check the result. 
- result = app_profile.reload() - self.assertEqual(result, expected_result) - self.assertEqual(app_profile.routing_policy_type, RoutingPolicyType.SINGLE) - self.assertEqual(app_profile.description, description_from_server) - self.assertEqual(app_profile.cluster_id, cluster_id_from_server) - self.assertEqual( - app_profile.allow_transactional_writes, allow_transactional_writes - ) - - def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.api_core import exceptions - - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - # Create response_pb - response_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) - client._instance_admin_client = instance_api - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client.transport - instance_stub.get_app_profile.side_effect = [ - response_pb, - exceptions.NotFound("testing"), - exceptions.BadRequest("testing"), - ] - - # Perform the method and check the result. - non_existing_app_profile_id = "other-app-profile-id" - app_profile = self._make_one(self.APP_PROFILE_ID, instance) - alt_app_profile = self._make_one(non_existing_app_profile_id, instance) - self.assertTrue(app_profile.exists()) - self.assertFalse(alt_app_profile.exists()) - with self.assertRaises(exceptions.BadRequest): - alt_app_profile.exists() - - def test_create_routing_any(self): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - routing = RoutingPolicyType.ANY - description = "routing policy any" - ignore_warnings = True - - app_profile = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=routing, - description=description, - ) - expected_request_app_profile = app_profile._to_pb() - expected_request = messages_v2_pb2.CreateAppProfileRequest( - parent=instance.name, - app_profile_id=self.APP_PROFILE_ID, - app_profile=expected_request_app_profile, - ignore_warnings=ignore_warnings, - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[expected_request_app_profile]) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) - client._instance_admin_client = instance_api - # Perform the method and check the result. 
- result = app_profile.create(ignore_warnings) - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, self._get_target_class()) - self.assertEqual(result.app_profile_id, self.APP_PROFILE_ID) - self.assertIs(result._instance, instance) - self.assertEqual(result.routing_policy_type, routing) - self.assertEqual(result.description, description) - self.assertEqual(result.allow_transactional_writes, False) - self.assertIsNone(result.cluster_id) - - def test_create_routing_single(self): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - routing = RoutingPolicyType.SINGLE - description = "routing policy single" - allow_writes = False - ignore_warnings = True - - app_profile = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=routing, - description=description, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_writes, - ) - expected_request_app_profile = app_profile._to_pb() - expected_request = messages_v2_pb2.CreateAppProfileRequest( - parent=instance.name, - app_profile_id=self.APP_PROFILE_ID, - app_profile=expected_request_app_profile, - ignore_warnings=ignore_warnings, - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[expected_request_app_profile]) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) - client._instance_admin_client = instance_api - # Perform the method and check the result. 
- result = app_profile.create(ignore_warnings) - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, self._get_target_class()) - self.assertEqual(result.app_profile_id, self.APP_PROFILE_ID) - self.assertIs(result._instance, instance) - self.assertEqual(result.routing_policy_type, routing) - self.assertEqual(result.description, description) - self.assertEqual(result.allow_transactional_writes, allow_writes) - self.assertEqual(result.cluster_id, self.CLUSTER_ID) - - def test_create_app_profile_with_wrong_routing_policy(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - app_profile = self._make_one( - self.APP_PROFILE_ID, instance, routing_policy_type=None - ) - with self.assertRaises(ValueError): - app_profile.create() - - def test_update_app_profile_routing_any(self): - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.protobuf import field_mask_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - routing = RoutingPolicyType.SINGLE - description = "to routing policy single" - allow_writes = True - app_profile = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=routing, - description=description, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_writes, - ) - - # Create response_pb - metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) - # Mock api calls - client._instance_admin_client = instance_api - - # Perform the method and check the result. 
- ignore_warnings = True - expected_request_update_mask = field_mask_pb2.FieldMask( - paths=["description", "single_cluster_routing"] - ) - expected_request = messages_v2_pb2.UpdateAppProfileRequest( - app_profile=app_profile._to_pb(), - update_mask=expected_request_update_mask, - ignore_warnings=ignore_warnings, - ) - - result = app_profile.update(ignore_warnings=ignore_warnings) - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateAppProfileMetadata) - - def test_update_app_profile_routing_single(self): - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.protobuf import field_mask_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - routing = RoutingPolicyType.ANY - app_profile = self._make_one( - self.APP_PROFILE_ID, instance, routing_policy_type=routing - ) - - # Create response_pb - metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) - # Mock api calls - client._instance_admin_client = instance_api - - # Perform the method and check the result. 
- ignore_warnings = True - expected_request_update_mask = field_mask_pb2.FieldMask( - paths=["multi_cluster_routing_use_any"] - ) - expected_request = messages_v2_pb2.UpdateAppProfileRequest( - app_profile=app_profile._to_pb(), - update_mask=expected_request_update_mask, - ignore_warnings=ignore_warnings, - ) - - result = app_profile.update(ignore_warnings=ignore_warnings) - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateAppProfileMetadata) - - def test_update_app_profile_with_wrong_routing_policy(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - app_profile = self._make_one( - self.APP_PROFILE_ID, instance, routing_policy_type=None - ) - with self.assertRaises(ValueError): - app_profile.update() - - def test_delete(self): - from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - app_profile = self._make_one(self.APP_PROFILE_ID, instance) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client.transport - instance_stub.delete_cluster.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. - result = app_profile.delete() - - self.assertEqual(result, expected_result) - - -class _Client(object): - def __init__(self, project): - self.project = project - self.project_name = "projects/" + self.project - self._operations_stub = mock.sentinel.operations_stub - - def __eq__(self, other): - return other.project == self.project and other.project_name == self.project_name - - -class _Instance(object): - def __init__(self, instance_id, client): - self.instance_id = instance_id - self._client = client - - def __eq__(self, other): - return other.instance_id == self.instance_id and other._client == self._client - - -class _Other(object): - def __init__(self, app_profile_id, instance): - self.app_profile_id = app_profile_id - self._instance = instance diff --git a/bigtable/tests/unit/test_batcher.py b/bigtable/tests/unit/test_batcher.py deleted file mode 100644 index 8760c3a2de2c..000000000000 --- a/bigtable/tests/unit/test_batcher.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import unittest - -import mock - -from ._testing import _make_credentials - -from google.cloud.bigtable.batcher import MutationsBatcher -from google.cloud.bigtable.row import DirectRow - - -class TestMutationsBatcher(unittest.TestCase): - from grpc import StatusCode - - TABLE_ID = "table-id" - TABLE_NAME = "/tables/" + TABLE_ID - - # RPC Status Codes - SUCCESS = StatusCode.OK.value[0] - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.table import Table - - return Table - - def _make_table(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - - instance = client.instance(instance_id="instance-id") - table = self._make_table(self.TABLE_ID, instance) - - mutation_batcher = MutationsBatcher(table) - self.assertEqual(table, mutation_batcher.table) - - def test_mutate_row(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) - - rows = [ - DirectRow(row_key=b"row_key"), - DirectRow(row_key=b"row_key_2"), - DirectRow(row_key=b"row_key_3"), - DirectRow(row_key=b"row_key_4"), - ] - - mutation_batcher.mutate_rows(rows) - mutation_batcher.flush() - - self.assertEqual(table.mutation_calls, 1) - - def test_mutate_rows(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) - - mutation_batcher.mutate(row) - - mutation_batcher.flush() - - self.assertEqual(table.mutation_calls, 1) - - def test_flush_with_no_rows(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) - mutation_batcher.flush() - - self.assertEqual(table.mutation_calls, 0) - - def test_add_row_with_max_flush_count(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, flush_count=3) - - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") - - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) - - self.assertEqual(table.mutation_calls, 1) - - @mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) - def test_mutate_row_with_max_mutations_failure(self): - from google.cloud.bigtable.batcher import MaxMutationsError - - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) - - with self.assertRaises(MaxMutationsError): - mutation_batcher.mutate(row) - - @mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) - def test_mutate_row_with_max_mutations(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - - mutation_batcher.mutate(row) - mutation_batcher.flush() - - self.assertEqual(table.mutation_calls, 1) - 
- def test_mutate_row_with_max_row_bytes(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) - - number_of_bytes = 1 * 1024 * 1024 - max_value = b"1" * number_of_bytes - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", max_value) - row.set_cell("cf1", b"c2", max_value) - row.set_cell("cf1", b"c3", max_value) - - mutation_batcher.mutate(row) - - self.assertEqual(table.mutation_calls, 1) - - -class _Instance(object): - def __init__(self, client=None): - self._client = client - - -class _Table(object): - def __init__(self, name, client=None): - self.name = name - self._instance = _Instance(client) - self.mutation_calls = 0 - - def mutate_rows(self, rows): - self.mutation_calls += 1 - return rows diff --git a/bigtable/tests/unit/test_client.py b/bigtable/tests/unit/test_client.py deleted file mode 100644 index 8a2ef3c64b56..000000000000 --- a/bigtable/tests/unit/test_client.py +++ /dev/null @@ -1,556 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - -import mock - -from ._testing import _make_credentials - - -class Test__create_gapic_client(unittest.TestCase): - def _invoke_client_factory(self, client_class, **kw): - from google.cloud.bigtable.client import _create_gapic_client - - return _create_gapic_client(client_class, **kw) - - def test_wo_emulator(self): - client_class = mock.Mock() - credentials = _make_credentials() - client = _Client(credentials) - client_info = client._client_info = mock.Mock() - - result = self._invoke_client_factory(client_class)(client) - - self.assertIs(result, client_class.return_value) - client_class.assert_called_once_with( - credentials=client._credentials, - client_info=client_info, - client_options=None, - ) - - def test_wo_emulator_w_client_options(self): - client_class = mock.Mock() - credentials = _make_credentials() - client = _Client(credentials) - client_info = client._client_info = mock.Mock() - client_options = mock.Mock() - - result = self._invoke_client_factory( - client_class, client_options=client_options - )(client) - - self.assertIs(result, client_class.return_value) - client_class.assert_called_once_with( - credentials=client._credentials, - client_info=client_info, - client_options=client_options, - ) - - def test_w_emulator(self): - client_class = mock.Mock() - emulator_host = emulator_channel = object() - credentials = _make_credentials() - client = _Client( - credentials, emulator_host=emulator_host, emulator_channel=emulator_channel - ) - client_info = client._client_info = mock.Mock() - - result = self._invoke_client_factory(client_class)(client) - - self.assertIs(result, client_class.return_value) - client_class.assert_called_once_with( - channel=client._emulator_channel, client_info=client_info - ) - - -class _Client(object): - def __init__(self, credentials, emulator_host=None, emulator_channel=None): - self._credentials = credentials - self._emulator_host = emulator_host - 
self._emulator_channel = emulator_channel - - -class TestClient(unittest.TestCase): - - PROJECT = "PROJECT" - INSTANCE_ID = "instance-id" - DISPLAY_NAME = "display-name" - USER_AGENT = "you-sir-age-int" - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor_defaults(self): - from google.cloud.bigtable.client import _CLIENT_INFO - from google.cloud.bigtable.client import DATA_SCOPE - - credentials = _make_credentials() - - with mock.patch("google.auth.default") as mocked: - mocked.return_value = credentials, self.PROJECT - client = self._make_one() - - self.assertEqual(client.project, self.PROJECT) - self.assertIs(client._credentials, credentials.with_scopes.return_value) - self.assertFalse(client._read_only) - self.assertFalse(client._admin) - self.assertIs(client._client_info, _CLIENT_INFO) - self.assertIsNone(client._channel) - self.assertIsNone(client._emulator_host) - self.assertIsNone(client._emulator_channel) - self.assertEqual(client.SCOPE, (DATA_SCOPE,)) - - def test_constructor_explicit(self): - import warnings - from google.cloud.bigtable.client import ADMIN_SCOPE - from google.cloud.bigtable.client import DATA_SCOPE - - credentials = _make_credentials() - client_info = mock.Mock() - - with warnings.catch_warnings(record=True) as warned: - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - read_only=False, - admin=True, - client_info=client_info, - channel=mock.sentinel.channel, - ) - - self.assertEqual(len(warned), 1) - - self.assertEqual(client.project, self.PROJECT) - self.assertIs(client._credentials, credentials.with_scopes.return_value) - self.assertFalse(client._read_only) - self.assertTrue(client._admin) - self.assertIs(client._client_info, client_info) - self.assertIs(client._channel, mock.sentinel.channel) - self.assertEqual(client.SCOPE, (DATA_SCOPE, ADMIN_SCOPE)) - - def test_constructor_both_admin_and_read_only(self): - credentials = _make_credentials() - with self.assertRaises(ValueError): - self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - read_only=True, - ) - - def test_constructor_with_emulator_host(self): - from google.cloud.environment_vars import BIGTABLE_EMULATOR - - credentials = _make_credentials() - emulator_host = "localhost:8081" - with mock.patch("os.getenv") as getenv: - getenv.return_value = emulator_host - with mock.patch("grpc.insecure_channel") as factory: - getenv.return_value = emulator_host - client = self._make_one(project=self.PROJECT, credentials=credentials) - - self.assertEqual(client._emulator_host, emulator_host) - self.assertIs(client._emulator_channel, factory.return_value) - factory.assert_called_once_with(emulator_host) - getenv.assert_called_once_with(BIGTABLE_EMULATOR) - - def test__get_scopes_default(self): - from google.cloud.bigtable.client import DATA_SCOPE - - client = self._make_one(project=self.PROJECT, credentials=_make_credentials()) - self.assertEqual(client._get_scopes(), (DATA_SCOPE,)) - - def test__get_scopes_admin(self): - from google.cloud.bigtable.client import ADMIN_SCOPE - from google.cloud.bigtable.client import DATA_SCOPE - - client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), admin=True - ) - expected_scopes = (DATA_SCOPE, ADMIN_SCOPE) - self.assertEqual(client._get_scopes(), expected_scopes) - - def test__get_scopes_read_only(self): - from 
google.cloud.bigtable.client import READ_ONLY_SCOPE - - client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), read_only=True - ) - self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,)) - - def test_project_path_property(self): - credentials = _make_credentials() - project = "PROJECT" - client = self._make_one(project=project, credentials=credentials, admin=True) - project_name = "projects/" + project - self.assertEqual(client.project_path, project_name) - - def test_table_data_client_not_initialized(self): - from google.cloud.bigtable.client import _CLIENT_INFO - from google.cloud.bigtable_v2 import BigtableClient - - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - - table_data_client = client.table_data_client - self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(table_data_client._client_info, _CLIENT_INFO) - self.assertIs(client._table_data_client, table_data_client) - - def test_table_data_client_not_initialized_w_client_info(self): - from google.cloud.bigtable_v2 import BigtableClient - - credentials = _make_credentials() - client_info = mock.Mock() - client = self._make_one( - project=self.PROJECT, credentials=credentials, client_info=client_info - ) - - table_data_client = client.table_data_client - self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(table_data_client._client_info, client_info) - self.assertIs(client._table_data_client, table_data_client) - - def test_table_data_client_not_initialized_w_client_options(self): - credentials = _make_credentials() - client_options = mock.Mock() - client = self._make_one( - project=self.PROJECT, credentials=credentials, client_options=client_options - ) - - patch = mock.patch("google.cloud.bigtable_v2.BigtableClient") - with patch as mocked: - table_data_client = client.table_data_client - - self.assertIs(table_data_client, mocked.return_value) - self.assertIs(client._table_data_client, table_data_client) - mocked.assert_called_once_with( - client_info=client._client_info, - credentials=mock.ANY, # added scopes - client_options=client_options, - ) - - def test_table_data_client_initialized(self): - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - - already = client._table_data_client = object() - self.assertIs(client.table_data_client, already) - - def test_table_admin_client_not_initialized_no_admin_flag(self): - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - - with self.assertRaises(ValueError): - client.table_admin_client() - - def test_table_admin_client_not_initialized_w_admin_flag(self): - from google.cloud.bigtable.client import _CLIENT_INFO - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient - - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - - table_admin_client = client.table_admin_client - self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(table_admin_client._client_info, _CLIENT_INFO) - self.assertIs(client._table_admin_client, table_admin_client) - - def test_table_admin_client_not_initialized_w_client_info(self): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient - - credentials = _make_credentials() - client_info = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - 
admin=True, - client_info=client_info, - ) - - table_admin_client = client.table_admin_client - self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(table_admin_client._client_info, client_info) - self.assertIs(client._table_admin_client, table_admin_client) - - def test_table_admin_client_not_initialized_w_client_options(self): - credentials = _make_credentials() - admin_client_options = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - admin_client_options=admin_client_options, - ) - - patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient") - with patch as mocked: - table_admin_client = client.table_admin_client - - self.assertIs(table_admin_client, mocked.return_value) - self.assertIs(client._table_admin_client, table_admin_client) - mocked.assert_called_once_with( - client_info=client._client_info, - credentials=mock.ANY, # added scopes - client_options=admin_client_options, - ) - - def test_table_admin_client_initialized(self): - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - - already = client._table_admin_client = object() - self.assertIs(client.table_admin_client, already) - - def test_instance_admin_client_not_initialized_no_admin_flag(self): - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - - with self.assertRaises(ValueError): - client.instance_admin_client() - - def test_instance_admin_client_not_initialized_w_admin_flag(self): - from google.cloud.bigtable.client import _CLIENT_INFO - from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient - - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - - instance_admin_client = client.instance_admin_client - self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(instance_admin_client._client_info, _CLIENT_INFO) - self.assertIs(client._instance_admin_client, instance_admin_client) - - def test_instance_admin_client_not_initialized_w_client_info(self): - from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient - - credentials = _make_credentials() - client_info = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - client_info=client_info, - ) - - instance_admin_client = client.instance_admin_client - self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(instance_admin_client._client_info, client_info) - self.assertIs(client._instance_admin_client, instance_admin_client) - - def test_instance_admin_client_not_initialized_w_client_options(self): - credentials = _make_credentials() - admin_client_options = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - admin_client_options=admin_client_options, - ) - - patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient") - with patch as mocked: - instance_admin_client = client.instance_admin_client - - self.assertIs(instance_admin_client, mocked.return_value) - self.assertIs(client._instance_admin_client, instance_admin_client) - mocked.assert_called_once_with( - client_info=client._client_info, - credentials=mock.ANY, # added scopes - client_options=admin_client_options, - ) - - def test_instance_admin_client_initialized(self): - credentials = 
_make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - - already = client._instance_admin_client = object() - self.assertIs(client.instance_admin_client, already) - - def test_instance_factory_defaults(self): - from google.cloud.bigtable.instance import Instance - - PROJECT = "PROJECT" - INSTANCE_ID = "instance-id" - credentials = _make_credentials() - client = self._make_one(project=PROJECT, credentials=credentials) - - instance = client.instance(INSTANCE_ID) - - self.assertIsInstance(instance, Instance) - self.assertEqual(instance.instance_id, INSTANCE_ID) - self.assertEqual(instance.display_name, INSTANCE_ID) - self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - self.assertIs(instance._client, client) - - def test_instance_factory_non_defaults(self): - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable import enums - - PROJECT = "PROJECT" - INSTANCE_ID = "instance-id" - DISPLAY_NAME = "display-name" - instance_type = enums.Instance.Type.DEVELOPMENT - labels = {"foo": "bar"} - credentials = _make_credentials() - client = self._make_one(project=PROJECT, credentials=credentials) - - instance = client.instance( - INSTANCE_ID, - display_name=DISPLAY_NAME, - instance_type=instance_type, - labels=labels, - ) - - self.assertIsInstance(instance, Instance) - self.assertEqual(instance.instance_id, INSTANCE_ID) - self.assertEqual(instance.display_name, DISPLAY_NAME) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, labels) - self.assertIs(instance._client, client) - - def test_list_instances(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable.instance import Instance - - FAILED_LOCATION = "FAILED" - INSTANCE_ID1 = "instance-id1" - INSTANCE_ID2 = "instance-id2" - INSTANCE_NAME1 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID1 - INSTANCE_NAME2 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID2 - - credentials = _make_credentials() - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - - # Create response_pb - response_pb = messages_v2_pb2.ListInstancesResponse( - failed_locations=[FAILED_LOCATION], - instances=[ - data_v2_pb2.Instance(name=INSTANCE_NAME1, display_name=INSTANCE_NAME1), - data_v2_pb2.Instance(name=INSTANCE_NAME2, display_name=INSTANCE_NAME2), - ], - ) - - # Patch the stub used by the API method. - client._instance_admin_client = api - bigtable_instance_stub = client.instance_admin_client.transport - bigtable_instance_stub.list_instances.side_effect = [response_pb] - - # Perform the method and check the result. 
- instances, failed_locations = client.list_instances() - - instance_1, instance_2 = instances - - self.assertIsInstance(instance_1, Instance) - self.assertEqual(instance_1.name, INSTANCE_NAME1) - self.assertTrue(instance_1._client is client) - - self.assertIsInstance(instance_2, Instance) - self.assertEqual(instance_2.name, INSTANCE_NAME2) - self.assertTrue(instance_2._client is client) - - self.assertEqual(failed_locations, [FAILED_LOCATION]) - - def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable.instance import Cluster - - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - - INSTANCE_ID1 = "instance-id1" - INSTANCE_ID2 = "instance-id2" - - failed_location = "FAILED" - cluster_id1 = "{}-cluster".format(INSTANCE_ID1) - cluster_id2 = "{}-cluster-1".format(INSTANCE_ID2) - cluster_id3 = "{}-cluster-2".format(INSTANCE_ID2) - cluster_name1 = client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID1, cluster_id1 - ) - cluster_name2 = client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID2, cluster_id2 - ) - cluster_name3 = client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID2, cluster_id3 - ) - - # Create response_pb - response_pb = messages_v2_pb2.ListClustersResponse( - failed_locations=[failed_location], - clusters=[ - data_v2_pb2.Cluster(name=cluster_name1), - data_v2_pb2.Cluster(name=cluster_name2), - data_v2_pb2.Cluster(name=cluster_name3), - ], - ) - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client.transport - instance_stub.list_clusters.side_effect = [response_pb] - - # Perform the method and check the result. - clusters, failed_locations = client.list_clusters() - - cluster_1, cluster_2, cluster_3 = clusters - - self.assertIsInstance(cluster_1, Cluster) - self.assertEqual(cluster_1.name, cluster_name1) - self.assertEqual(cluster_1._instance.instance_id, INSTANCE_ID1) - - self.assertIsInstance(cluster_2, Cluster) - self.assertEqual(cluster_2.name, cluster_name2) - self.assertEqual(cluster_2._instance.instance_id, INSTANCE_ID2) - - self.assertIsInstance(cluster_3, Cluster) - self.assertEqual(cluster_3.name, cluster_name3) - self.assertEqual(cluster_3._instance.instance_id, INSTANCE_ID2) - - self.assertEqual(failed_locations, [failed_location]) diff --git a/bigtable/tests/unit/test_cluster.py b/bigtable/tests/unit/test_cluster.py deleted file mode 100644 index 9a0d39c84977..000000000000 --- a/bigtable/tests/unit/test_cluster.py +++ /dev/null @@ -1,496 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - -import mock - -from ._testing import _make_credentials - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - return self.channel_stub.responses.pop() - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class TestCluster(unittest.TestCase): - - PROJECT = "project" - INSTANCE_ID = "instance-id" - LOCATION_ID = "location-id" - CLUSTER_ID = "cluster-id" - LOCATION_ID = "location-id" - CLUSTER_NAME = ( - "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/clusters/" + CLUSTER_ID - ) - LOCATION_PATH = "projects/" + PROJECT + "/locations/" - SERVE_NODES = 5 - OP_ID = 5678 - OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format( - PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID - ) - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.cluster import Cluster - - return Cluster - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor_defaults(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - cluster = self._make_one(self.CLUSTER_ID, instance) - self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) - self.assertIs(cluster._instance, instance) - self.assertIsNone(cluster.location_id) - self.assertIsNone(cluster.state) - self.assertIsNone(cluster.serve_nodes) - self.assertIsNone(cluster.default_storage_type) - - def test_constructor_non_default(self): - from google.cloud.bigtable.enums import StorageType - from google.cloud.bigtable.enums import Cluster - - STATE = Cluster.State.READY - STORAGE_TYPE_SSD = StorageType.SSD - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - _state=STATE, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - ) - self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) - self.assertIs(cluster._instance, instance) - self.assertEqual(cluster.location_id, self.LOCATION_ID) - self.assertEqual(cluster.state, STATE) - self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD) - - def test_name_property(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance) - - self.assertEqual(cluster.name, self.CLUSTER_NAME) - - def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable import enums - - client = 
_Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - location = self.LOCATION_PATH + self.LOCATION_ID - state = enums.Cluster.State.RESIZING - storage_type = enums.StorageType.SSD - cluster_pb = data_v2_pb2.Cluster( - name=self.CLUSTER_NAME, - location=location, - state=state, - serve_nodes=self.SERVE_NODES, - default_storage_type=storage_type, - ) - - klass = self._get_target_class() - cluster = klass.from_pb(cluster_pb, instance) - self.assertIsInstance(cluster, klass) - self.assertEqual(cluster._instance, instance) - self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) - self.assertEqual(cluster.location_id, self.LOCATION_ID) - self.assertEqual(cluster.state, state) - self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) - self.assertEqual(cluster.default_storage_type, storage_type) - - def test_from_pb_bad_cluster_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - bad_cluster_name = "BAD_NAME" - - cluster_pb = data_v2_pb2.Cluster(name=bad_cluster_name) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, None) - - def test_from_pb_instance_id_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - ALT_INSTANCE_ID = "ALT_INSTANCE_ID" - client = _Client(self.PROJECT) - instance = _Instance(ALT_INSTANCE_ID, client) - - self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID) - cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, instance) - - def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - ALT_PROJECT = "ALT_PROJECT" - client = _Client(project=ALT_PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - self.assertNotEqual(self.PROJECT, ALT_PROJECT) - cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, instance) - - def test___eq__(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - self.assertEqual(cluster1, cluster2) - - def test___eq__type_differ(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - cluster2 = object() - self.assertNotEqual(cluster1, cluster2) - - def test___ne__same_value(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - comparison_val = cluster1 != cluster2 - self.assertFalse(comparison_val) - - def test___ne__(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one("cluster_id1", instance, self.LOCATION_ID) - cluster2 = self._make_one("cluster_id2", instance, self.LOCATION_ID) - self.assertNotEqual(cluster1, cluster2) - - def test_reload(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable.enums import StorageType - from 
google.cloud.bigtable.enums import Cluster - - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - STORAGE_TYPE_SSD = StorageType.SSD - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - ) - - # Create response_pb - LOCATION_ID_FROM_SERVER = "new-location-id" - STATE = Cluster.State.READY - SERVE_NODES_FROM_SERVER = 10 - STORAGE_TYPE_FROM_SERVER = StorageType.HDD - - response_pb = data_v2_pb2.Cluster( - name=cluster.name, - location=self.LOCATION_PATH + LOCATION_ID_FROM_SERVER, - state=STATE, - serve_nodes=SERVE_NODES_FROM_SERVER, - default_storage_type=STORAGE_TYPE_FROM_SERVER, - ) - - # Patch the stub used by the API method. - client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.get_cluster.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # reload() has no return value. - - # Check Cluster optional config values before. - self.assertEqual(cluster.location_id, self.LOCATION_ID) - self.assertIsNone(cluster.state) - self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD) - - # Perform the method and check the result. - result = cluster.reload() - self.assertEqual(result, expected_result) - self.assertEqual(cluster.location_id, LOCATION_ID_FROM_SERVER) - self.assertEqual(cluster.state, STATE) - self.assertEqual(cluster.serve_nodes, SERVE_NODES_FROM_SERVER) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER) - - def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable.instance import Instance - from google.api_core import exceptions - - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = Instance(self.INSTANCE_ID, client) - - # Create response_pb - cluster_name = client.instance_admin_client.cluster_path( - self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID - ) - response_pb = data_v2_pb2.Cluster(name=cluster_name) - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.get_cluster.side_effect = [ - response_pb, - exceptions.NotFound("testing"), - exceptions.BadRequest("testing"), - ] - - # Perform the method and check the result. 
- non_existing_cluster_id = "cluster-id-2" - alt_cluster_1 = self._make_one(self.CLUSTER_ID, instance) - alt_cluster_2 = self._make_one(non_existing_cluster_id, instance) - self.assertTrue(alt_cluster_1.exists()) - self.assertFalse(alt_cluster_2.exists()) - with self.assertRaises(exceptions.BadRequest): - alt_cluster_1.exists() - - def test_create(self): - import datetime - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2, - ) - from google.cloud.bigtable.enums import StorageType - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - STORAGE_TYPE_SSD = StorageType.SSD - LOCATION = self.LOCATION_PATH + self.LOCATION_ID - instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - ) - expected_request_cluster = instance_pb2.Cluster( - location=LOCATION, - serve_nodes=cluster.serve_nodes, - default_storage_type=cluster.default_storage_type, - ) - expected_request = instance_v2_pb2.CreateClusterRequest( - parent=instance.name, - cluster_id=self.CLUSTER_ID, - cluster=expected_request_cluster, - ) - - metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateClusterMetadata.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) - client._instance_admin_client = api - - # Perform the method and check the result. 
- result = cluster.create() - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.CreateClusterMetadata) - - def test_update(self): - import datetime - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable.enums import StorageType - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - STORAGE_TYPE_SSD = StorageType.SSD - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - ) - # Create expected_request - expected_request = instance_pb2.Cluster( - name=cluster.name, serve_nodes=self.SERVE_NODES - ) - - metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) - client._instance_admin_client = api - - # Perform the method and check the result. - result = cluster.update() - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateClusterMetadata) - - def test_delete(self): - from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.delete_cluster.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. 
- result = cluster.delete() - - self.assertEqual(result, expected_result) - - -class _Instance(object): - def __init__(self, instance_id, client): - self.instance_id = instance_id - self._client = client - - def __eq__(self, other): - return other.instance_id == self.instance_id and other._client == self._client - - -class _Client(object): - def __init__(self, project): - self.project = project - self.project_name = "projects/" + self.project - self._operations_stub = mock.sentinel.operations_stub - - def __eq__(self, other): - return other.project == self.project and other.project_name == self.project_name diff --git a/bigtable/tests/unit/test_column_family.py b/bigtable/tests/unit/test_column_family.py deleted file mode 100644 index d6f6c2672047..000000000000 --- a/bigtable/tests/unit/test_column_family.py +++ /dev/null @@ -1,636 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import mock - -from ._testing import _make_credentials - - -class TestMaxVersionsGCRule(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - return MaxVersionsGCRule - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test___eq__type_differ(self): - gc_rule1 = self._make_one(10) - self.assertNotEqual(gc_rule1, object()) - self.assertEqual(gc_rule1, mock.ANY) - - def test___eq__same_value(self): - gc_rule1 = self._make_one(2) - gc_rule2 = self._make_one(2) - self.assertEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - gc_rule1 = self._make_one(99) - gc_rule2 = self._make_one(99) - comparison_val = gc_rule1 != gc_rule2 - self.assertFalse(comparison_val) - - def test_to_pb(self): - max_num_versions = 1337 - gc_rule = self._make_one(max_num_versions=max_num_versions) - pb_val = gc_rule.to_pb() - expected = _GcRulePB(max_num_versions=max_num_versions) - self.assertEqual(pb_val, expected) - - -class TestMaxAgeGCRule(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import MaxAgeGCRule - - return MaxAgeGCRule - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test___eq__type_differ(self): - max_age = object() - gc_rule1 = self._make_one(max_age=max_age) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___eq__same_value(self): - max_age = object() - gc_rule1 = self._make_one(max_age=max_age) - gc_rule2 = self._make_one(max_age=max_age) - self.assertEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - max_age = object() - gc_rule1 = self._make_one(max_age=max_age) - gc_rule2 = self._make_one(max_age=max_age) - comparison_val = gc_rule1 != gc_rule2 - self.assertFalse(comparison_val) - - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 - - max_age = datetime.timedelta(seconds=1) - duration = duration_pb2.Duration(seconds=1) - gc_rule = 
self._make_one(max_age=max_age) - pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, _GcRulePB(max_age=duration)) - - -class TestGCRuleUnion(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import GCRuleUnion - - return GCRuleUnion - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - rules = object() - rule_union = self._make_one(rules) - self.assertIs(rule_union.rules, rules) - - def test___eq__(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = self._make_one(rules) - self.assertEqual(gc_rule1, gc_rule2) - - def test___eq__type_differ(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = self._make_one(rules) - comparison_val = gc_rule1 != gc_rule2 - self.assertFalse(comparison_val) - - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions = 42 - rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) - - gc_rule_pb = rule3.to_pb() - self.assertEqual(gc_rule_pb, pb_rule3) - - def test_to_pb_nested(self): - import datetime - from google.protobuf import duration_pb2 - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions1 = 42 - rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) - - max_num_versions2 = 1337 - rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) - - rule5 = self._make_one(rules=[rule3, rule4]) - pb_rule5 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) - - gc_rule_pb = rule5.to_pb() - self.assertEqual(gc_rule_pb, pb_rule5) - - -class TestGCRuleIntersection(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import GCRuleIntersection - - return GCRuleIntersection - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - rules = object() - rule_intersection = self._make_one(rules) - self.assertIs(rule_intersection.rules, rules) - - def test___eq__(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = self._make_one(rules) - self.assertEqual(gc_rule1, gc_rule2) - - def test___eq__type_differ(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = self._make_one(rules) - comparison_val = gc_rule1 
!= gc_rule2 - self.assertFalse(comparison_val) - - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions = 42 - rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB( - intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2]) - ) - - gc_rule_pb = rule3.to_pb() - self.assertEqual(gc_rule_pb, pb_rule3) - - def test_to_pb_nested(self): - import datetime - from google.protobuf import duration_pb2 - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions1 = 42 - rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB( - intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2]) - ) - - max_num_versions2 = 1337 - rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) - - rule5 = self._make_one(rules=[rule3, rule4]) - pb_rule5 = _GcRulePB( - intersection=_GcRuleIntersectionPB(rules=[pb_rule3, pb_rule4]) - ) - - gc_rule_pb = rule5.to_pb() - self.assertEqual(gc_rule_pb, pb_rule5) - - -class TestColumnFamily(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import ColumnFamily - - return ColumnFamily - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor(self): - column_family_id = u"column-family-id" - table = object() - gc_rule = object() - column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) - - self.assertEqual(column_family.column_family_id, column_family_id) - self.assertIs(column_family._table, table) - self.assertIs(column_family.gc_rule, gc_rule) - - def test_name_property(self): - column_family_id = u"column-family-id" - table_name = "table_name" - table = _Table(table_name) - column_family = self._make_one(column_family_id, table) - - expected_name = table_name + "/columnFamilies/" + column_family_id - self.assertEqual(column_family.name, expected_name) - - def test___eq__(self): - column_family_id = "column_family_id" - table = object() - gc_rule = object() - column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule) - column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule) - self.assertEqual(column_family1, column_family2) - - def test___eq__type_differ(self): - column_family1 = self._make_one("column_family_id", None) - column_family2 = object() - self.assertNotEqual(column_family1, column_family2) - - def test___ne__same_value(self): - column_family_id = "column_family_id" - table = object() - gc_rule = object() - column_family1 = 
self._make_one(column_family_id, table, gc_rule=gc_rule) - column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule) - comparison_val = column_family1 != column_family2 - self.assertFalse(comparison_val) - - def test___ne__(self): - column_family1 = self._make_one("column_family_id1", None) - column_family2 = self._make_one("column_family_id2", None) - self.assertNotEqual(column_family1, column_family2) - - def test_to_pb_no_rules(self): - column_family = self._make_one("column_family_id", None) - pb_val = column_family.to_pb() - expected = _ColumnFamilyPB() - self.assertEqual(pb_val, expected) - - def test_to_pb_with_rule(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - gc_rule = MaxVersionsGCRule(1) - column_family = self._make_one("column_family_id", None, gc_rule=gc_rule) - pb_val = column_family.to_pb() - expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) - self.assertEqual(pb_val, expected) - - def _create_test_helper(self, gc_rule=None): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, - ) - from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - project_id = "project-id" - zone = "zone" - cluster_id = "cluster-id" - table_id = "table-id" - column_family_id = "column-family-id" - table_name = ( - "projects/" - + project_id - + "/zones/" - + zone - + "/clusters/" - + cluster_id - + "/tables/" - + table_id - ) - - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) - - # Create request_pb - if gc_rule is None: - column_family_pb = _ColumnFamilyPB() - else: - column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, create=column_family_pb) - - # Create response_pb - response_pb = _ColumnFamilyPB() - - # Patch the stub used by the API method. - stub = _FakeStub(response_pb) - client._table_admin_client = api - client._table_admin_client.transport.create = stub - - # Create expected_result. - expected_result = None # create() has no return value. - - # Perform the method and check the result. 
- self.assertEqual(stub.results, (response_pb,)) - result = column_family.create() - self.assertEqual(result, expected_result) - - def test_create(self): - self._create_test_helper(gc_rule=None) - - def test_create_with_gc_rule(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - gc_rule = MaxVersionsGCRule(1337) - self._create_test_helper(gc_rule=gc_rule) - - def _update_test_helper(self, gc_rule=None): - from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - project_id = "project-id" - zone = "zone" - cluster_id = "cluster-id" - table_id = "table-id" - column_family_id = "column-family-id" - table_name = ( - "projects/" - + project_id - + "/zones/" - + zone - + "/clusters/" - + cluster_id - + "/tables/" - + table_id - ) - - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) - - # Create request_pb - if gc_rule is None: - column_family_pb = _ColumnFamilyPB() - else: - column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, update=column_family_pb) - - # Create response_pb - response_pb = _ColumnFamilyPB() - - # Patch the stub used by the API method. - stub = _FakeStub(response_pb) - client._table_admin_client = api - client._table_admin_client.transport.update = stub - - # Create expected_result. - expected_result = None # update() has no return value. - - # Perform the method and check the result. - self.assertEqual(stub.results, (response_pb,)) - result = column_family.update() - self.assertEqual(result, expected_result) - - def test_update(self): - self._update_test_helper(gc_rule=None) - - def test_update_with_gc_rule(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - gc_rule = MaxVersionsGCRule(1337) - self._update_test_helper(gc_rule=gc_rule) - - def test_delete(self): - from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, - ) - from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - project_id = "project-id" - zone = "zone" - cluster_id = "cluster-id" - table_id = "table-id" - column_family_id = "column-family-id" - table_name = ( - "projects/" - + project_id - + "/zones/" - + zone - + "/clusters/" - + cluster_id - + "/tables/" - + table_id - ) - - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - column_family = self._make_one(column_family_id, table) - - # Create request_pb - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, drop=True) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. 
- stub = _FakeStub(response_pb) - client._table_admin_client = api - client._table_admin_client.transport.delete = stub - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. - self.assertEqual(stub.results, (response_pb,)) - result = column_family.delete() - self.assertEqual(result, expected_result) - - -class Test__gc_rule_from_pb(unittest.TestCase): - def _call_fut(self, *args, **kwargs): - from google.cloud.bigtable.column_family import _gc_rule_from_pb - - return _gc_rule_from_pb(*args, **kwargs) - - def test_empty(self): - - gc_rule_pb = _GcRulePB() - self.assertIsNone(self._call_fut(gc_rule_pb)) - - def test_max_num_versions(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - orig_rule = MaxVersionsGCRule(1) - gc_rule_pb = orig_rule.to_pb() - result = self._call_fut(gc_rule_pb) - self.assertIsInstance(result, MaxVersionsGCRule) - self.assertEqual(result, orig_rule) - - def test_max_age(self): - import datetime - from google.cloud.bigtable.column_family import MaxAgeGCRule - - orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1)) - gc_rule_pb = orig_rule.to_pb() - result = self._call_fut(gc_rule_pb) - self.assertIsInstance(result, MaxAgeGCRule) - self.assertEqual(result, orig_rule) - - def test_union(self): - import datetime - from google.cloud.bigtable.column_family import GCRuleUnion - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - rule1 = MaxVersionsGCRule(1) - rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) - orig_rule = GCRuleUnion([rule1, rule2]) - gc_rule_pb = orig_rule.to_pb() - result = self._call_fut(gc_rule_pb) - self.assertIsInstance(result, GCRuleUnion) - self.assertEqual(result, orig_rule) - - def test_intersection(self): - import datetime - from google.cloud.bigtable.column_family import GCRuleIntersection - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - rule1 = MaxVersionsGCRule(1) - rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) - orig_rule = GCRuleIntersection([rule1, rule2]) - gc_rule_pb = orig_rule.to_pb() - result = self._call_fut(gc_rule_pb) - self.assertIsInstance(result, GCRuleIntersection) - self.assertEqual(result, orig_rule) - - def test_unknown_field_name(self): - class MockProto(object): - - names = [] - - @classmethod - def WhichOneof(cls, name): - cls.names.append(name) - return "unknown" - - self.assertEqual(MockProto.names, []) - self.assertRaises(ValueError, self._call_fut, MockProto) - self.assertEqual(MockProto.names, ["rule"]) - - -def _GcRulePB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 - - return table_v2_pb2.GcRule(*args, **kw) - - -def _GcRuleIntersectionPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 - - return table_v2_pb2.GcRule.Intersection(*args, **kw) - - -def _GcRuleUnionPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 - - return table_v2_pb2.GcRule.Union(*args, **kw) - - -def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 - - return table_v2_pb2.ColumnFamily(*args, **kw) - - -class _Instance(object): - def __init__(self, client=None): - self._client = client - - -class _Client(object): - pass - - -class _Table(object): - def __init__(self, name, 
client=None): - self.name = name - self._instance = _Instance(client) diff --git a/bigtable/tests/unit/test_instance.py b/bigtable/tests/unit/test_instance.py deleted file mode 100644 index b129d4edc825..000000000000 --- a/bigtable/tests/unit/test_instance.py +++ /dev/null @@ -1,997 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - -import mock - -from ._testing import _make_credentials -from google.cloud.bigtable.cluster import Cluster - - -class TestInstance(unittest.TestCase): - - PROJECT = "project" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID - LOCATION_ID = "locid" - LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID - APP_PROFILE_PATH = ( - "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/" - ) - DISPLAY_NAME = "display_name" - LABELS = {"foo": "bar"} - OP_ID = 8915 - OP_NAME = "operations/projects/{}/instances/{}operations/{}".format( - PROJECT, INSTANCE_ID, OP_ID - ) - TABLE_ID = "table_id" - TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.instance import Instance - - return Instance - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor_defaults(self): - - client = object() - instance = self._make_one(self.INSTANCE_ID, client) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.display_name, self.INSTANCE_ID) - self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - self.assertIs(instance._client, client) - self.assertIsNone(instance.state) - - def test_constructor_non_default(self): - from google.cloud.bigtable import enums - - instance_type = enums.Instance.Type.DEVELOPMENT - state = enums.Instance.State.READY - labels = {"test": "test"} - client = object() - - instance = self._make_one( - self.INSTANCE_ID, - client, - display_name=self.DISPLAY_NAME, - instance_type=instance_type, - labels=labels, - _state=state, - ) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, labels) - self.assertIs(instance._client, client) - self.assertEqual(instance.state, state) - - def test__update_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable import enums - - instance_type = enums.Instance.Type.PRODUCTION - state = enums.Instance.State.READY - instance_pb = data_v2_pb2.Instance( - display_name=self.DISPLAY_NAME, - type=instance_type, - labels=self.LABELS, - state=state, - ) - - instance = self._make_one(None, None) - 
self.assertIsNone(instance.display_name) - self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - instance._update_from_pb(instance_pb) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, self.LABELS) - self.assertEqual(instance._state, state) - - def test__update_from_pb_success_defaults(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable import enums - - instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME) - - instance = self._make_one(None, None) - self.assertIsNone(instance.display_name) - self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - instance._update_from_pb(instance_pb) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED) - self.assertFalse(instance.labels) - - def test__update_from_pb_no_display_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - instance_pb = data_v2_pb2.Instance() - instance = self._make_one(None, None) - self.assertIsNone(instance.display_name) - with self.assertRaises(ValueError): - instance._update_from_pb(instance_pb) - - def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable import enums - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance_type = enums.Instance.Type.PRODUCTION - state = enums.Instance.State.READY - instance_pb = data_v2_pb2.Instance( - name=self.INSTANCE_NAME, - display_name=self.INSTANCE_ID, - type=instance_type, - labels=self.LABELS, - state=state, - ) - - klass = self._get_target_class() - instance = klass.from_pb(instance_pb, client) - self.assertIsInstance(instance, klass) - self.assertEqual(instance._client, client) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.display_name, self.INSTANCE_ID) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, self.LABELS) - self.assertEqual(instance._state, state) - - def test_from_pb_bad_instance_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - instance_name = "INCORRECT_FORMAT" - instance_pb = data_v2_pb2.Instance(name=instance_name) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(instance_pb, None) - - def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - - ALT_PROJECT = "ALT_PROJECT" - credentials = _make_credentials() - client = self._make_client( - project=ALT_PROJECT, credentials=credentials, admin=True - ) - - self.assertNotEqual(self.PROJECT, ALT_PROJECT) - - instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(instance_pb, client) - - def test_name_property(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - - # Patch the the API method. 
- client._instance_admin_client = api - - instance = self._make_one(self.INSTANCE_ID, client) - self.assertEqual(instance.name, self.INSTANCE_NAME) - - def test___eq__(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client) - instance2 = self._make_one(self.INSTANCE_ID, client) - self.assertEqual(instance1, instance2) - - def test___eq__type_differ(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client) - instance2 = object() - self.assertNotEqual(instance1, instance2) - - def test___ne__same_value(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client) - instance2 = self._make_one(self.INSTANCE_ID, client) - comparison_val = instance1 != instance2 - self.assertFalse(comparison_val) - - def test___ne__(self): - instance1 = self._make_one("instance_id1", "client1") - instance2 = self._make_one("instance_id2", "client2") - self.assertNotEqual(instance1, instance2) - - def test_create_check_location_and_clusters(self): - instance = self._make_one(self.INSTANCE_ID, None) - - with self.assertRaises(ValueError): - instance.create(location_id=self.LOCATION_ID, clusters=[object(), object()]) - - def test_create_check_serve_nodes_and_clusters(self): - instance = self._make_one(self.INSTANCE_ID, None) - - with self.assertRaises(ValueError): - instance.create(serve_nodes=3, clusters=[object(), object()]) - - def test_create_check_default_storage_type_and_clusters(self): - instance = self._make_one(self.INSTANCE_ID, None) - - with self.assertRaises(ValueError): - instance.create(default_storage_type=1, clusters=[object(), object()]) - - def _instance_api_response_for_create(self): - import datetime - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), - ) - response = operation.from_gapic( - response_pb, - mock.Mock(), - instance_pb2.Instance, - metadata_type=messages_v2_pb2.CreateInstanceMetadata, - ) - project_path_template = "projects/{}" - location_path_template = "projects/{}/locations/{}" - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - instance_api.create_instance.return_value = response - instance_api.project_path = project_path_template.format - instance_api.location_path = location_path_template.format - return instance_api, response - - def test_create(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable_admin_v2.types import instance_pb2 - import warnings - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one( - self.INSTANCE_ID, - client, - self.DISPLAY_NAME, - enums.Instance.Type.PRODUCTION, - self.LABELS, - ) - instance_api, response = 
self._instance_api_response_for_create() - client._instance_admin_client = instance_api - serve_nodes = 3 - - with warnings.catch_warnings(record=True) as warned: - result = instance.create( - location_id=self.LOCATION_ID, serve_nodes=serve_nodes - ) - - cluster_pb = instance_pb2.Cluster( - location=instance_api.location_path(self.PROJECT, self.LOCATION_ID), - serve_nodes=serve_nodes, - default_storage_type=enums.StorageType.UNSPECIFIED, - ) - instance_pb = instance_pb2.Instance( - display_name=self.DISPLAY_NAME, - type=enums.Instance.Type.PRODUCTION, - labels=self.LABELS, - ) - cluster_id = "{}-cluster".format(self.INSTANCE_ID) - instance_api.create_instance.assert_called_once_with( - parent=instance_api.project_path(self.PROJECT), - instance_id=self.INSTANCE_ID, - instance=instance_pb, - clusters={cluster_id: cluster_pb}, - ) - - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - self.assertIs(result, response) - - def test_create_w_clusters(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one( - self.INSTANCE_ID, - client, - self.DISPLAY_NAME, - enums.Instance.Type.PRODUCTION, - self.LABELS, - ) - instance_api, response = self._instance_api_response_for_create() - client._instance_admin_client = instance_api - - # Perform the method and check the result. - cluster_id_1 = "cluster-1" - cluster_id_2 = "cluster-2" - location_id_1 = "location-id-1" - location_id_2 = "location-id-2" - serve_nodes_1 = 3 - serve_nodes_2 = 5 - clusters = [ - Cluster( - cluster_id_1, - instance, - location_id=location_id_1, - serve_nodes=serve_nodes_1, - ), - Cluster( - cluster_id_2, - instance, - location_id=location_id_2, - serve_nodes=serve_nodes_2, - ), - ] - - result = instance.create(clusters=clusters) - - cluster_pb_1 = instance_pb2.Cluster( - location=instance_api.location_path(self.PROJECT, location_id_1), - serve_nodes=serve_nodes_1, - default_storage_type=enums.StorageType.UNSPECIFIED, - ) - cluster_pb_2 = instance_pb2.Cluster( - location=instance_api.location_path(self.PROJECT, location_id_2), - serve_nodes=serve_nodes_2, - default_storage_type=enums.StorageType.UNSPECIFIED, - ) - instance_pb = instance_pb2.Instance( - display_name=self.DISPLAY_NAME, - type=enums.Instance.Type.PRODUCTION, - labels=self.LABELS, - ) - instance_api.create_instance.assert_called_once_with( - parent=instance_api.project_path(self.PROJECT), - instance_id=self.INSTANCE_ID, - instance=instance_pb, - clusters={cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2}, - ) - - self.assertIs(result, response) - - def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.api_core import exceptions - - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - - # Create response_pb - instance_name = client.instance_admin_client.instance_path( - self.PROJECT, self.INSTANCE_ID - ) - response_pb = data_v2_pb2.Instance(name=instance_name) - - # Patch the stub used by the API method. 
- client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.get_instance.side_effect = [ - response_pb, - exceptions.NotFound("testing"), - exceptions.BadRequest("testing"), - ] - - # Perform the method and check the result. - non_existing_instance_id = "instance-id-2" - alt_instance_1 = self._make_one(self.INSTANCE_ID, client) - alt_instance_2 = self._make_one(non_existing_instance_id, client) - self.assertTrue(alt_instance_1.exists()) - self.assertFalse(alt_instance_2.exists()) - - with self.assertRaises(exceptions.BadRequest): - alt_instance_2.exists() - - def test_reload(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable import enums - - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - # Create response_pb - DISPLAY_NAME = u"hey-hi-hello" - instance_type = enums.Instance.Type.PRODUCTION - response_pb = data_v2_pb2.Instance( - display_name=DISPLAY_NAME, type=instance_type, labels=self.LABELS - ) - - # Patch the stub used by the API method. - client._instance_admin_client = api - bigtable_instance_stub = client._instance_admin_client.transport - bigtable_instance_stub.get_instance.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # reload() has no return value. - - # Check Instance optional config values before. - self.assertEqual(instance.display_name, self.INSTANCE_ID) - - # Perform the method and check the result. - result = instance.reload() - self.assertEqual(result, expected_result) - - # Check Instance optional config values before. 
- self.assertEqual(instance.display_name, DISPLAY_NAME) - - def _instance_api_response_for_update(self): - import datetime - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), - ) - response = operation.from_gapic( - response_pb, - mock.Mock(), - instance_pb2.Instance, - metadata_type=messages_v2_pb2.UpdateInstanceMetadata, - ) - instance_path_template = "projects/{project}/instances/{instance}" - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - instance_api.partial_update_instance.return_value = response - instance_api.instance_path = instance_path_template.format - return instance_api, response - - def test_update(self): - from google.cloud.bigtable import enums - from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one( - self.INSTANCE_ID, - client, - display_name=self.DISPLAY_NAME, - instance_type=enums.Instance.Type.DEVELOPMENT, - labels=self.LABELS, - ) - instance_api, response = self._instance_api_response_for_update() - client._instance_admin_client = instance_api - - result = instance.update() - - instance_pb = instance_pb2.Instance( - name=instance.name, - display_name=instance.display_name, - type=instance.type_, - labels=instance.labels, - ) - update_mask_pb = field_mask_pb2.FieldMask( - paths=["display_name", "type", "labels"] - ) - - instance_api.partial_update_instance.assert_called_once_with( - instance=instance_pb, update_mask=update_mask_pb - ) - - self.assertIs(result, response) - - def test_update_empty(self): - from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(None, client) - instance_api, response = self._instance_api_response_for_update() - client._instance_admin_client = instance_api - - result = instance.update() - - instance_pb = instance_pb2.Instance( - name=instance.name, - display_name=instance.display_name, - type=instance.type_, - labels=instance.labels, - ) - update_mask_pb = field_mask_pb2.FieldMask() - - instance_api.partial_update_instance.assert_called_once_with( - instance=instance_pb, update_mask=update_mask_pb - ) - - self.assertIs(result, response) - - def test_delete(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = 
self._make_one(self.INSTANCE_ID, client) - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - instance_api.delete_instance.return_value = None - client._instance_admin_client = instance_api - - result = instance.delete() - - instance_api.delete_instance.assert_called_once_with(instance.name) - - self.assertIsNone(result) - - def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] - iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - client._instance_admin_client = instance_api - instance_api.get_iam_policy.return_value = iam_policy - - # Perform the method and check the result. - result = instance.get_iam_policy() - - instance_api.get_iam_policy.assert_called_once_with(resource=instance.name) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_get_iam_policy_w_requested_policy_version(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.iam.v1 import policy_pb2, options_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] - iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - client._instance_admin_client = instance_api - instance_api.get_iam_policy.return_value = iam_policy - - # Perform the method and check the result. 
- result = instance.get_iam_policy(requested_policy_version=3) - - instance_api.get_iam_policy.assert_called_once_with( - resource=instance.name, - options_=options_pb2.GetPolicyOptions(requested_policy_version=3), - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] - iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - instance_api.set_iam_policy.return_value = iam_policy_pb - client._instance_admin_client = instance_api - - # Perform the method and check the result. - iam_policy = Policy(etag=etag, version=version) - iam_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.user("user1@test.com"), - Policy.service_account("service_acc1@test.com"), - ] - - result = instance.set_iam_policy(iam_policy) - - instance_api.set_iam_policy.assert_called_once_with( - resource=instance.name, policy=iam_policy_pb - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.iam.v1 import iam_policy_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - permissions = ["bigtable.tables.create", "bigtable.clusters.create"] - - response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - instance_api.test_iam_permissions.return_value = response - client._instance_admin_client = instance_api - - result = instance.test_iam_permissions(permissions) - - self.assertEqual(result, permissions) - instance_api.test_iam_permissions.assert_called_once_with( - resource=instance.name, permissions=permissions - ) - - def test_cluster_factory(self): - from google.cloud.bigtable import enums - - CLUSTER_ID = "{}-cluster".format(self.INSTANCE_ID) - LOCATION_ID = "us-central1-c" - SERVE_NODES = 3 - STORAGE_TYPE = enums.StorageType.HDD - - instance = self._make_one(self.INSTANCE_ID, None) - - cluster = instance.cluster( - CLUSTER_ID, - location_id=LOCATION_ID, - serve_nodes=SERVE_NODES, - default_storage_type=STORAGE_TYPE, - ) - self.assertIsInstance(cluster, Cluster) - self.assertEqual(cluster.cluster_id, CLUSTER_ID) - 
self.assertEqual(cluster.location_id, LOCATION_ID) - self.assertIsNone(cluster._state) - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE) - - def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable.instance import Cluster - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = Instance(self.INSTANCE_ID, client) - - failed_location = "FAILED" - cluster_id1 = "cluster-id1" - cluster_id2 = "cluster-id2" - cluster_path_template = "projects/{}/instances/{}/clusters/{}" - cluster_name1 = cluster_path_template.format( - self.PROJECT, self.INSTANCE_ID, cluster_id1 - ) - cluster_name2 = cluster_path_template.format( - self.PROJECT, self.INSTANCE_ID, cluster_id2 - ) - - # Create response_pb - response_pb = messages_v2_pb2.ListClustersResponse( - failed_locations=[failed_location], - clusters=[ - data_v2_pb2.Cluster(name=cluster_name1), - data_v2_pb2.Cluster(name=cluster_name2), - ], - ) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - instance_api.list_clusters.side_effect = [response_pb] - instance_api.cluster_path = cluster_path_template.format - client._instance_admin_client = instance_api - - # Perform the method and check the result. - clusters, failed_locations = instance.list_clusters() - - cluster_1, cluster_2 = clusters - - self.assertIsInstance(cluster_1, Cluster) - self.assertEqual(cluster_1.name, cluster_name1) - - self.assertIsInstance(cluster_2, Cluster) - self.assertEqual(cluster_2.name, cluster_name2) - - self.assertEqual(failed_locations, [failed_location]) - - def test_table_factory(self): - from google.cloud.bigtable.table import Table - - app_profile_id = "appProfileId1262094415" - instance = self._make_one(self.INSTANCE_ID, None) - - table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id) - self.assertIsInstance(table, Table) - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertEqual(table._instance, instance) - self.assertEqual(table._app_profile_id, app_profile_id) - - def _list_tables_helper(self, table_name=None): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2, - ) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client, - bigtable_instance_admin_client, - ) - - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - # Create response_pb - if table_name is None: - table_name = self.TABLE_NAME - - response_pb = table_messages_v1_pb2.ListTablesResponse( - tables=[table_data_v2_pb2.Table(name=table_name)] - ) - - # Patch the stub used by the API method. 
- client._table_admin_client = table_api - client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client.transport - bigtable_table_stub.list_tables.side_effect = [response_pb] - - # Create expected_result. - expected_table = instance.table(self.TABLE_ID) - expected_result = [expected_table] - - # Perform the method and check the result. - result = instance.list_tables() - - self.assertEqual(result, expected_result) - - def test_list_tables(self): - self._list_tables_helper() - - def test_list_tables_failure_bad_split(self): - with self.assertRaises(ValueError): - self._list_tables_helper(table_name="wrong-format") - - def test_list_tables_failure_name_bad_before(self): - BAD_TABLE_NAME = ( - "nonempty-section-before" - + "projects/" - + self.PROJECT - + "/instances/" - + self.INSTANCE_ID - + "/tables/" - + self.TABLE_ID - ) - with self.assertRaises(ValueError): - self._list_tables_helper(table_name=BAD_TABLE_NAME) - - def test_app_profile_factory(self): - from google.cloud.bigtable.enums import RoutingPolicyType - - APP_PROFILE_ID_1 = "app-profile-id-1" - ANY = RoutingPolicyType.ANY - DESCRIPTION_1 = "routing policy any" - APP_PROFILE_ID_2 = "app-profile-id-2" - SINGLE = RoutingPolicyType.SINGLE - DESCRIPTION_2 = "routing policy single" - ALLOW_WRITES = True - CLUSTER_ID = "cluster-id" - - instance = self._make_one(self.INSTANCE_ID, None) - - app_profile1 = instance.app_profile( - APP_PROFILE_ID_1, routing_policy_type=ANY, description=DESCRIPTION_1 - ) - - app_profile2 = instance.app_profile( - APP_PROFILE_ID_2, - routing_policy_type=SINGLE, - description=DESCRIPTION_2, - cluster_id=CLUSTER_ID, - allow_transactional_writes=ALLOW_WRITES, - ) - self.assertEqual(app_profile1.app_profile_id, APP_PROFILE_ID_1) - self.assertIs(app_profile1._instance, instance) - self.assertEqual(app_profile1.routing_policy_type, ANY) - self.assertEqual(app_profile1.description, DESCRIPTION_1) - self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2) - self.assertIs(app_profile2._instance, instance) - self.assertEqual(app_profile2.routing_policy_type, SINGLE) - self.assertEqual(app_profile2.description, DESCRIPTION_2) - self.assertEqual(app_profile2.cluster_id, CLUSTER_ID) - self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES) - - def test_list_app_profiles(self): - from google.api_core.page_iterator import Iterator - from google.api_core.page_iterator import Page - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable.app_profile import AppProfile - - class _Iterator(Iterator): - def __init__(self, pages): - super(_Iterator, self).__init__(client=None) - self._pages = pages - - def _next_page(self): - if self._pages: - page, self._pages = self._pages[0], self._pages[1:] - return Page(self, page, self.item_to_value) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - # Setup Expected Response - app_profile_path_template = "projects/{}/instances/{}/appProfiles/{}" - app_profile_id1 = "app-profile-id1" - app_profile_id2 = "app-profile-id2" - app_profile_name1 = app_profile_path_template.format( - self.PROJECT, self.INSTANCE_ID, app_profile_id1 - ) - app_profile_name2 = app_profile_path_template.format( - self.PROJECT, self.INSTANCE_ID, app_profile_id2 - ) - routing_policy = 
data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() - - app_profiles = [ - data_v2_pb2.AppProfile( - name=app_profile_name1, multi_cluster_routing_use_any=routing_policy - ), - data_v2_pb2.AppProfile( - name=app_profile_name2, multi_cluster_routing_use_any=routing_policy - ), - ] - iterator = _Iterator(pages=[app_profiles]) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) - client._instance_admin_client = instance_api - instance_api.app_profile_path = app_profile_path_template.format - instance_api.list_app_profiles.return_value = iterator - - # Perform the method and check the result. - app_profiles = instance.list_app_profiles() - - app_profile_1, app_profile_2 = app_profiles - - self.assertIsInstance(app_profile_1, AppProfile) - self.assertEqual(app_profile_1.name, app_profile_name1) - - self.assertIsInstance(app_profile_2, AppProfile) - self.assertEqual(app_profile_2.name, app_profile_name2) diff --git a/bigtable/tests/unit/test_policy.py b/bigtable/tests/unit/test_policy.py deleted file mode 100644 index 63f9ba03fb23..000000000000 --- a/bigtable/tests/unit/test_policy.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - - -class TestPolicy(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.policy import Policy - - return Policy - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor_defaults(self): - empty = frozenset() - policy = self._make_one() - self.assertIsNone(policy.etag) - self.assertIsNone(policy.version) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_ctor_explicit(self): - VERSION = 1 - ETAG = b"ETAG" - empty = frozenset() - policy = self._make_one(ETAG, VERSION) - self.assertEqual(policy.etag, ETAG) - self.assertEqual(policy.version, VERSION) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_bigtable_admins_getter(self): - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - MEMBER = "user:phred@example.com" - expected = frozenset([MEMBER]) - policy = self._make_one() - policy[BIGTABLE_ADMIN_ROLE] = [MEMBER] - self.assertEqual(policy.bigtable_admins, expected) - - def test_bigtable_readers_getter(self): - from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE - - MEMBER = "user:phred@example.com" - expected = frozenset([MEMBER]) - policy = self._make_one() - policy[BIGTABLE_READER_ROLE] = [MEMBER] - self.assertEqual(policy.bigtable_readers, expected) - - def test_bigtable_users_getter(self): - from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE - - MEMBER = "user:phred@example.com" - expected = frozenset([MEMBER]) - policy = self._make_one() - policy[BIGTABLE_USER_ROLE] = [MEMBER] - self.assertEqual(policy.bigtable_users, expected) - - def test_bigtable_viewers_getter(self): - from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE - - MEMBER = "user:phred@example.com" - expected = frozenset([MEMBER]) - policy = self._make_one() - policy[BIGTABLE_VIEWER_ROLE] = [MEMBER] - self.assertEqual(policy.bigtable_viewers, expected) - - def test_from_pb_empty(self): - from google.iam.v1 import policy_pb2 - - empty = frozenset() - message = policy_pb2.Policy() - klass = self._get_target_class() - policy = klass.from_pb(message) - self.assertEqual(policy.etag, b"") - self.assertEqual(policy.version, 0) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_from_pb_non_empty(self): - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - ETAG = b"ETAG" - VERSION = 1 - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - empty = frozenset() - message = policy_pb2.Policy( - etag=ETAG, - version=VERSION, - bindings=[{"role": BIGTABLE_ADMIN_ROLE, "members": members}], - ) - klass = self._get_target_class() - policy = klass.from_pb(message) - self.assertEqual(policy.etag, ETAG) - self.assertEqual(policy.version, VERSION) - self.assertEqual(policy.bigtable_admins, set(members)) - 
self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 1) - self.assertEqual(dict(policy), {BIGTABLE_ADMIN_ROLE: set(members)}) - - def test_from_pb_with_condition(self): - import pytest - from google.iam.v1 import policy_pb2 - from google.api_core.iam import InvalidOperationException, _DICT_ACCESS_MSG - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - ETAG = b"ETAG" - VERSION = 3 - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - BINDINGS = [ - { - "role": BIGTABLE_ADMIN_ROLE, - "members": members, - "condition": { - "title": "request_time", - "description": "Requests made before 2021-01-01T00:00:00Z", - "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")', - }, - } - ] - message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,) - klass = self._get_target_class() - policy = klass.from_pb(message) - self.assertEqual(policy.etag, ETAG) - self.assertEqual(policy.version, VERSION) - self.assertEqual(policy.bindings[0]["role"], BIGTABLE_ADMIN_ROLE) - self.assertEqual(policy.bindings[0]["members"], set(members)) - self.assertEqual(policy.bindings[0]["condition"], BINDINGS[0]["condition"]) - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - policy.bigtable_admins - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - policy.bigtable_readers - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - policy.bigtable_users - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - policy.bigtable_viewers - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - len(policy) - - def test_to_pb_empty(self): - from google.iam.v1 import policy_pb2 - - policy = self._make_one() - expected = policy_pb2.Policy() - - self.assertEqual(policy.to_pb(), expected) - - def test_to_pb_explicit(self): - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - VERSION = 1 - ETAG = b"ETAG" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - policy = self._make_one(ETAG, VERSION) - policy[BIGTABLE_ADMIN_ROLE] = members - expected = policy_pb2.Policy( - etag=ETAG, - version=VERSION, - bindings=[ - policy_pb2.Binding(role=BIGTABLE_ADMIN_ROLE, members=sorted(members)) - ], - ) - - self.assertEqual(policy.to_pb(), expected) - - def test_to_pb_with_condition(self): - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - VERSION = 3 - ETAG = b"ETAG" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - condition = { - "title": "request_time", - "description": "Requests made before 2021-01-01T00:00:00Z", - "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")', - } - policy = self._make_one(ETAG, VERSION) - policy.bindings = [ - { - "role": BIGTABLE_ADMIN_ROLE, - "members": set(members), - "condition": condition, - } - ] - expected = policy_pb2.Policy( - etag=ETAG, - version=VERSION, - bindings=[ - policy_pb2.Binding( - role=BIGTABLE_ADMIN_ROLE, - members=sorted(members), - condition=condition, - ) - ], - ) - - self.assertEqual(policy.to_pb(), expected) - - def test_from_api_repr_wo_etag(self): - VERSION = 1 - empty = frozenset() - resource = {"version": VERSION} - klass = self._get_target_class() - policy = klass.from_api_repr(resource) - self.assertIsNone(policy.etag) - 
self.assertEqual(policy.version, VERSION) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_from_api_repr_w_etag(self): - import base64 - - ETAG = b"ETAG" - empty = frozenset() - resource = {"etag": base64.b64encode(ETAG).decode("ascii")} - klass = self._get_target_class() - policy = klass.from_api_repr(resource) - self.assertEqual(policy.etag, ETAG) - self.assertIsNone(policy.version) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_to_api_repr_wo_etag(self): - VERSION = 1 - resource = {"version": VERSION} - policy = self._make_one(version=VERSION) - self.assertEqual(policy.to_api_repr(), resource) - - def test_to_api_repr_w_etag(self): - import base64 - - ETAG = b"ETAG" - policy = self._make_one(etag=ETAG) - resource = {"etag": base64.b64encode(ETAG).decode("ascii")} - self.assertEqual(policy.to_api_repr(), resource) diff --git a/bigtable/tests/unit/test_row.py b/bigtable/tests/unit/test_row.py deleted file mode 100644 index 47424d910d97..000000000000 --- a/bigtable/tests/unit/test_row.py +++ /dev/null @@ -1,835 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import unittest - -import mock - -from ._testing import _make_credentials - - -class TestRow(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row import Row - - return Row - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_row_key_getter(self): - row = self._make_one(row_key=b"row_key", table="table") - self.assertEqual(b"row_key", row.row_key) - - def test_row_table_getter(self): - row = self._make_one(row_key=b"row_key", table="table") - self.assertEqual("table", row.table) - - -class Test_SetDeleteRow(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row import _SetDeleteRow - - return _SetDeleteRow - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test__get_mutations_virtual(self): - row = self._make_one(b"row-key", None) - with self.assertRaises(NotImplementedError): - row._get_mutations(None) - - -class TestDirectRow(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row import DirectRow - - return DirectRow - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor(self): - row_key = b"row_key" - table = object() - - row = self._make_one(row_key, table) - self.assertEqual(row._row_key, row_key) - self.assertIs(row._table, table) - self.assertEqual(row._pb_mutations, []) - - def test_constructor_with_unicode(self): - row_key = u"row_key" - row_key_bytes = b"row_key" - table = object() - - row = self._make_one(row_key, table) - self.assertEqual(row._row_key, row_key_bytes) - self.assertIs(row._table, table) - - def test_constructor_with_non_bytes(self): - row_key = object() - with self.assertRaises(TypeError): - self._make_one(row_key, None) - - def test__get_mutations(self): - row_key = b"row_key" - row = self._make_one(row_key, None) - - row._pb_mutations = mutations = object() - self.assertIs(mutations, row._get_mutations(None)) - - def test_get_mutations_size(self): - row_key = b"row_key" - row = self._make_one(row_key, None) - - column_family_id1 = u"column_family_id1" - column_family_id2 = u"column_family_id2" - column1 = b"column1" - column2 = b"column2" - number_of_bytes = 1 * 1024 * 1024 - value = b"1" * number_of_bytes - - row.set_cell(column_family_id1, column1, value) - row.set_cell(column_family_id2, column2, value) - - total_mutations_size = 0 - for mutation in row._get_mutations(): - total_mutations_size += mutation.ByteSize() - - self.assertEqual(row.get_mutations_size(), total_mutations_size) - - def _set_cell_helper( - self, - column=None, - column_bytes=None, - value=b"foobar", - timestamp=None, - timestamp_micros=-1, - ): - import six - import struct - - row_key = b"row_key" - column_family_id = u"column_family_id" - if column is None: - column = b"column" - table = object() - row = self._make_one(row_key, table) - self.assertEqual(row._pb_mutations, []) - row.set_cell(column_family_id, column, value, timestamp=timestamp) - - if isinstance(value, six.integer_types): - value = struct.pack(">q", value) - expected_pb = _MutationPB( - set_cell=_MutationSetCellPB( - family_name=column_family_id, - column_qualifier=column_bytes or column, - 
timestamp_micros=timestamp_micros, - value=value, - ) - ) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_set_cell(self): - self._set_cell_helper() - - def test_set_cell_with_string_column(self): - column_bytes = b"column" - column_non_bytes = u"column" - self._set_cell_helper(column=column_non_bytes, column_bytes=column_bytes) - - def test_set_cell_with_integer_value(self): - value = 1337 - self._set_cell_helper(value=value) - - def test_set_cell_with_non_bytes_value(self): - row_key = b"row_key" - column = b"column" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - value = object() # Not bytes - with self.assertRaises(TypeError): - row.set_cell(column_family_id, column, value) - - def test_set_cell_with_non_null_timestamp(self): - import datetime - from google.cloud._helpers import _EPOCH - - microseconds = 898294371 - millis_granularity = microseconds - (microseconds % 1000) - timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds) - self._set_cell_helper(timestamp=timestamp, timestamp_micros=millis_granularity) - - def test_delete(self): - row_key = b"row_key" - row = self._make_one(row_key, object()) - self.assertEqual(row._pb_mutations, []) - row.delete() - - expected_pb = _MutationPB(delete_from_row=_MutationDeleteFromRowPB()) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_delete_cell(self): - klass = self._get_target_class() - - class MockRow(klass): - def __init__(self, *args, **kwargs): - super(MockRow, self).__init__(*args, **kwargs) - self._args = [] - self._kwargs = [] - - # Replace the called method with one that logs arguments. - def _delete_cells(self, *args, **kwargs): - self._args.append(args) - self._kwargs.append(kwargs) - - row_key = b"row_key" - column = b"column" - column_family_id = u"column_family_id" - table = object() - - mock_row = MockRow(row_key, table) - # Make sure no values are set before calling the method. - self.assertEqual(mock_row._pb_mutations, []) - self.assertEqual(mock_row._args, []) - self.assertEqual(mock_row._kwargs, []) - - # Actually make the request against the mock class. 
- time_range = object() - mock_row.delete_cell(column_family_id, column, time_range=time_range) - self.assertEqual(mock_row._pb_mutations, []) - self.assertEqual(mock_row._args, [(column_family_id, [column])]) - self.assertEqual(mock_row._kwargs, [{"state": None, "time_range": time_range}]) - - def test_delete_cells_non_iterable(self): - row_key = b"row_key" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - columns = object() # Not iterable - with self.assertRaises(TypeError): - row.delete_cells(column_family_id, columns) - - def test_delete_cells_all_columns(self): - row_key = b"row_key" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - klass = self._get_target_class() - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, klass.ALL_COLUMNS) - - expected_pb = _MutationPB( - delete_from_family=_MutationDeleteFromFamilyPB(family_name=column_family_id) - ) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_delete_cells_no_columns(self): - row_key = b"row_key" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - columns = [] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns) - self.assertEqual(row._pb_mutations, []) - - def _delete_cells_helper(self, time_range=None): - row_key = b"row_key" - column = b"column" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - columns = [column] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns, time_range=time_range) - - expected_pb = _MutationPB( - delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, column_qualifier=column - ) - ) - if time_range is not None: - expected_pb.delete_from_column.time_range.CopyFrom(time_range.to_pb()) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_delete_cells_no_time_range(self): - self._delete_cells_helper() - - def test_delete_cells_with_time_range(self): - import datetime - from google.cloud._helpers import _EPOCH - from google.cloud.bigtable.row_filters import TimestampRange - - microseconds = 30871000 # Makes sure already milliseconds granularity - start = _EPOCH + datetime.timedelta(microseconds=microseconds) - time_range = TimestampRange(start=start) - self._delete_cells_helper(time_range=time_range) - - def test_delete_cells_with_bad_column(self): - # This makes sure a failure on one of the columns doesn't leave - # the row's mutations in a bad state. 
- row_key = b"row_key" - column = b"column" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - columns = [column, object()] - self.assertEqual(row._pb_mutations, []) - with self.assertRaises(TypeError): - row.delete_cells(column_family_id, columns) - self.assertEqual(row._pb_mutations, []) - - def test_delete_cells_with_string_columns(self): - row_key = b"row_key" - column_family_id = u"column_family_id" - column1 = u"column1" - column1_bytes = b"column1" - column2 = u"column2" - column2_bytes = b"column2" - table = object() - - row = self._make_one(row_key, table) - columns = [column1, column2] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns) - - expected_pb1 = _MutationPB( - delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, column_qualifier=column1_bytes - ) - ) - expected_pb2 = _MutationPB( - delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, column_qualifier=column2_bytes - ) - ) - self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2]) - - def test_commit(self): - project_id = "project-id" - row_key = b"row_key" - table_name = "projects/more-stuff" - column_family_id = u"column_family_id" - column = b"column" - - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - row = self._make_one(row_key, table) - value = b"bytes-value" - - # Perform the method and check the result. - row.set_cell(column_family_id, column, value) - row.commit() - self.assertEqual(table.mutated_rows, [row]) - - -class TestConditionalRow(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row import ConditionalRow - - return ConditionalRow - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor(self): - row_key = b"row_key" - table = object() - filter_ = object() - - row = self._make_one(row_key, table, filter_=filter_) - self.assertEqual(row._row_key, row_key) - self.assertIs(row._table, table) - self.assertIs(row._filter, filter_) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) - - def test__get_mutations(self): - row_key = b"row_key" - filter_ = object() - row = self._make_one(row_key, None, filter_=filter_) - - row._true_pb_mutations = true_mutations = object() - row._false_pb_mutations = false_mutations = object() - self.assertIs(true_mutations, row._get_mutations(True)) - self.assertIs(false_mutations, row._get_mutations(False)) - self.assertIs(false_mutations, row._get_mutations(None)) - - def test_commit(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable_v2.gapic import bigtable_client - - project_id = "project-id" - row_key = b"row_key" - table_name = "projects/more-stuff" - app_profile_id = "app_profile_id" - column_family_id1 = u"column_family_id1" - column_family_id2 = u"column_family_id2" - column_family_id3 = u"column_family_id3" - column1 = b"column1" - column2 = b"column2" - - api = bigtable_client.BigtableClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=project_id, 
credentials=credentials, admin=True - ) - table = _Table(table_name, client=client, app_profile_id=app_profile_id) - row_filter = RowSampleFilter(0.33) - row = self._make_one(row_key, table, filter_=row_filter) - - # Create request_pb - value1 = b"bytes-value" - - # Create response_pb - predicate_matched = True - response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched) - - # Patch the stub used by the API method. - api.transport.check_and_mutate_row.side_effect = [response_pb] - client._table_data_client = api - - # Create expected_result. - expected_result = predicate_matched - - # Perform the method and check the result. - row.set_cell(column_family_id1, column1, value1, state=True) - row.delete(state=False) - row.delete_cell(column_family_id2, column2, state=True) - row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) - result = row.commit() - call_args = api.transport.check_and_mutate_row.call_args.args[0] - self.assertEqual(app_profile_id, call_args.app_profile_id) - self.assertEqual(result, expected_result) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) - - def test_commit_too_many_mutations(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import row as MUT - - row_key = b"row_key" - table = object() - filter_ = object() - row = self._make_one(row_key, table, filter_=filter_) - row._true_pb_mutations = [1, 2, 3] - num_mutations = len(row._true_pb_mutations) - with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): - with self.assertRaises(ValueError): - row.commit() - - def test_commit_no_mutations(self): - from tests.unit._testing import _FakeStub - - project_id = "project-id" - row_key = b"row_key" - - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(None, client=client) - filter_ = object() - row = self._make_one(row_key, table, filter_=filter_) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) - - # Patch the stub used by the API method. - stub = _FakeStub() - - # Perform the method and check the result. - result = row.commit() - self.assertIsNone(result) - # Make sure no request was sent. 
- self.assertEqual(stub.method_calls, []) - - -class TestAppendRow(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row import AppendRow - - return AppendRow - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor(self): - row_key = b"row_key" - table = object() - - row = self._make_one(row_key, table) - self.assertEqual(row._row_key, row_key) - self.assertIs(row._table, table) - self.assertEqual(row._rule_pb_list, []) - - def test_clear(self): - row_key = b"row_key" - table = object() - row = self._make_one(row_key, table) - row._rule_pb_list = [1, 2, 3] - row.clear() - self.assertEqual(row._rule_pb_list, []) - - def test_append_cell_value(self): - table = object() - row_key = b"row_key" - row = self._make_one(row_key, table) - self.assertEqual(row._rule_pb_list, []) - - column = b"column" - column_family_id = u"column_family_id" - value = b"bytes-val" - row.append_cell_value(column_family_id, column, value) - expected_pb = _ReadModifyWriteRulePB( - family_name=column_family_id, column_qualifier=column, append_value=value - ) - self.assertEqual(row._rule_pb_list, [expected_pb]) - - def test_increment_cell_value(self): - table = object() - row_key = b"row_key" - row = self._make_one(row_key, table) - self.assertEqual(row._rule_pb_list, []) - - column = b"column" - column_family_id = u"column_family_id" - int_value = 281330 - row.increment_cell_value(column_family_id, column, int_value) - expected_pb = _ReadModifyWriteRulePB( - family_name=column_family_id, - column_qualifier=column, - increment_amount=int_value, - ) - self.assertEqual(row._rule_pb_list, [expected_pb]) - - def test_commit(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import row as MUT - from google.cloud.bigtable_v2.gapic import bigtable_client - - project_id = "project-id" - row_key = b"row_key" - table_name = "projects/more-stuff" - app_profile_id = "app_profile_id" - column_family_id = u"column_family_id" - column = b"column" - - api = bigtable_client.BigtableClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client, app_profile_id=app_profile_id) - row = self._make_one(row_key, table) - - # Create request_pb - value = b"bytes-value" - - # Create expected_result. - row_responses = [] - expected_result = object() - - # Patch API calls - client._table_data_client = api - - def mock_parse_rmw_row_response(row_response): - row_responses.append(row_response) - return expected_result - - # Perform the method and check the result. 
- with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response): - row.append_cell_value(column_family_id, column, value) - result = row.commit() - call_args = api.transport.read_modify_write_row.call_args.args[0] - self.assertEqual(app_profile_id, call_args.app_profile_id) - self.assertEqual(result, expected_result) - self.assertEqual(row._rule_pb_list, []) - - def test_commit_no_rules(self): - from tests.unit._testing import _FakeStub - - project_id = "project-id" - row_key = b"row_key" - - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(None, client=client) - row = self._make_one(row_key, table) - self.assertEqual(row._rule_pb_list, []) - - # Patch the stub used by the API method. - stub = _FakeStub() - - # Perform the method and check the result. - result = row.commit() - self.assertEqual(result, {}) - # Make sure no request was sent. - self.assertEqual(stub.method_calls, []) - - def test_commit_too_many_mutations(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import row as MUT - - row_key = b"row_key" - table = object() - row = self._make_one(row_key, table) - row._rule_pb_list = [1, 2, 3] - num_mutations = len(row._rule_pb_list) - with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): - with self.assertRaises(ValueError): - row.commit() - - -class Test__parse_rmw_row_response(unittest.TestCase): - def _call_fut(self, row_response): - from google.cloud.bigtable.row import _parse_rmw_row_response - - return _parse_rmw_row_response(row_response) - - def test_it(self): - from google.cloud._helpers import _datetime_from_microseconds - - col_fam1 = u"col-fam-id" - col_fam2 = u"col-fam-id2" - col_name1 = b"col-name1" - col_name2 = b"col-name2" - col_name3 = b"col-name3-but-other-fam" - cell_val1 = b"cell-val" - cell_val2 = b"cell-val-newer" - cell_val3 = b"altcol-cell-val" - cell_val4 = b"foo" - - microseconds = 1000871 - timestamp = _datetime_from_microseconds(microseconds) - expected_output = { - col_fam1: { - col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)], - col_name2: [(cell_val3, timestamp)], - }, - col_fam2: {col_name3: [(cell_val4, timestamp)]}, - } - response_row = _RowPB( - families=[ - _FamilyPB( - name=col_fam1, - columns=[ - _ColumnPB( - qualifier=col_name1, - cells=[ - _CellPB(value=cell_val1, timestamp_micros=microseconds), - _CellPB(value=cell_val2, timestamp_micros=microseconds), - ], - ), - _ColumnPB( - qualifier=col_name2, - cells=[ - _CellPB(value=cell_val3, timestamp_micros=microseconds) - ], - ), - ], - ), - _FamilyPB( - name=col_fam2, - columns=[ - _ColumnPB( - qualifier=col_name3, - cells=[ - _CellPB(value=cell_val4, timestamp_micros=microseconds) - ], - ) - ], - ), - ] - ) - sample_input = _ReadModifyWriteRowResponsePB(row=response_row) - self.assertEqual(expected_output, self._call_fut(sample_input)) - - -class Test__parse_family_pb(unittest.TestCase): - def _call_fut(self, family_pb): - from google.cloud.bigtable.row import _parse_family_pb - - return _parse_family_pb(family_pb) - - def test_it(self): - from google.cloud._helpers import _datetime_from_microseconds - - col_fam1 = u"col-fam-id" - col_name1 = b"col-name1" - col_name2 = b"col-name2" - cell_val1 = b"cell-val" - cell_val2 = b"cell-val-newer" - cell_val3 = b"altcol-cell-val" - - microseconds = 5554441037 - timestamp = _datetime_from_microseconds(microseconds) - expected_dict = { - col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)], - col_name2: 
[(cell_val3, timestamp)], - } - expected_output = (col_fam1, expected_dict) - sample_input = _FamilyPB( - name=col_fam1, - columns=[ - _ColumnPB( - qualifier=col_name1, - cells=[ - _CellPB(value=cell_val1, timestamp_micros=microseconds), - _CellPB(value=cell_val2, timestamp_micros=microseconds), - ], - ), - _ColumnPB( - qualifier=col_name2, - cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)], - ), - ], - ) - self.assertEqual(expected_output, self._call_fut(sample_input)) - - -def _CheckAndMutateRowResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - - return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) - - -def _ReadModifyWriteRowResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - - return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) - - -def _CellPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Cell(*args, **kw) - - -def _ColumnPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Column(*args, **kw) - - -def _FamilyPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Family(*args, **kw) - - -def _MutationPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Mutation(*args, **kw) - - -def _MutationSetCellPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Mutation.SetCell(*args, **kw) - - -def _MutationDeleteFromColumnPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) - - -def _MutationDeleteFromFamilyPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) - - -def _MutationDeleteFromRowPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) - - -def _RowPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.Row(*args, **kw) - - -def _ReadModifyWriteRulePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.ReadModifyWriteRule(*args, **kw) - - -class _Instance(object): - def __init__(self, client=None): - self._client = client - - -class _Table(object): - def __init__(self, name, client=None, app_profile_id=None): - self.name = name - self._instance = _Instance(client) - self._app_profile_id = app_profile_id - self.client = client - self.mutated_rows = [] - - def mutate_rows(self, rows): - self.mutated_rows.extend(rows) diff --git a/bigtable/tests/unit/test_row_data.py b/bigtable/tests/unit/test_row_data.py deleted file mode 100644 index b787233829b2..000000000000 --- a/bigtable/tests/unit/test_row_data.py +++ /dev/null @@ -1,1309 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest -import mock - -from google.api_core.exceptions import DeadlineExceeded -from ._testing import _make_credentials -from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - return self.channel_stub.responses.pop() - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class TestCell(unittest.TestCase): - timestamp_micros = 18738724000 # Make sure millis granularity - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import Cell - - return Cell - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def _from_pb_test_helper(self, labels=None): - import datetime - from google.cloud._helpers import _EPOCH - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - timestamp_micros = TestCell.timestamp_micros - timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) - value = b"value-bytes" - - if labels is None: - cell_pb = data_v2_pb2.Cell(value=value, timestamp_micros=timestamp_micros) - cell_expected = self._make_one(value, timestamp_micros) - else: - cell_pb = data_v2_pb2.Cell( - value=value, timestamp_micros=timestamp_micros, labels=labels - ) - cell_expected = self._make_one(value, timestamp_micros, labels=labels) - - klass = self._get_target_class() - result = klass.from_pb(cell_pb) - self.assertEqual(result, cell_expected) - self.assertEqual(result.timestamp, timestamp) - - def test_from_pb(self): - self._from_pb_test_helper() - - def test_from_pb_with_labels(self): - labels = [u"label1", u"label2"] - self._from_pb_test_helper(labels) - - def test_constructor(self): - value = object() - cell = self._make_one(value, TestCell.timestamp_micros) - self.assertEqual(cell.value, value) - - def test___eq__(self): - value = object() - cell1 = self._make_one(value, TestCell.timestamp_micros) - cell2 = self._make_one(value, TestCell.timestamp_micros) - self.assertEqual(cell1, cell2) - - def test___eq__type_differ(self): - cell1 = self._make_one(None, None) - cell2 = object() - self.assertNotEqual(cell1, cell2) - - def test___ne__same_value(self): - value = object() - cell1 = self._make_one(value, TestCell.timestamp_micros) - cell2 = self._make_one(value, TestCell.timestamp_micros) - comparison_val = cell1 != cell2 - self.assertFalse(comparison_val) - - def test___ne__(self): - value1 = "value1" - value2 
= "value2" - cell1 = self._make_one(value1, TestCell.timestamp_micros) - cell2 = self._make_one(value2, TestCell.timestamp_micros) - self.assertNotEqual(cell1, cell2) - - -class TestPartialRowData(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import PartialRowData - - return PartialRowData - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - row_key = object() - partial_row_data = self._make_one(row_key) - self.assertIs(partial_row_data._row_key, row_key) - self.assertEqual(partial_row_data._cells, {}) - - def test___eq__(self): - row_key = object() - partial_row_data1 = self._make_one(row_key) - partial_row_data2 = self._make_one(row_key) - self.assertEqual(partial_row_data1, partial_row_data2) - - def test___eq__type_differ(self): - partial_row_data1 = self._make_one(None) - partial_row_data2 = object() - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test___ne__same_value(self): - row_key = object() - partial_row_data1 = self._make_one(row_key) - partial_row_data2 = self._make_one(row_key) - comparison_val = partial_row_data1 != partial_row_data2 - self.assertFalse(comparison_val) - - def test___ne__(self): - row_key1 = object() - partial_row_data1 = self._make_one(row_key1) - row_key2 = object() - partial_row_data2 = self._make_one(row_key2) - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test___ne__cells(self): - row_key = object() - partial_row_data1 = self._make_one(row_key) - partial_row_data1._cells = object() - partial_row_data2 = self._make_one(row_key) - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test_to_dict(self): - cell1 = object() - cell2 = object() - cell3 = object() - - family_name1 = u"name1" - family_name2 = u"name2" - qual1 = b"col1" - qual2 = b"col2" - qual3 = b"col3" - - partial_row_data = self._make_one(None) - partial_row_data._cells = { - family_name1: {qual1: cell1, qual2: cell2}, - family_name2: {qual3: cell3}, - } - - result = partial_row_data.to_dict() - expected_result = { - b"name1:col1": cell1, - b"name1:col2": cell2, - b"name2:col3": cell3, - } - self.assertEqual(result, expected_result) - - def test_cell_value(self): - family_name = u"name1" - qualifier = b"col1" - cell = _make_cell(b"value-bytes") - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {qualifier: [cell]}} - - result = partial_row_data.cell_value(family_name, qualifier) - self.assertEqual(result, cell.value) - - def test_cell_value_invalid_index(self): - family_name = u"name1" - qualifier = b"col1" - cell = _make_cell(b"") - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {qualifier: [cell]}} - - with self.assertRaises(IndexError): - partial_row_data.cell_value(family_name, qualifier, index=None) - - def test_cell_value_invalid_column_family_key(self): - family_name = u"name1" - qualifier = b"col1" - - partial_row_data = self._make_one(None) - - with self.assertRaises(KeyError): - partial_row_data.cell_value(family_name, qualifier) - - def test_cell_value_invalid_column_key(self): - family_name = u"name1" - qualifier = b"col1" - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {}} - - with self.assertRaises(KeyError): - partial_row_data.cell_value(family_name, qualifier) - - def test_cell_values(self): - family_name = u"name1" - qualifier = b"col1" - cell = _make_cell(b"value-bytes") - - 
partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {qualifier: [cell]}} - - values = [] - for value, timestamp_micros in partial_row_data.cell_values( - family_name, qualifier - ): - values.append(value) - - self.assertEqual(values[0], cell.value) - - def test_cell_values_with_max_count(self): - family_name = u"name1" - qualifier = b"col1" - cell_1 = _make_cell(b"value-bytes-1") - cell_2 = _make_cell(b"value-bytes-2") - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {qualifier: [cell_1, cell_2]}} - - values = [] - for value, timestamp_micros in partial_row_data.cell_values( - family_name, qualifier, max_count=1 - ): - values.append(value) - - self.assertEqual(1, len(values)) - self.assertEqual(values[0], cell_1.value) - - def test_cells_property(self): - partial_row_data = self._make_one(None) - cells = {1: 2} - partial_row_data._cells = cells - self.assertEqual(partial_row_data.cells, cells) - - def test_row_key_getter(self): - row_key = object() - partial_row_data = self._make_one(row_key) - self.assertIs(partial_row_data.row_key, row_key) - - -class _Client(object): - - data_stub = None - - -class Test_retry_read_rows_exception(unittest.TestCase): - @staticmethod - def _call_fut(exc): - from google.cloud.bigtable.row_data import _retry_read_rows_exception - - return _retry_read_rows_exception(exc) - - @staticmethod - def _make_grpc_call_error(exception): - from grpc import Call - from grpc import RpcError - - class TestingException(Call, RpcError): - def __init__(self, exception): - self.exception = exception - - def code(self): - return self.exception.grpc_status_code - - def details(self): - return "Testing" - - return TestingException(exception) - - def test_w_miss(self): - from google.api_core.exceptions import Conflict - - exception = Conflict("testing") - self.assertFalse(self._call_fut(exception)) - - def test_w_service_unavailable(self): - from google.api_core.exceptions import ServiceUnavailable - - exception = ServiceUnavailable("testing") - self.assertTrue(self._call_fut(exception)) - - def test_w_deadline_exceeded(self): - from google.api_core.exceptions import DeadlineExceeded - - exception = DeadlineExceeded("testing") - self.assertTrue(self._call_fut(exception)) - - def test_w_miss_wrapped_in_grpc(self): - from google.api_core.exceptions import Conflict - - wrapped = Conflict("testing") - exception = self._make_grpc_call_error(wrapped) - self.assertFalse(self._call_fut(exception)) - - def test_w_service_unavailable_wrapped_in_grpc(self): - from google.api_core.exceptions import ServiceUnavailable - - wrapped = ServiceUnavailable("testing") - exception = self._make_grpc_call_error(wrapped) - self.assertTrue(self._call_fut(exception)) - - def test_w_deadline_exceeded_wrapped_in_grpc(self): - from google.api_core.exceptions import DeadlineExceeded - - wrapped = DeadlineExceeded("testing") - exception = self._make_grpc_call_error(wrapped) - self.assertTrue(self._call_fut(exception)) - - -class TestPartialRowsData(unittest.TestCase): - ROW_KEY = b"row-key" - FAMILY_NAME = u"family" - QUALIFIER = b"qualifier" - TIMESTAMP_MICROS = 100 - VALUE = b"value" - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import PartialRowsData - - return PartialRowsData - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def 
test_constructor(self): - from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS - - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data = self._make_one(client._data_stub.ReadRows, request) - self.assertIs(partial_rows_data.request, request) - self.assertEqual(partial_rows_data.rows, {}) - self.assertEqual(partial_rows_data.retry, DEFAULT_RETRY_READ_ROWS) - - def test_constructor_with_retry(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = retry = object() - partial_rows_data = self._make_one(client._data_stub.ReadRows, request, retry) - self.assertIs(partial_rows_data.request, request) - self.assertEqual(partial_rows_data.rows, {}) - self.assertEqual(partial_rows_data.retry, retry) - - def test___eq__(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) - self.assertEqual(partial_rows_data1.rows, partial_rows_data2.rows) - - def test___eq__type_differ(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data2 = object() - self.assertNotEqual(partial_rows_data1, partial_rows_data2) - - def test___ne__same_value(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) - comparison_val = partial_rows_data1 != partial_rows_data2 - self.assertTrue(comparison_val) - - def test___ne__(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) - self.assertNotEqual(partial_rows_data1, partial_rows_data2) - - def test_rows_getter(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data.rows = value = object() - self.assertIs(partial_rows_data.rows, value) - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_state_start(self): - client = _Client() - iterator = _MockCancellableIterator() - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - self.assertEqual(yrd.state, yrd.NEW_ROW) - - def test_state_new_row_w_row(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] - - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - channel = ChannelStub(responses=[iterator]) - data_api = bigtable_client.BigtableClient(channel=channel) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - request = object() - - yrd = 
self._make_one(client._table_data_client.transport.read_rows, request) - - yrd._response_iterator = iterator - rows = [row for row in yrd] - - result = rows[0] - self.assertEqual(result.row_key, self.ROW_KEY) - self.assertEqual(yrd._counter, 1) - self.assertEqual(yrd.state, yrd.NEW_ROW) - - def test_multiple_chunks(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - - chunk1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=False, - ) - chunk2 = _ReadRowsResponseCellChunkPB( - qualifier=self.QUALIFIER + b"1", - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk1, chunk2] - - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - channel = ChannelStub(responses=[iterator]) - data_api = bigtable_client.BigtableClient(channel=channel) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - request = object() - - yrd = self._make_one(client._table_data_client.transport.read_rows, request) - - yrd._response_iterator = iterator - rows = [row for row in yrd] - result = rows[0] - self.assertEqual(result.row_key, self.ROW_KEY) - self.assertEqual(yrd._counter, 1) - self.assertEqual(yrd.state, yrd.NEW_ROW) - - def test_cancel(self): - client = _Client() - response_iterator = _MockCancellableIterator() - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [response_iterator] - request = object() - yield_rows_data = self._make_one(client._data_stub.ReadRows, request) - self.assertEqual(response_iterator.cancel_calls, 0) - yield_rows_data.cancel() - self.assertEqual(response_iterator.cancel_calls, 1) - self.assertEqual(list(yield_rows_data), []) - - # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' - - def test__copy_from_previous_unset(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - cell = _PartialCellData() - yrd._copy_from_previous(cell) - self.assertEqual(cell.row_key, b"") - self.assertEqual(cell.family_name, u"") - self.assertIsNone(cell.qualifier) - self.assertEqual(cell.timestamp_micros, 0) - self.assertEqual(cell.labels, []) - - def test__copy_from_previous_blank(self): - ROW_KEY = "RK" - FAMILY_NAME = u"A" - QUALIFIER = b"C" - TIMESTAMP_MICROS = 100 - LABELS = ["L1", "L2"] - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - cell = _PartialCellData( - row_key=ROW_KEY, - family_name=FAMILY_NAME, - qualifier=QUALIFIER, - timestamp_micros=TIMESTAMP_MICROS, - labels=LABELS, - ) - yrd._previous_cell = _PartialCellData() - yrd._copy_from_previous(cell) - self.assertEqual(cell.row_key, ROW_KEY) - self.assertEqual(cell.family_name, FAMILY_NAME) - self.assertEqual(cell.qualifier, QUALIFIER) - self.assertEqual(cell.timestamp_micros, TIMESTAMP_MICROS) - self.assertEqual(cell.labels, LABELS) - - def test__copy_from_previous_filled(self): - ROW_KEY = "RK" - FAMILY_NAME = u"A" - QUALIFIER = b"C" - TIMESTAMP_MICROS = 100 - LABELS = ["L1", "L2"] - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - yrd._previous_cell = 
_PartialCellData( - row_key=ROW_KEY, - family_name=FAMILY_NAME, - qualifier=QUALIFIER, - timestamp_micros=TIMESTAMP_MICROS, - labels=LABELS, - ) - cell = _PartialCellData() - yrd._copy_from_previous(cell) - self.assertEqual(cell.row_key, ROW_KEY) - self.assertEqual(cell.family_name, FAMILY_NAME) - self.assertEqual(cell.qualifier, QUALIFIER) - self.assertEqual(cell.timestamp_micros, 0) - self.assertEqual(cell.labels, []) - - def test_valid_last_scanned_row_key_on_start(self): - client = _Client() - response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER") - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - yrd.last_scanned_row_key = "1.BEFORE" - self._consume_all(yrd) - self.assertEqual(yrd.last_scanned_row_key, "2.AFTER") - - def test_invalid_empty_chunk(self): - from google.cloud.bigtable.row_data import InvalidChunk - - client = _Client() - chunks = _generate_cell_chunks([""]) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - with self.assertRaises(InvalidChunk): - self._consume_all(yrd) - - def test_state_cell_in_progress(self): - LABELS = ["L1", "L2"] - - request = object() - read_rows = mock.MagicMock() - yrd = self._make_one(read_rows, request) - - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - labels=LABELS, - ) - yrd._update_cell(chunk) - - more_cell_data = _ReadRowsResponseCellChunkPB(value=self.VALUE) - yrd._update_cell(more_cell_data) - - self.assertEqual(yrd._cell.row_key, self.ROW_KEY) - self.assertEqual(yrd._cell.family_name, self.FAMILY_NAME) - self.assertEqual(yrd._cell.qualifier, self.QUALIFIER) - self.assertEqual(yrd._cell.timestamp_micros, self.TIMESTAMP_MICROS) - self.assertEqual(yrd._cell.labels, LABELS) - self.assertEqual(yrd._cell.value, self.VALUE + self.VALUE) - - def test_yield_rows_data(self): - client = _Client() - - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] - - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - - request = object() - - yrd = self._make_one(client._data_stub.ReadRows, request) - - result = self._consume_all(yrd)[0] - - self.assertEqual(result, self.ROW_KEY) - - def test_yield_retry_rows_data(self): - from google.api_core import retry - - client = _Client() - - retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) - - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] - - response = _ReadRowsResponseV2(chunks) - failure_iterator = _MockFailureIterator_1() - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [failure_iterator, iterator] - - 
request = object() - - yrd = self._make_one(client._data_stub.ReadRows, request, retry_read_rows) - - result = self._consume_all(yrd)[0] - - self.assertEqual(result, self.ROW_KEY) - - def _consume_all(self, yrd): - return [row.row_key for row in yrd] - - -class Test_ReadRowsRequestManager(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.table_name = "table_name" - cls.row_range1 = RowRange(b"row_key21", b"row_key29") - cls.row_range2 = RowRange(b"row_key31", b"row_key39") - cls.row_range3 = RowRange(b"row_key41", b"row_key49") - - cls.request = _ReadRowsRequestPB(table_name=cls.table_name) - cls.request.rows.row_ranges.add(**cls.row_range1.get_range_kwargs()) - cls.request.rows.row_ranges.add(**cls.row_range2.get_range_kwargs()) - cls.request.rows.row_ranges.add(**cls.row_range3.get_range_kwargs()) - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import _ReadRowsRequestManager - - return _ReadRowsRequestManager - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - request = mock.Mock() - last_scanned_key = "last_key" - rows_read_so_far = 10 - - request_manager = self._make_one(request, last_scanned_key, rows_read_so_far) - self.assertEqual(request, request_manager.message) - self.assertEqual(last_scanned_key, request_manager.last_scanned_key) - self.assertEqual(rows_read_so_far, request_manager.rows_read_so_far) - - def test__filter_row_key(self): - table_name = "table_name" - request = _ReadRowsRequestPB(table_name=table_name) - request.rows.row_keys.extend( - [b"row_key1", b"row_key2", b"row_key3", b"row_key4"] - ) - - last_scanned_key = b"row_key2" - request_manager = self._make_one(request, last_scanned_key, 2) - row_keys = request_manager._filter_rows_keys() - - expected_row_keys = [b"row_key3", b"row_key4"] - self.assertEqual(expected_row_keys, row_keys) - - def test__filter_row_ranges_all_ranges_added_back(self): - last_scanned_key = b"row_key14" - request_manager = self._make_one(self.request, last_scanned_key, 2) - row_ranges = request_manager._filter_row_ranges() - - exp_row_range1 = data_v2_pb2.RowRange( - start_key_closed=b"row_key21", end_key_open=b"row_key29" - ) - exp_row_range2 = data_v2_pb2.RowRange( - start_key_closed=b"row_key31", end_key_open=b"row_key39" - ) - exp_row_range3 = data_v2_pb2.RowRange( - start_key_closed=b"row_key41", end_key_open=b"row_key49" - ) - exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] - - self.assertEqual(exp_row_ranges, row_ranges) - - def test__filter_row_ranges_all_ranges_already_read(self): - last_scanned_key = b"row_key54" - request_manager = self._make_one(self.request, last_scanned_key, 2) - row_ranges = request_manager._filter_row_ranges() - - self.assertEqual(row_ranges, []) - - def test__filter_row_ranges_all_ranges_already_read_open_closed(self): - last_scanned_key = b"row_key54" - - row_range1 = RowRange(b"row_key21", b"row_key29", False, True) - row_range2 = RowRange(b"row_key31", b"row_key39") - row_range3 = RowRange(b"row_key41", b"row_key49", False, True) - - request = _ReadRowsRequestPB(table_name=self.table_name) - request.rows.row_ranges.add(**row_range1.get_range_kwargs()) - request.rows.row_ranges.add(**row_range2.get_range_kwargs()) - request.rows.row_ranges.add(**row_range3.get_range_kwargs()) - - request_manager = self._make_one(request, last_scanned_key, 2) - request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name) - row_ranges = 
request_manager._filter_row_ranges() - - self.assertEqual(row_ranges, []) - - def test__filter_row_ranges_some_ranges_already_read(self): - last_scanned_key = b"row_key22" - request_manager = self._make_one(self.request, last_scanned_key, 2) - request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name) - row_ranges = request_manager._filter_row_ranges() - - exp_row_range1 = data_v2_pb2.RowRange( - start_key_open=b"row_key22", end_key_open=b"row_key29" - ) - exp_row_range2 = data_v2_pb2.RowRange( - start_key_closed=b"row_key31", end_key_open=b"row_key39" - ) - exp_row_range3 = data_v2_pb2.RowRange( - start_key_closed=b"row_key41", end_key_open=b"row_key49" - ) - exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] - - self.assertEqual(exp_row_ranges, row_ranges) - - def test_build_updated_request(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - - row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name - ) - request.rows.row_ranges.add(**self.row_range1.get_range_kwargs()) - - request_manager = self._make_one(request, last_scanned_key, 2) - - result = request_manager.build_updated_request() - - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 - ) - expected_result.rows.row_ranges.add( - start_key_open=last_scanned_key, end_key_open=self.row_range1.end_key - ) - - self.assertEqual(expected_result, result) - - def test_build_updated_request_full_table(self): - last_scanned_key = b"row_key14" - - request = _ReadRowsRequestPB(table_name=self.table_name) - request_manager = self._make_one(request, last_scanned_key, 2) - - result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=self.table_name, filter={}) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) - self.assertEqual(expected_result, result) - - def test_build_updated_request_no_start_key(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - - row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name - ) - request.rows.row_ranges.add(end_key_open=b"row_key29") - - request_manager = self._make_one(request, last_scanned_key, 2) - - result = request_manager.build_updated_request() - - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 - ) - expected_result.rows.row_ranges.add( - start_key_open=last_scanned_key, end_key_open=b"row_key29" - ) - - self.assertEqual(expected_result, result) - - def test_build_updated_request_no_end_key(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - - row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name - ) - request.rows.row_ranges.add(start_key_closed=b"row_key20") - - request_manager = self._make_one(request, last_scanned_key, 2) - - result = request_manager.build_updated_request() - - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 - ) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) - - self.assertEqual(expected_result, result) - - def test_build_updated_request_rows(self): - from google.cloud.bigtable.row_filters 
import RowSampleFilter - - row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key4" - request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=5, table_name=self.table_name - ) - request.rows.row_keys.extend( - [ - b"row_key1", - b"row_key2", - b"row_key4", - b"row_key5", - b"row_key7", - b"row_key9", - ] - ) - - request_manager = self._make_one(request, last_scanned_key, 3) - - result = request_manager.build_updated_request() - - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=2 - ) - expected_result.rows.row_keys.extend([b"row_key5", b"row_key7", b"row_key9"]) - - self.assertEqual(expected_result, result) - - def test_build_updated_request_rows_limit(self): - last_scanned_key = b"row_key14" - - request = _ReadRowsRequestPB(table_name=self.table_name, rows_limit=10) - request_manager = self._make_one(request, last_scanned_key, 2) - - result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter={}, rows_limit=8 - ) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) - self.assertEqual(expected_result, result) - - def test__key_already_read(self): - last_scanned_key = b"row_key14" - request = _ReadRowsRequestPB(table_name=self.table_name) - request_manager = self._make_one(request, last_scanned_key, 2) - - self.assertTrue(request_manager._key_already_read(b"row_key11")) - self.assertFalse(request_manager._key_already_read(b"row_key16")) - - -class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): - - _json_tests = None - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import PartialRowsData - - return PartialRowsData - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def _load_json_test(self, test_name): - import os - - if self.__class__._json_tests is None: - dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, "read-rows-acceptance-test.json") - raw = _parse_readrows_acceptance_tests(filename) - tests = self.__class__._json_tests = {} - for (name, chunks, results) in raw: - tests[name] = chunks, results - return self.__class__._json_tests[test_name] - - # JSON Error cases: invalid chunks - - def _fail_during_consume(self, testcase_name): - from google.cloud.bigtable.row_data import InvalidChunk - - client = _Client() - chunks, results = self._load_json_test(testcase_name) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - prd = self._make_one(client._data_stub.ReadRows, request) - with self.assertRaises(InvalidChunk): - prd.consume_all() - expected_result = self._sort_flattend_cells( - [result for result in results if not result["error"]] - ) - flattened = self._sort_flattend_cells(_flatten_cells(prd)) - self.assertEqual(flattened, expected_result) - - def test_invalid_no_cell_key_before_commit(self): - self._fail_during_consume("invalid - no cell key before commit") - - def test_invalid_no_cell_key_before_value(self): - self._fail_during_consume("invalid - no cell key before value") - - def test_invalid_new_col_family_wo_qualifier(self): - self._fail_during_consume("invalid - new col family must specify qualifier") - - def test_invalid_no_commit_between_rows(self): - self._fail_during_consume("invalid - no commit between rows") - - def 
test_invalid_no_commit_after_first_row(self): - self._fail_during_consume("invalid - no commit after first row") - - def test_invalid_duplicate_row_key(self): - self._fail_during_consume("invalid - duplicate row key") - - def test_invalid_new_row_missing_row_key(self): - self._fail_during_consume("invalid - new row missing row key") - - def test_invalid_bare_reset(self): - self._fail_during_consume("invalid - bare reset") - - def test_invalid_bad_reset_no_commit(self): - self._fail_during_consume("invalid - bad reset, no commit") - - def test_invalid_missing_key_after_reset(self): - self._fail_during_consume("invalid - missing key after reset") - - def test_invalid_reset_with_chunk(self): - self._fail_during_consume("invalid - reset with chunk") - - def test_invalid_commit_with_chunk(self): - self._fail_during_consume("invalid - commit with chunk") - - # JSON Error cases: incomplete final row - - def _sort_flattend_cells(self, flattened): - import operator - - key_func = operator.itemgetter("rk", "fm", "qual") - return sorted(flattened, key=key_func) - - def _incomplete_final_row(self, testcase_name): - client = _Client() - chunks, results = self._load_json_test(testcase_name) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - prd = self._make_one(client._data_stub.ReadRows, request) - with self.assertRaises(ValueError): - prd.consume_all() - self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) - expected_result = self._sort_flattend_cells( - [result for result in results if not result["error"]] - ) - flattened = self._sort_flattend_cells(_flatten_cells(prd)) - self.assertEqual(flattened, expected_result) - - def test_invalid_no_commit(self): - self._incomplete_final_row("invalid - no commit") - - def test_invalid_last_row_missing_commit(self): - self._incomplete_final_row("invalid - last row missing commit") - - # Non-error cases - - _marker = object() - - def _match_results(self, testcase_name, expected_result=_marker): - client = _Client() - chunks, results = self._load_json_test(testcase_name) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - prd = self._make_one(client._data_stub.ReadRows, request) - prd.consume_all() - flattened = self._sort_flattend_cells(_flatten_cells(prd)) - if expected_result is self._marker: - expected_result = self._sort_flattend_cells(results) - self.assertEqual(flattened, expected_result) - - def test_bare_commit_implies_ts_zero(self): - self._match_results("bare commit implies ts=0") - - def test_simple_row_with_timestamp(self): - self._match_results("simple row with timestamp") - - def test_missing_timestamp_implies_ts_zero(self): - self._match_results("missing timestamp, implied ts=0") - - def test_empty_cell_value(self): - self._match_results("empty cell value") - - def test_two_unsplit_cells(self): - self._match_results("two unsplit cells") - - def test_two_qualifiers(self): - self._match_results("two qualifiers") - - def test_two_families(self): - self._match_results("two families") - - def test_with_labels(self): - self._match_results("with labels") - - def test_split_cell_bare_commit(self): - self._match_results("split cell, bare commit") - - def test_split_cell(self): - self._match_results("split cell") - - def test_split_four_ways(self): - 
self._match_results("split four ways") - - def test_two_split_cells(self): - self._match_results("two split cells") - - def test_multi_qualifier_splits(self): - self._match_results("multi-qualifier splits") - - def test_multi_qualifier_multi_split(self): - self._match_results("multi-qualifier multi-split") - - def test_multi_family_split(self): - self._match_results("multi-family split") - - def test_two_rows(self): - self._match_results("two rows") - - def test_two_rows_implicit_timestamp(self): - self._match_results("two rows implicit timestamp") - - def test_two_rows_empty_value(self): - self._match_results("two rows empty value") - - def test_two_rows_one_with_multiple_cells(self): - self._match_results("two rows, one with multiple cells") - - def test_two_rows_multiple_cells_multiple_families(self): - self._match_results("two rows, multiple cells, multiple families") - - def test_two_rows_multiple_cells(self): - self._match_results("two rows, multiple cells") - - def test_two_rows_four_cells_two_labels(self): - self._match_results("two rows, four cells, 2 labels") - - def test_two_rows_with_splits_same_timestamp(self): - self._match_results("two rows with splits, same timestamp") - - def test_no_data_after_reset(self): - # JSON testcase has `"results": null` - self._match_results("no data after reset", expected_result=[]) - - def test_simple_reset(self): - self._match_results("simple reset") - - def test_reset_to_new_val(self): - self._match_results("reset to new val") - - def test_reset_to_new_qual(self): - self._match_results("reset to new qual") - - def test_reset_with_splits(self): - self._match_results("reset with splits") - - def test_two_resets(self): - self._match_results("two resets") - - def test_reset_to_new_row(self): - self._match_results("reset to new row") - - def test_reset_in_between_chunks(self): - self._match_results("reset in between chunks") - - def test_empty_cell_chunk(self): - self._match_results("empty cell chunk") - - def test_empty_second_qualifier(self): - self._match_results("empty second qualifier") - - -def _flatten_cells(prd): - # Match results format from JSON testcases. - # Doesn't handle error cases. 
- from google.cloud._helpers import _bytes_to_unicode - from google.cloud._helpers import _microseconds_from_datetime - - for row_key, row in prd.rows.items(): - for family_name, family in row.cells.items(): - for qualifier, column in family.items(): - for cell in column: - yield { - u"rk": _bytes_to_unicode(row_key), - u"fm": family_name, - u"qual": _bytes_to_unicode(qualifier), - u"ts": _microseconds_from_datetime(cell.timestamp), - u"value": _bytes_to_unicode(cell.value), - u"label": u" ".join(cell.labels), - u"error": False, - } - - -class _MockCancellableIterator(object): - - cancel_calls = 0 - - def __init__(self, *values): - self.iter_values = iter(values) - - def cancel(self): - self.cancel_calls += 1 - - def next(self): - return next(self.iter_values) - - __next__ = next - - -class _MockFailureIterator_1(object): - def next(self): - raise DeadlineExceeded("Failed to read from server") - - __next__ = next - - -class _PartialCellData(object): - - row_key = b"" - family_name = u"" - qualifier = None - timestamp_micros = 0 - - def __init__(self, **kw): - self.labels = kw.pop("labels", []) - self.__dict__.update(kw) - - -class _ReadRowsResponseV2(object): - def __init__(self, chunks, last_scanned_row_key=""): - self.chunks = chunks - self.last_scanned_row_key = last_scanned_row_key - - -def _generate_cell_chunks(chunk_text_pbs): - from google.protobuf.text_format import Merge - from google.cloud.bigtable_v2.proto.bigtable_pb2 import ReadRowsResponse - - chunks = [] - - for chunk_text_pb in chunk_text_pbs: - chunk = ReadRowsResponse.CellChunk() - chunks.append(Merge(chunk_text_pb, chunk)) - - return chunks - - -def _parse_readrows_acceptance_tests(filename): - """Parse acceptance tests from JSON - - See - https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/\ - 4d3185662ca61bc9fa1bdf1ec0166f6e5ecf86c6/bigtable-client-core/src/\ - test/resources/com/google/cloud/bigtable/grpc/scanner/v2/ - read-rows-acceptance-test.json - """ - import json - - with open(filename) as json_file: - test_json = json.load(json_file) - - for test in test_json["tests"]: - name = test["name"] - chunks = _generate_cell_chunks(test["chunks"]) - results = test["results"] - yield name, chunks, results - - -def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - - family_name = kw.pop("family_name", None) - qualifier = kw.pop("qualifier", None) - message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) - - if family_name: - message.family_name.value = family_name - if qualifier: - message.qualifier.value = qualifier - - return message - - -def _make_cell(value): - from google.cloud.bigtable import row_data - - return row_data.Cell(value, TestCell.timestamp_micros) - - -def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - - return messages_v2_pb2.ReadRowsRequest(*args, **kw) - - -def _read_rows_retry_exception(exc): - return isinstance(exc, DeadlineExceeded) diff --git a/bigtable/tests/unit/test_row_filters.py b/bigtable/tests/unit/test_row_filters.py deleted file mode 100644 index 1c51651d8c44..000000000000 --- a/bigtable/tests/unit/test_row_filters.py +++ /dev/null @@ -1,1033 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - - -class Test_BoolFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import _BoolFilter - - return _BoolFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - flag = object() - row_filter = self._make_one(flag) - self.assertIs(row_filter.flag, flag) - - def test___eq__type_differ(self): - flag = object() - row_filter1 = self._make_one(flag) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - flag = object() - row_filter1 = self._make_one(flag) - row_filter2 = self._make_one(flag) - self.assertEqual(row_filter1, row_filter2) - - def test___ne__same_value(self): - flag = object() - row_filter1 = self._make_one(flag) - row_filter2 = self._make_one(flag) - comparison_val = row_filter1 != row_filter2 - self.assertFalse(comparison_val) - - -class TestSinkFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import SinkFilter - - return SinkFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - flag = True - row_filter = self._make_one(flag) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(sink=flag) - self.assertEqual(pb_val, expected_pb) - - -class TestPassAllFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import PassAllFilter - - return PassAllFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - flag = True - row_filter = self._make_one(flag) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(pass_all_filter=flag) - self.assertEqual(pb_val, expected_pb) - - -class TestBlockAllFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import BlockAllFilter - - return BlockAllFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - flag = True - row_filter = self._make_one(flag) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(block_all_filter=flag) - self.assertEqual(pb_val, expected_pb) - - -class Test_RegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import _RegexFilter - - return _RegexFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - regex = b"abc" - row_filter = self._make_one(regex) - self.assertIs(row_filter.regex, regex) - - def test_constructor_non_bytes(self): - regex = u"abc" - row_filter = self._make_one(regex) - self.assertEqual(row_filter.regex, b"abc") - - def test___eq__type_differ(self): - regex = b"def-rgx" - row_filter1 = self._make_one(regex) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - regex = b"trex-regex" - row_filter1 = 
self._make_one(regex) - row_filter2 = self._make_one(regex) - self.assertEqual(row_filter1, row_filter2) - - def test___ne__same_value(self): - regex = b"abc" - row_filter1 = self._make_one(regex) - row_filter2 = self._make_one(regex) - comparison_val = row_filter1 != row_filter2 - self.assertFalse(comparison_val) - - -class TestRowKeyRegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import RowKeyRegexFilter - - return RowKeyRegexFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - regex = b"row-key-regex" - row_filter = self._make_one(regex) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(row_key_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestRowSampleFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import RowSampleFilter - - return RowSampleFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - sample = object() - row_filter = self._make_one(sample) - self.assertIs(row_filter.sample, sample) - - def test___eq__type_differ(self): - sample = object() - row_filter1 = self._make_one(sample) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - sample = object() - row_filter1 = self._make_one(sample) - row_filter2 = self._make_one(sample) - self.assertEqual(row_filter1, row_filter2) - - def test_to_pb(self): - sample = 0.25 - row_filter = self._make_one(sample) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(row_sample_filter=sample) - self.assertEqual(pb_val, expected_pb) - - -class TestFamilyNameRegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import FamilyNameRegexFilter - - return FamilyNameRegexFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - regex = u"family-regex" - row_filter = self._make_one(regex) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(family_name_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestColumnQualifierRegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter - - return ColumnQualifierRegexFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - regex = b"column-regex" - row_filter = self._make_one(regex) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(column_qualifier_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestTimestampRange(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import TimestampRange - - return TimestampRange - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - start = object() - end = object() - time_range = self._make_one(start=start, end=end) - self.assertIs(time_range.start, start) - self.assertIs(time_range.end, end) - - def test___eq__(self): - start = object() - end = object() - time_range1 = self._make_one(start=start, end=end) - time_range2 = self._make_one(start=start, end=end) - self.assertEqual(time_range1, time_range2) - - def 
test___eq__type_differ(self): - start = object() - end = object() - time_range1 = self._make_one(start=start, end=end) - time_range2 = object() - self.assertNotEqual(time_range1, time_range2) - - def test___ne__same_value(self): - start = object() - end = object() - time_range1 = self._make_one(start=start, end=end) - time_range2 = self._make_one(start=start, end=end) - comparison_val = time_range1 != time_range2 - self.assertFalse(comparison_val) - - def _to_pb_helper(self, pb_kwargs, start=None, end=None): - import datetime - from google.cloud._helpers import _EPOCH - - if start is not None: - start = _EPOCH + datetime.timedelta(microseconds=start) - if end is not None: - end = _EPOCH + datetime.timedelta(microseconds=end) - time_range = self._make_one(start=start, end=end) - expected_pb = _TimestampRangePB(**pb_kwargs) - time_pb = time_range.to_pb() - self.assertEqual( - time_pb.start_timestamp_micros, expected_pb.start_timestamp_micros - ) - self.assertEqual(time_pb.end_timestamp_micros, expected_pb.end_timestamp_micros) - self.assertEqual(time_pb, expected_pb) - - def test_to_pb(self): - start_micros = 30871234 - end_micros = 12939371234 - start_millis = start_micros // 1000 * 1000 - self.assertEqual(start_millis, 30871000) - end_millis = end_micros // 1000 * 1000 + 1000 - self.assertEqual(end_millis, 12939372000) - pb_kwargs = {} - pb_kwargs["start_timestamp_micros"] = start_millis - pb_kwargs["end_timestamp_micros"] = end_millis - self._to_pb_helper(pb_kwargs, start=start_micros, end=end_micros) - - def test_to_pb_start_only(self): - # Makes sure already milliseconds granularity - start_micros = 30871000 - start_millis = start_micros // 1000 * 1000 - self.assertEqual(start_millis, 30871000) - pb_kwargs = {} - pb_kwargs["start_timestamp_micros"] = start_millis - self._to_pb_helper(pb_kwargs, start=start_micros, end=None) - - def test_to_pb_end_only(self): - # Makes sure already milliseconds granularity - end_micros = 12939371000 - end_millis = end_micros // 1000 * 1000 - self.assertEqual(end_millis, 12939371000) - pb_kwargs = {} - pb_kwargs["end_timestamp_micros"] = end_millis - self._to_pb_helper(pb_kwargs, start=None, end=end_micros) - - -class TestTimestampRangeFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter - - return TimestampRangeFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - range_ = object() - row_filter = self._make_one(range_) - self.assertIs(row_filter.range_, range_) - - def test___eq__type_differ(self): - range_ = object() - row_filter1 = self._make_one(range_) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - range_ = object() - row_filter1 = self._make_one(range_) - row_filter2 = self._make_one(range_) - self.assertEqual(row_filter1, row_filter2) - - def test_to_pb(self): - from google.cloud.bigtable.row_filters import TimestampRange - - range_ = TimestampRange() - row_filter = self._make_one(range_) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(timestamp_range_filter=_TimestampRangePB()) - self.assertEqual(pb_val, expected_pb) - - -class TestColumnRangeFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter - - return ColumnRangeFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def 
test_constructor_defaults(self): - column_family_id = object() - row_filter = self._make_one(column_family_id) - self.assertIs(row_filter.column_family_id, column_family_id) - self.assertIsNone(row_filter.start_column) - self.assertIsNone(row_filter.end_column) - self.assertTrue(row_filter.inclusive_start) - self.assertTrue(row_filter.inclusive_end) - - def test_constructor_explicit(self): - column_family_id = object() - start_column = object() - end_column = object() - inclusive_start = object() - inclusive_end = object() - row_filter = self._make_one( - column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - self.assertIs(row_filter.column_family_id, column_family_id) - self.assertIs(row_filter.start_column, start_column) - self.assertIs(row_filter.end_column, end_column) - self.assertIs(row_filter.inclusive_start, inclusive_start) - self.assertIs(row_filter.inclusive_end, inclusive_end) - - def test_constructor_bad_start(self): - column_family_id = object() - self.assertRaises( - ValueError, self._make_one, column_family_id, inclusive_start=True - ) - - def test_constructor_bad_end(self): - column_family_id = object() - self.assertRaises( - ValueError, self._make_one, column_family_id, inclusive_end=True - ) - - def test___eq__(self): - column_family_id = object() - start_column = object() - end_column = object() - inclusive_start = object() - inclusive_end = object() - row_filter1 = self._make_one( - column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - row_filter2 = self._make_one( - column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - self.assertEqual(row_filter1, row_filter2) - - def test___eq__type_differ(self): - column_family_id = object() - row_filter1 = self._make_one(column_family_id) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test_to_pb(self): - column_family_id = u"column-family-id" - row_filter = self._make_one(column_family_id) - col_range_pb = _ColumnRangePB(family_name=column_family_id) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_start(self): - column_family_id = u"column-family-id" - column = b"column" - row_filter = self._make_one(column_family_id, start_column=column) - col_range_pb = _ColumnRangePB( - family_name=column_family_id, start_qualifier_closed=column - ) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_start(self): - column_family_id = u"column-family-id" - column = b"column" - row_filter = self._make_one( - column_family_id, start_column=column, inclusive_start=False - ) - col_range_pb = _ColumnRangePB( - family_name=column_family_id, start_qualifier_open=column - ) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_end(self): - column_family_id = u"column-family-id" - column = b"column" - row_filter = self._make_one(column_family_id, end_column=column) - col_range_pb = _ColumnRangePB( - family_name=column_family_id, end_qualifier_closed=column - ) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def 
test_to_pb_exclusive_end(self): - column_family_id = u"column-family-id" - column = b"column" - row_filter = self._make_one( - column_family_id, end_column=column, inclusive_end=False - ) - col_range_pb = _ColumnRangePB( - family_name=column_family_id, end_qualifier_open=column - ) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - -class TestValueRegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ValueRegexFilter - - return ValueRegexFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - regex = b"value-regex" - row_filter = self._make_one(regex) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(value_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestValueRangeFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ValueRangeFilter - - return ValueRangeFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor_defaults(self): - row_filter = self._make_one() - self.assertIsNone(row_filter.start_value) - self.assertIsNone(row_filter.end_value) - self.assertTrue(row_filter.inclusive_start) - self.assertTrue(row_filter.inclusive_end) - - def test_constructor_explicit(self): - start_value = object() - end_value = object() - inclusive_start = object() - inclusive_end = object() - row_filter = self._make_one( - start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - self.assertIs(row_filter.start_value, start_value) - self.assertIs(row_filter.end_value, end_value) - self.assertIs(row_filter.inclusive_start, inclusive_start) - self.assertIs(row_filter.inclusive_end, inclusive_end) - - def test_constructor_bad_start(self): - self.assertRaises(ValueError, self._make_one, inclusive_start=True) - - def test_constructor_bad_end(self): - self.assertRaises(ValueError, self._make_one, inclusive_end=True) - - def test___eq__(self): - start_value = object() - end_value = object() - inclusive_start = object() - inclusive_end = object() - row_filter1 = self._make_one( - start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - row_filter2 = self._make_one( - start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - self.assertEqual(row_filter1, row_filter2) - - def test___eq__type_differ(self): - row_filter1 = self._make_one() - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test_to_pb(self): - row_filter = self._make_one() - expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB()) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_start(self): - value = b"some-value" - row_filter = self._make_one(start_value=value) - val_range_pb = _ValueRangePB(start_value_closed=value) - expected_pb = _RowFilterPB(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_start(self): - value = b"some-value" - row_filter = self._make_one(start_value=value, inclusive_start=False) - val_range_pb = _ValueRangePB(start_value_open=value) - expected_pb = _RowFilterPB(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - 
def test_to_pb_inclusive_end(self): - value = b"some-value" - row_filter = self._make_one(end_value=value) - val_range_pb = _ValueRangePB(end_value_closed=value) - expected_pb = _RowFilterPB(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_end(self): - value = b"some-value" - row_filter = self._make_one(end_value=value, inclusive_end=False) - val_range_pb = _ValueRangePB(end_value_open=value) - expected_pb = _RowFilterPB(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - -class Test_CellCountFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import _CellCountFilter - - return _CellCountFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - num_cells = object() - row_filter = self._make_one(num_cells) - self.assertIs(row_filter.num_cells, num_cells) - - def test___eq__type_differ(self): - num_cells = object() - row_filter1 = self._make_one(num_cells) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - num_cells = object() - row_filter1 = self._make_one(num_cells) - row_filter2 = self._make_one(num_cells) - self.assertEqual(row_filter1, row_filter2) - - def test___ne__same_value(self): - num_cells = object() - row_filter1 = self._make_one(num_cells) - row_filter2 = self._make_one(num_cells) - comparison_val = row_filter1 != row_filter2 - self.assertFalse(comparison_val) - - -class TestCellsRowOffsetFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter - - return CellsRowOffsetFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - num_cells = 76 - row_filter = self._make_one(num_cells) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(cells_per_row_offset_filter=num_cells) - self.assertEqual(pb_val, expected_pb) - - -class TestCellsRowLimitFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - - return CellsRowLimitFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - num_cells = 189 - row_filter = self._make_one(num_cells) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(cells_per_row_limit_filter=num_cells) - self.assertEqual(pb_val, expected_pb) - - -class TestCellsColumnLimitFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import CellsColumnLimitFilter - - return CellsColumnLimitFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - num_cells = 10 - row_filter = self._make_one(num_cells) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(cells_per_column_limit_filter=num_cells) - self.assertEqual(pb_val, expected_pb) - - -class TestStripValueTransformerFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - - return StripValueTransformerFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - flag = True - row_filter = self._make_one(flag) - pb_val = 
row_filter.to_pb() - expected_pb = _RowFilterPB(strip_value_transformer=flag) - self.assertEqual(pb_val, expected_pb) - - -class TestApplyLabelFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter - - return ApplyLabelFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - label = object() - row_filter = self._make_one(label) - self.assertIs(row_filter.label, label) - - def test___eq__type_differ(self): - label = object() - row_filter1 = self._make_one(label) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - label = object() - row_filter1 = self._make_one(label) - row_filter2 = self._make_one(label) - self.assertEqual(row_filter1, row_filter2) - - def test_to_pb(self): - label = u"label" - row_filter = self._make_one(label) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(apply_label_transformer=label) - self.assertEqual(pb_val, expected_pb) - - -class Test_FilterCombination(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import _FilterCombination - - return _FilterCombination - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor_defaults(self): - row_filter = self._make_one() - self.assertEqual(row_filter.filters, []) - - def test_constructor_explicit(self): - filters = object() - row_filter = self._make_one(filters=filters) - self.assertIs(row_filter.filters, filters) - - def test___eq__(self): - filters = object() - row_filter1 = self._make_one(filters=filters) - row_filter2 = self._make_one(filters=filters) - self.assertEqual(row_filter1, row_filter2) - - def test___eq__type_differ(self): - filters = object() - row_filter1 = self._make_one(filters=filters) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - -class TestRowFilterChain(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import RowFilterChain - - return RowFilterChain - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) - filter_pb = row_filter3.to_pb() - - expected_pb = _RowFilterPB( - chain=_RowFilterChainPB(filters=[row_filter1_pb, row_filter2_pb]) - ) - self.assertEqual(filter_pb, expected_pb) - - def test_to_pb_nested(self): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter2 = RowSampleFilter(0.25) - - row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) - row_filter3_pb = row_filter3.to_pb() - - row_filter4 = CellsRowLimitFilter(11) - row_filter4_pb = row_filter4.to_pb() - - row_filter5 = self._make_one(filters=[row_filter3, row_filter4]) - filter_pb = row_filter5.to_pb() - - expected_pb = _RowFilterPB( - 
chain=_RowFilterChainPB(filters=[row_filter3_pb, row_filter4_pb]) - ) - self.assertEqual(filter_pb, expected_pb) - - -class TestRowFilterUnion(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import RowFilterUnion - - return RowFilterUnion - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) - filter_pb = row_filter3.to_pb() - - expected_pb = _RowFilterPB( - interleave=_RowFilterInterleavePB(filters=[row_filter1_pb, row_filter2_pb]) - ) - self.assertEqual(filter_pb, expected_pb) - - def test_to_pb_nested(self): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter2 = RowSampleFilter(0.25) - - row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) - row_filter3_pb = row_filter3.to_pb() - - row_filter4 = CellsRowLimitFilter(11) - row_filter4_pb = row_filter4.to_pb() - - row_filter5 = self._make_one(filters=[row_filter3, row_filter4]) - filter_pb = row_filter5.to_pb() - - expected_pb = _RowFilterPB( - interleave=_RowFilterInterleavePB(filters=[row_filter3_pb, row_filter4_pb]) - ) - self.assertEqual(filter_pb, expected_pb) - - -class TestConditionalRowFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter - - return ConditionalRowFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - base_filter = object() - true_filter = object() - false_filter = object() - cond_filter = self._make_one( - base_filter, true_filter=true_filter, false_filter=false_filter - ) - self.assertIs(cond_filter.base_filter, base_filter) - self.assertIs(cond_filter.true_filter, true_filter) - self.assertIs(cond_filter.false_filter, false_filter) - - def test___eq__(self): - base_filter = object() - true_filter = object() - false_filter = object() - cond_filter1 = self._make_one( - base_filter, true_filter=true_filter, false_filter=false_filter - ) - cond_filter2 = self._make_one( - base_filter, true_filter=true_filter, false_filter=false_filter - ) - self.assertEqual(cond_filter1, cond_filter2) - - def test___eq__type_differ(self): - base_filter = object() - true_filter = object() - false_filter = object() - cond_filter1 = self._make_one( - base_filter, true_filter=true_filter, false_filter=false_filter - ) - cond_filter2 = object() - self.assertNotEqual(cond_filter1, cond_filter2) - - def test_to_pb(self): - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = CellsRowOffsetFilter(11) - row_filter3_pb = 
row_filter3.to_pb() - - row_filter4 = self._make_one( - row_filter1, true_filter=row_filter2, false_filter=row_filter3 - ) - filter_pb = row_filter4.to_pb() - - expected_pb = _RowFilterPB( - condition=_RowFilterConditionPB( - predicate_filter=row_filter1_pb, - true_filter=row_filter2_pb, - false_filter=row_filter3_pb, - ) - ) - self.assertEqual(filter_pb, expected_pb) - - def test_to_pb_true_only(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = self._make_one(row_filter1, true_filter=row_filter2) - filter_pb = row_filter3.to_pb() - - expected_pb = _RowFilterPB( - condition=_RowFilterConditionPB( - predicate_filter=row_filter1_pb, true_filter=row_filter2_pb - ) - ) - self.assertEqual(filter_pb, expected_pb) - - def test_to_pb_false_only(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = self._make_one(row_filter1, false_filter=row_filter2) - filter_pb = row_filter3.to_pb() - - expected_pb = _RowFilterPB( - condition=_RowFilterConditionPB( - predicate_filter=row_filter1_pb, false_filter=row_filter2_pb - ) - ) - self.assertEqual(filter_pb, expected_pb) - - -def _ColumnRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.ColumnRange(*args, **kw) - - -def _RowFilterPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.RowFilter(*args, **kw) - - -def _RowFilterChainPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.RowFilter.Chain(*args, **kw) - - -def _RowFilterConditionPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.RowFilter.Condition(*args, **kw) - - -def _RowFilterInterleavePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.RowFilter.Interleave(*args, **kw) - - -def _TimestampRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.TimestampRange(*args, **kw) - - -def _ValueRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 - - return data_v2_pb2.ValueRange(*args, **kw) diff --git a/bigtable/tests/unit/test_row_set.py b/bigtable/tests/unit/test_row_set.py deleted file mode 100644 index c66341b84ec6..000000000000 --- a/bigtable/tests/unit/test_row_set.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest -from google.cloud.bigtable.row_set import RowRange -from google.cloud._helpers import _to_bytes - - -class TestRowSet(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_set import RowSet - - return RowSet - - def _make_one(self): - return self._get_target_class()() - - def test_constructor(self): - row_set = self._make_one() - self.assertEqual([], row_set.row_keys) - self.assertEqual([], row_set.row_ranges) - - def test__eq__(self): - row_key1 = b"row_key1" - row_key2 = b"row_key1" - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key4", b"row_key9") - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(row_key1) - row_set2.add_row_key(row_key2) - row_set1.add_row_range(row_range1) - row_set2.add_row_range(row_range2) - - self.assertEqual(row_set1, row_set2) - - def test__eq__type_differ(self): - row_set1 = self._make_one() - row_set2 = object() - self.assertNotEqual(row_set1, row_set2) - - def test__eq__len_row_keys_differ(self): - row_key1 = b"row_key1" - row_key2 = b"row_key1" - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(row_key1) - row_set1.add_row_key(row_key2) - row_set2.add_row_key(row_key2) - - self.assertNotEqual(row_set1, row_set2) - - def test__eq__len_row_ranges_differ(self): - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key4", b"row_key9") - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_range(row_range1) - row_set1.add_row_range(row_range2) - row_set2.add_row_range(row_range2) - - self.assertNotEqual(row_set1, row_set2) - - def test__eq__row_keys_differ(self): - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(b"row_key1") - row_set1.add_row_key(b"row_key2") - row_set1.add_row_key(b"row_key3") - row_set2.add_row_key(b"row_key1") - row_set2.add_row_key(b"row_key2") - row_set2.add_row_key(b"row_key4") - - self.assertNotEqual(row_set1, row_set2) - - def test__eq__row_ranges_differ(self): - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key14", b"row_key19") - row_range3 = RowRange(b"row_key24", b"row_key29") - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_range(row_range1) - row_set1.add_row_range(row_range2) - row_set1.add_row_range(row_range3) - row_set2.add_row_range(row_range1) - row_set2.add_row_range(row_range2) - - self.assertNotEqual(row_set1, row_set2) - - def test__ne__(self): - row_key1 = b"row_key1" - row_key2 = b"row_key1" - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key5", b"row_key9") - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(row_key1) - row_set2.add_row_key(row_key2) - row_set1.add_row_range(row_range1) - row_set2.add_row_range(row_range2) - - self.assertNotEqual(row_set1, row_set2) - - def test__ne__same_value(self): - row_key1 = b"row_key1" - row_key2 = b"row_key1" - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key4", b"row_key9") - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(row_key1) - row_set2.add_row_key(row_key2) - row_set1.add_row_range(row_range1) - row_set2.add_row_range(row_range2) - - comparison_val = row_set1 != row_set2 - self.assertFalse(comparison_val) - - def 
test_add_row_key(self): - row_set = self._make_one() - row_set.add_row_key("row_key1") - row_set.add_row_key("row_key2") - self.assertEqual(["row_key1", "row_key2"], row_set.row_keys) - - def test_add_row_range(self): - row_set = self._make_one() - row_range1 = RowRange(b"row_key1", b"row_key9") - row_range2 = RowRange(b"row_key21", b"row_key29") - row_set.add_row_range(row_range1) - row_set.add_row_range(row_range2) - expected = [row_range1, row_range2] - self.assertEqual(expected, row_set.row_ranges) - - def test_add_row_range_from_keys(self): - row_set = self._make_one() - row_set.add_row_range_from_keys( - start_key=b"row_key1", - end_key=b"row_key9", - start_inclusive=False, - end_inclusive=True, - ) - self.assertEqual(row_set.row_ranges[0].end_key, b"row_key9") - - def test__update_message_request(self): - row_set = self._make_one() - table_name = "table_name" - row_set.add_row_key("row_key1") - row_range1 = RowRange(b"row_key21", b"row_key29") - row_set.add_row_range(row_range1) - - request = _ReadRowsRequestPB(table_name=table_name) - row_set._update_message_request(request) - - expected_request = _ReadRowsRequestPB(table_name=table_name) - expected_request.rows.row_keys.append(_to_bytes("row_key1")) - - expected_request.rows.row_ranges.add(**row_range1.get_range_kwargs()) - - self.assertEqual(request, expected_request) - - -class TestRowRange(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_set import RowRange - - return RowRange - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - start_key = "row_key1" - end_key = "row_key9" - row_range = self._make_one(start_key, end_key) - self.assertEqual(start_key, row_range.start_key) - self.assertEqual(end_key, row_range.end_key) - self.assertTrue(row_range.start_inclusive) - self.assertFalse(row_range.end_inclusive) - - def test___hash__set_equality(self): - row_range1 = self._make_one("row_key1", "row_key9") - row_range2 = self._make_one("row_key1", "row_key9") - set_one = {row_range1, row_range2} - set_two = {row_range1, row_range2} - self.assertEqual(set_one, set_two) - - def test___hash__not_equals(self): - row_range1 = self._make_one("row_key1", "row_key9") - row_range2 = self._make_one("row_key1", "row_key19") - set_one = {row_range1} - set_two = {row_range2} - self.assertNotEqual(set_one, set_two) - - def test__eq__(self): - start_key = b"row_key1" - end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, True, False) - row_range2 = self._make_one(start_key, end_key, True, False) - self.assertEqual(row_range1, row_range2) - - def test___eq__type_differ(self): - start_key = b"row_key1" - end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, True, False) - row_range2 = object() - self.assertNotEqual(row_range1, row_range2) - - def test__ne__(self): - start_key = b"row_key1" - end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, True, False) - row_range2 = self._make_one(start_key, end_key, False, True) - self.assertNotEqual(row_range1, row_range2) - - def test__ne__same_value(self): - start_key = b"row_key1" - end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, True, False) - row_range2 = self._make_one(start_key, end_key, True, False) - comparison_val = row_range1 != row_range2 - self.assertFalse(comparison_val) - - def test_get_range_kwargs_closed_open(self): - start_key = b"row_key1" - end_key = b"row_key9" - expected_result = 
{"start_key_closed": start_key, "end_key_open": end_key} - row_range = self._make_one(start_key, end_key) - actual_result = row_range.get_range_kwargs() - self.assertEqual(expected_result, actual_result) - - def test_get_range_kwargs_open_closed(self): - start_key = b"row_key1" - end_key = b"row_key9" - expected_result = {"start_key_open": start_key, "end_key_closed": end_key} - row_range = self._make_one(start_key, end_key, False, True) - actual_result = row_range.get_range_kwargs() - self.assertEqual(expected_result, actual_result) - - -def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - - return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/bigtable/tests/unit/test_table.py b/bigtable/tests/unit/test_table.py deleted file mode 100644 index d4bb621c28c0..000000000000 --- a/bigtable/tests/unit/test_table.py +++ /dev/null @@ -1,1937 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - -import mock -from ._testing import _make_credentials -from google.api_core.exceptions import DeadlineExceeded - - -class Test___mutate_rows_request(unittest.TestCase): - def _call_fut(self, table_name, rows): - from google.cloud.bigtable.table import _mutate_rows_request - - return _mutate_rows_request(table_name, rows) - - @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) - def test__mutate_rows_too_many_mutations(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import TooManyMutationsError - - table = mock.Mock(name="table", spec=["name"]) - table.name = "table" - rows = [ - DirectRow(row_key=b"row_key", table=table), - DirectRow(row_key=b"row_key_2", table=table), - ] - rows[0].set_cell("cf1", b"c1", 1) - rows[0].set_cell("cf1", b"c1", 2) - rows[1].set_cell("cf1", b"c1", 3) - rows[1].set_cell("cf1", b"c1", 4) - with self.assertRaises(TooManyMutationsError): - self._call_fut("table", rows) - - def test__mutate_rows_request(self): - from google.cloud.bigtable.row import DirectRow - - table = mock.Mock(name="table", spec=["name"]) - table.name = "table" - rows = [ - DirectRow(row_key=b"row_key", table=table), - DirectRow(row_key=b"row_key_2"), - ] - rows[0].set_cell("cf1", b"c1", b"1") - rows[1].set_cell("cf1", b"c1", b"2") - result = self._call_fut("table", rows) - - expected_result = _mutate_rows_request_pb(table_name="table") - entry1 = expected_result.entries.add() - entry1.row_key = b"row_key" - mutations1 = entry1.mutations.add() - mutations1.set_cell.family_name = "cf1" - mutations1.set_cell.column_qualifier = b"c1" - mutations1.set_cell.timestamp_micros = -1 - mutations1.set_cell.value = b"1" - entry2 = expected_result.entries.add() - entry2.row_key = b"row_key_2" - mutations2 = entry2.mutations.add() - mutations2.set_cell.family_name = "cf1" - mutations2.set_cell.column_qualifier = b"c1" - mutations2.set_cell.timestamp_micros = -1 - mutations2.set_cell.value = b"2" - - self.assertEqual(result, expected_result) - 
- -class Test__check_row_table_name(unittest.TestCase): - def _call_fut(self, table_name, row): - from google.cloud.bigtable.table import _check_row_table_name - - return _check_row_table_name(table_name, row) - - def test_wrong_table_name(self): - from google.cloud.bigtable.table import TableMismatchError - from google.cloud.bigtable.row import DirectRow - - table = mock.Mock(name="table", spec=["name"]) - table.name = "table" - row = DirectRow(row_key=b"row_key", table=table) - with self.assertRaises(TableMismatchError): - self._call_fut("other_table", row) - - def test_right_table_name(self): - from google.cloud.bigtable.row import DirectRow - - table = mock.Mock(name="table", spec=["name"]) - table.name = "table" - row = DirectRow(row_key=b"row_key", table=table) - result = self._call_fut("table", row) - self.assertFalse(result) - - -class Test__check_row_type(unittest.TestCase): - def _call_fut(self, row): - from google.cloud.bigtable.table import _check_row_type - - return _check_row_type(row) - - def test_test_wrong_row_type(self): - from google.cloud.bigtable.row import ConditionalRow - - row = ConditionalRow(row_key=b"row_key", table="table", filter_=None) - with self.assertRaises(TypeError): - self._call_fut(row) - - def test_right_row_type(self): - from google.cloud.bigtable.row import DirectRow - - row = DirectRow(row_key=b"row_key", table="table") - result = self._call_fut(row) - self.assertFalse(result) - - -class TestTable(unittest.TestCase): - - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - TABLE_ID = "table-id" - TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID - ROW_KEY = b"row-key" - ROW_KEY_1 = b"row-key-1" - ROW_KEY_2 = b"row-key-2" - ROW_KEY_3 = b"row-key-3" - FAMILY_NAME = u"family" - QUALIFIER = b"qualifier" - TIMESTAMP_MICROS = 100 - VALUE = b"value" - _json_tests = None - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.table import Table - - return Table - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor_w_admin(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT_ID, credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertIs(table._instance._client, client) - self.assertEqual(table.name, self.TABLE_NAME) - - def test_constructor_wo_admin(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT_ID, credentials=credentials, admin=False - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertIs(table._instance._client, client) - self.assertEqual(table.name, self.TABLE_NAME) - - def _row_methods_helper(self): - client = self._make_client( - project="project-id", credentials=_make_credentials(), admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - row_key = b"row_key" - return table, row_key - - def test_row_factory_direct(self): - from 
google.cloud.bigtable.row import DirectRow - - table, row_key = self._row_methods_helper() - row = table.row(row_key) - - self.assertIsInstance(row, DirectRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_row_factory_conditional(self): - from google.cloud.bigtable.row import ConditionalRow - - table, row_key = self._row_methods_helper() - filter_ = object() - row = table.row(row_key, filter_=filter_) - - self.assertIsInstance(row, ConditionalRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_row_factory_append(self): - from google.cloud.bigtable.row import AppendRow - - table, row_key = self._row_methods_helper() - row = table.row(row_key, append=True) - - self.assertIsInstance(row, AppendRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_direct_row(self): - from google.cloud.bigtable.row import DirectRow - - table, row_key = self._row_methods_helper() - row = table.direct_row(row_key) - - self.assertIsInstance(row, DirectRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_conditional_row(self): - from google.cloud.bigtable.row import ConditionalRow - - table, row_key = self._row_methods_helper() - filter_ = object() - row = table.conditional_row(row_key, filter_=filter_) - - self.assertIsInstance(row, ConditionalRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_append_row(self): - from google.cloud.bigtable.row import AppendRow - - table, row_key = self._row_methods_helper() - row = table.append_row(row_key) - - self.assertIsInstance(row, AppendRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_row_factory_failure(self): - table, row_key = self._row_methods_helper() - with self.assertRaises(ValueError): - table.row(row_key, filter_=object(), append=True) - - def test___eq__(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table1 = self._make_one(self.TABLE_ID, instance) - table2 = self._make_one(self.TABLE_ID, instance) - self.assertEqual(table1, table2) - - def test___eq__type_differ(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table1 = self._make_one(self.TABLE_ID, instance) - table2 = object() - self.assertNotEqual(table1, table2) - - def test___ne__same_value(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table1 = self._make_one(self.TABLE_ID, instance) - table2 = self._make_one(self.TABLE_ID, instance) - comparison_val = table1 != table2 - self.assertFalse(comparison_val) - - def test___ne__(self): - table1 = self._make_one("table_id1", None) - table2 = self._make_one("table_id2", None) - self.assertNotEqual(table1, table2) - - def _create_test_helper(self, split_keys=[], column_families={}): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.cloud.bigtable_admin_v2.proto import table_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, - ) - from 
google.cloud.bigtable.column_family import ColumnFamily - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Patch API calls - client._table_admin_client = table_api - - # Perform the method and check the result. - table.create(column_families=column_families, initial_split_keys=split_keys) - - families = { - id: ColumnFamily(id, self, rule).to_pb() - for (id, rule) in column_families.items() - } - - split = table_admin_messages_v2_pb2.CreateTableRequest.Split - splits = [split(key=split_key) for split_key in split_keys] - - table_api.create_table.assert_called_once_with( - parent=self.INSTANCE_NAME, - table=table_pb2.Table(column_families=families), - table_id=self.TABLE_ID, - initial_splits=splits, - ) - - def test_create(self): - self._create_test_helper() - - def test_create_with_families(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - families = {"family": MaxVersionsGCRule(5)} - self._create_test_helper(column_families=families) - - def test_create_with_split_keys(self): - self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"]) - - def test_exists(self): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2, - ) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client, - bigtable_table_admin_client, - ) - from google.api_core.exceptions import NotFound - from google.api_core.exceptions import BadRequest - - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - # Create response_pb - response_pb = table_messages_v1_pb2.ListTablesResponse( - tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)] - ) - - # Patch API calls - client._table_admin_client = table_api - client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client.transport - bigtable_table_stub.get_table.side_effect = [ - response_pb, - NotFound("testing"), - BadRequest("testing"), - ] - - # Perform the method and check the result. - table1 = instance.table(self.TABLE_ID) - table2 = instance.table("table-id2") - - result = table1.exists() - self.assertEqual(True, result) - - result = table2.exists() - self.assertEqual(False, result) - - with self.assertRaises(BadRequest): - table2.exists() - - def test_delete(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Patch API calls - client._table_admin_client = table_api - - # Create expected_result. - expected_result = None # delete() has no return value. 
- - # Perform the method and check the result. - result = table.delete() - self.assertEqual(result, expected_result) - - def _list_column_families_helper(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Create response_pb - COLUMN_FAMILY_ID = "foo" - column_family = _ColumnFamilyPB() - response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family}) - - # Patch the stub used by the API method. - client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client.transport - bigtable_table_stub.get_table.side_effect = [response_pb] - - # Create expected_result. - expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)} - - # Perform the method and check the result. - result = table.list_column_families() - self.assertEqual(result, expected_result) - - def test_list_column_families(self): - self._list_column_families_helper() - - def test_get_cluster_states(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - INITIALIZING = enum_table.ReplicationState.INITIALIZING - PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE - READY = enum_table.ReplicationState.READY - - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - response_pb = _TablePB( - cluster_states={ - "cluster-id1": _ClusterStatePB(INITIALIZING), - "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE), - "cluster-id3": _ClusterStatePB(READY), - } - ) - - # Patch the stub used by the API method. - client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client.transport - bigtable_table_stub.get_table.side_effect = [response_pb] - - # build expected result - expected_result = { - u"cluster-id1": ClusterState(INITIALIZING), - u"cluster-id2": ClusterState(PLANNED_MAINTENANCE), - u"cluster-id3": ClusterState(READY), - } - - # Perform the method and check the result. 
- result = table.get_cluster_states() - self.assertEqual(result, expected_result) - - def _read_row_helper(self, chunks, expected_result, app_profile_id=None): - - from google.cloud._testing import _Monkey - from google.cloud.bigtable import table as MUT - from google.cloud.bigtable.row_set import RowSet - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.cloud.bigtable.row_filters import RowSampleFilter - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) - - # Create request_pb - request_pb = object() # Returned by our mock. - mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create response_iterator - if chunks is None: - response_iterator = iter(()) # no responses at all - else: - response_pb = _ReadRowsResponsePB(chunks=chunks) - response_iterator = iter([response_pb]) - - # Patch the stub used by the API method. - client._table_data_client = data_api - client._table_admin_client = table_api - client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[response_iterator] - ) - - # Perform the method and check the result. - filter_obj = RowSampleFilter(0.33) - result = None - with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_row(self.ROW_KEY, filter_=filter_obj) - row_set = RowSet() - row_set.add_row_key(self.ROW_KEY) - expected_request = [ - ( - table.name, - { - "end_inclusive": False, - "row_set": row_set, - "app_profile_id": app_profile_id, - "end_key": None, - "limit": None, - "start_key": None, - "filter_": filter_obj, - }, - ) - ] - self.assertEqual(result, expected_result) - self.assertEqual(mock_created, expected_request) - - def test_read_row_miss_no__responses(self): - self._read_row_helper(None, None) - - def test_read_row_miss_no_chunks_in_response(self): - chunks = [] - self._read_row_helper(chunks, None) - - def test_read_row_complete(self): - from google.cloud.bigtable.row_data import Cell - from google.cloud.bigtable.row_data import PartialRowData - - app_profile_id = "app-profile-id" - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] - expected_result = PartialRowData(row_key=self.ROW_KEY) - family = expected_result._cells.setdefault(self.FAMILY_NAME, {}) - column = family.setdefault(self.QUALIFIER, []) - column.append(Cell.from_pb(chunk)) - self._read_row_helper(chunks, expected_result, app_profile_id) - - def test_read_row_more_than_one_row_returned(self): - app_profile_id = "app-profile-id" - chunk_1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunk_2 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_2, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - 
commit_row=True, - ) - - chunks = [chunk_1, chunk_2] - with self.assertRaises(ValueError): - self._read_row_helper(chunks, None, app_profile_id) - - def test_read_row_still_partial(self): - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - ) - # No "commit row". - chunks = [chunk] - with self.assertRaises(ValueError): - self._read_row_helper(chunks, None) - - def test_mutate_rows(self): - from google.rpc.status_pb2 import Status - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - client._table_admin_client = table_api - table = self._make_one(self.TABLE_ID, instance) - - response = [Status(code=0), Status(code=1)] - - mock_worker = mock.Mock(return_value=response) - with mock.patch( - "google.cloud.bigtable.table._RetryableMutateRowsWorker", - new=mock.MagicMock(return_value=mock_worker), - ): - statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()]) - result = [status.code for status in statuses] - expected_result = [0, 1] - - self.assertEqual(result, expected_result) - - def test_read_rows(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import table as MUT - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - app_profile_id = "app-profile-id" - table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) - - # Create request_pb - request = retry = object() # Returned by our mock. - mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request - - # Create expected_result. - expected_result = PartialRowsData( - client._table_data_client.transport.read_rows, request, retry - ) - - # Perform the method and check the result. 
- start_key = b"start-key" - end_key = b"end-key" - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_rows( - start_key=start_key, - end_key=end_key, - filter_=filter_obj, - limit=limit, - retry=retry, - ) - - self.assertEqual(result.rows, expected_result.rows) - self.assertEqual(result.retry, expected_result.retry) - created_kwargs = { - "start_key": start_key, - "end_key": end_key, - "filter_": filter_obj, - "limit": limit, - "end_inclusive": False, - "app_profile_id": app_profile_id, - "row_set": None, - } - self.assertEqual(mock_created, [(table.name, created_kwargs)]) - - def test_read_retry_rows(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.api_core import retry - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) - - # Create response_iterator - chunk_1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_1, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - - chunk_2 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_2, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - - response_1 = _ReadRowsResponseV2([chunk_1]) - response_2 = _ReadRowsResponseV2([chunk_2]) - response_failure_iterator_1 = _MockFailureIterator_1() - response_failure_iterator_2 = _MockFailureIterator_2([response_1]) - response_iterator = _MockReadRowsIterator(response_2) - - # Patch the stub used by the API method. 
- client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[ - response_failure_iterator_1, - response_failure_iterator_2, - response_iterator, - ] - ) - - rows = [] - for row in table.read_rows( - start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows - ): - rows.append(row) - - result = rows[1] - self.assertEqual(result.row_key, self.ROW_KEY_2) - - def test_yield_retry_rows(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - import warnings - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Create response_iterator - chunk_1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_1, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - - chunk_2 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_2, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - - response_1 = _ReadRowsResponseV2([chunk_1]) - response_2 = _ReadRowsResponseV2([chunk_2]) - response_failure_iterator_1 = _MockFailureIterator_1() - response_failure_iterator_2 = _MockFailureIterator_2([response_1]) - response_iterator = _MockReadRowsIterator(response_2) - - # Patch the stub used by the API method. 
- client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[ - response_failure_iterator_1, - response_failure_iterator_2, - response_iterator, - ] - ) - - rows = [] - with warnings.catch_warnings(record=True) as warned: - for row in table.yield_rows( - start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2 - ): - rows.append(row) - - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - result = rows[1] - self.assertEqual(result.row_key, self.ROW_KEY_2) - - def test_yield_rows_with_row_set(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.cloud.bigtable.row_set import RowSet - from google.cloud.bigtable.row_set import RowRange - import warnings - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Create response_iterator - chunk_1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_1, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - - chunk_2 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_2, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - - chunk_3 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_3, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - - response_1 = _ReadRowsResponseV2([chunk_1]) - response_2 = _ReadRowsResponseV2([chunk_2]) - response_3 = _ReadRowsResponseV2([chunk_3]) - response_iterator = _MockReadRowsIterator(response_1, response_2, response_3) - - # Patch the stub used by the API method. 
- client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[response_iterator] - ) - - rows = [] - row_set = RowSet() - row_set.add_row_range( - RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2) - ) - row_set.add_row_key(self.ROW_KEY_3) - - with warnings.catch_warnings(record=True) as warned: - for row in table.yield_rows(row_set=row_set): - rows.append(row) - - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - self.assertEqual(rows[0].row_key, self.ROW_KEY_1) - self.assertEqual(rows[1].row_key, self.ROW_KEY_2) - self.assertEqual(rows[2].row_key, self.ROW_KEY_3) - - def test_sample_row_keys(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Create response_iterator - response_iterator = object() # Just passed to a mock. - - # Patch the stub used by the API method. - inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["sample_row_keys"] = mock.Mock( - side_effect=[[response_iterator]] - ) - - # Create expected_result. - expected_result = response_iterator - - # Perform the method and check the result. - result = table.sample_row_keys() - self.assertEqual(result[0], expected_result) - - def test_truncate(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - expected_result = None # truncate() has no return value. - with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME): - result = table.truncate() - - table_api.drop_row_range.assert_called_once_with( - name=self.TABLE_NAME, delete_all_data_from_table=True - ) - - self.assertEqual(result, expected_result) - - def test_truncate_w_timeout(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - expected_result = None # truncate() has no return value. 
- - timeout = 120 - result = table.truncate(timeout=timeout) - - self.assertEqual(result, expected_result) - - def test_drop_by_prefix(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - expected_result = None # drop_by_prefix() has no return value. - - row_key_prefix = "row-key-prefix" - - result = table.drop_by_prefix(row_key_prefix=row_key_prefix) - - self.assertEqual(result, expected_result) - - def test_drop_by_prefix_w_timeout(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - expected_result = None # drop_by_prefix() has no return value. - - row_key_prefix = "row-key-prefix" - - timeout = 120 - result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout) - - self.assertEqual(result, expected_result) - - def test_mutations_batcher_factory(self): - flush_count = 100 - max_row_bytes = 1000 - table = self._make_one(self.TABLE_ID, None) - mutation_batcher = table.mutations_batcher( - flush_count=flush_count, max_row_bytes=max_row_bytes - ) - - self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID) - self.assertEqual(mutation_batcher.flush_count, flush_count) - self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes) - - def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] - iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - client._table_admin_client = table_api - table_api.get_iam_policy.return_value = iam_policy - - result = table.get_iam_policy() - - table_api.get_iam_policy.assert_called_once_with(resource=table.name) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), 
sorted(members)): - self.assertEqual(found, expected) - - def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] - iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - client._table_admin_client = table_api - table_api.set_iam_policy.return_value = iam_policy_pb - - iam_policy = Policy(etag=etag, version=version) - iam_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.user("user1@test.com"), - Policy.service_account("service_acc1@test.com"), - ] - - result = table.set_iam_policy(iam_policy) - - table_api.set_iam_policy.assert_called_once_with( - resource=table.name, policy=iam_policy_pb - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.iam.v1 import iam_policy_pb2 - - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] - - response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - table_api.test_iam_permissions.return_value = response - client._table_admin_client = table_api - - result = table.test_iam_permissions(permissions) - - self.assertEqual(result, permissions) - table_api.test_iam_permissions.assert_called_once_with( - resource=table.name, permissions=permissions - ) - - -class Test__RetryableMutateRowsWorker(unittest.TestCase): - from grpc import StatusCode - - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - TABLE_ID = "table-id" - - # RPC Status Codes - SUCCESS = StatusCode.OK.value[0] - RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0] - RETRYABLE_2 = StatusCode.ABORTED.value[0] - NON_RETRYABLE = StatusCode.CANCELLED.value[0] - - @staticmethod - def _get_target_class_for_worker(): - from google.cloud.bigtable.table import _RetryableMutateRowsWorker - - return _RetryableMutateRowsWorker - - def _make_worker(self, *args, **kwargs): - return self._get_target_class_for_worker()(*args, **kwargs) - - @staticmethod - def _get_target_class_for_table(): - from google.cloud.bigtable.table import Table - - return Table - - def _make_table(self, *args, **kwargs): - return self._get_target_class_for_table()(*args, **kwargs) - - @staticmethod - def 
_get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def _make_responses_statuses(self, codes): - from google.rpc.status_pb2 import Status - - response = [Status(code=code) for code in codes] - return response - - def _make_responses(self, codes): - import six - from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse - from google.rpc.status_pb2 import Status - - entries = [ - MutateRowsResponse.Entry(index=i, status=Status(code=codes[i])) - for i in six.moves.xrange(len(codes)) - ] - return MutateRowsResponse(entries=entries) - - def test_callable_empty_rows(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - worker = self._make_worker(client, table.name, []) - statuses = worker() - - self.assertEqual(len(statuses), 0) - - def test_callable_no_retry_strategy(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - # Setup: - # - Mutate 3 rows. - # Action: - # - Attempt to mutate the rows w/o any retry strategy. - # Expectation: - # - Since no retry, should return statuses as they come back. - # - Even if there are retryable errors, no retry attempt is made. 
- # - State of responses_statuses should be - # [success, retryable, non-retryable] - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") - - response = self._make_responses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - ) - - with mock.patch("google.cloud.bigtable.table.wrap_method") as patched: - patched.return_value = mock.Mock(return_value=[response]) - - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - statuses = worker(retry=None) - - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - - client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once() - self.assertEqual(result, expected_result) - - def test_callable_retry(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import DEFAULT_RETRY - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - # Setup: - # - Mutate 3 rows. - # Action: - # - Initial attempt will mutate all 3 rows. - # Expectation: - # - First attempt will result in one retryable error. - # - Second attempt will result in success for the retry-ed row. - # - Check MutateRows is called twice. - # - State of responses_statuses should be - # [success, success, non-retryable] - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") - - response_1 = self._make_responses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - ) - response_2 = self._make_responses([self.SUCCESS]) - - # Patch the stub used by the API method. 
- client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock( - side_effect=[[response_1], [response_2]] - ) - - retry = DEFAULT_RETRY.with_delay(initial=0.1) - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - statuses = worker(retry=retry) - - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE] - - self.assertEqual( - client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2 - ) - self.assertEqual(result, expected_result) - - def test_do_mutate_retryable_rows_empty_rows(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - worker = self._make_worker(client, table.name, []) - statuses = worker._do_mutate_retryable_rows() - - self.assertEqual(len(statuses), 0) - - def test_do_mutate_retryable_rows(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - # Setup: - # - Mutate 2 rows. - # Action: - # - Initial attempt will mutate all 2 rows. - # Expectation: - # - Expect [success, non-retryable] - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - - response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE]) - - # Patch the stub used by the API method. - inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) - - worker = self._make_worker(client, table.name, [row_1, row_2]) - statuses = worker._do_mutate_retryable_rows() - - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.NON_RETRYABLE] - - self.assertEqual(result, expected_result) - - def test_do_mutate_retryable_rows_retry(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import _BigtableRetryableError - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - # Setup: - # - Mutate 3 rows. - # Action: - # - Initial attempt will mutate all 3 rows. - # Expectation: - # - Second row returns retryable error code, so expect a raise. 
- # - State of responses_statuses should be - # [success, retryable, non-retryable] - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") - - response = self._make_responses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - ) - - # Patch the stub used by the API method. - inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) - - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - - with self.assertRaises(_BigtableRetryableError): - worker._do_mutate_retryable_rows() - - statuses = worker.responses_statuses - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - - self.assertEqual(result, expected_result) - - def test_do_mutate_retryable_rows_second_retry(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import _BigtableRetryableError - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - # Setup: - # - Mutate 4 rows. - # - First try results: - # [success, retryable, non-retryable, retryable] - # Action: - # - Second try should re-attempt the 'retryable' rows. - # Expectation: - # - After second try: - # [success, success, non-retryable, retryable] - # - One of the rows tried second time returns retryable error code, - # so expect a raise. - # - Exception contains response whose index should be '3' even though - # only two rows were retried. - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") - row_4 = DirectRow(row_key=b"row_key_4", table=table) - row_4.set_cell("cf", b"col", b"value4") - - response = self._make_responses([self.SUCCESS, self.RETRYABLE_1]) - - # Patch the stub used by the API method. 
- inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) - - worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) - worker.responses_statuses = self._make_responses_statuses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2] - ) - - with self.assertRaises(_BigtableRetryableError): - worker._do_mutate_retryable_rows() - - statuses = worker.responses_statuses - result = [status.code for status in statuses] - expected_result = [ - self.SUCCESS, - self.SUCCESS, - self.NON_RETRYABLE, - self.RETRYABLE_1, - ] - - self.assertEqual(result, expected_result) - - def test_do_mutate_retryable_rows_second_try(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - # Setup: - # - Mutate 4 rows. - # - First try results: - # [success, retryable, non-retryable, retryable] - # Action: - # - Second try should re-attempt the 'retryable' rows. - # Expectation: - # - After second try: - # [success, non-retryable, non-retryable, success] - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") - row_4 = DirectRow(row_key=b"row_key_4", table=table) - row_4.set_cell("cf", b"col", b"value4") - - response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS]) - - # Patch the stub used by the API method. - inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) - - worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) - worker.responses_statuses = self._make_responses_statuses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2] - ) - - statuses = worker._do_mutate_retryable_rows() - - result = [status.code for status in statuses] - expected_result = [ - self.SUCCESS, - self.NON_RETRYABLE, - self.NON_RETRYABLE, - self.SUCCESS, - ] - - self.assertEqual(result, expected_result) - - def test_do_mutate_retryable_rows_second_try_no_retryable(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - # Setup: - # - Mutate 2 rows. - # - First try results: [success, non-retryable] - # Action: - # - Second try has no row to retry. 
- # Expectation: - # - After second try: [success, non-retryable] - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - - worker = self._make_worker(client, table.name, [row_1, row_2]) - worker.responses_statuses = self._make_responses_statuses( - [self.SUCCESS, self.NON_RETRYABLE] - ) - - statuses = worker._do_mutate_retryable_rows() - - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.NON_RETRYABLE] - - self.assertEqual(result, expected_result) - - def test_do_mutate_retryable_rows_mismatch_num_responses(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - - response = self._make_responses([self.SUCCESS]) - - # Patch the stub used by the API method. 
- inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) - - worker = self._make_worker(client, table.name, [row_1, row_2]) - with self.assertRaises(RuntimeError): - worker._do_mutate_retryable_rows() - - -class Test__create_row_request(unittest.TestCase): - def _call_fut( - self, - table_name, - start_key=None, - end_key=None, - filter_=None, - limit=None, - end_inclusive=False, - app_profile_id=None, - row_set=None, - ): - - from google.cloud.bigtable.table import _create_row_request - - return _create_row_request( - table_name, - start_key=start_key, - end_key=end_key, - filter_=filter_, - limit=limit, - end_inclusive=end_inclusive, - app_profile_id=app_profile_id, - row_set=row_set, - ) - - def test_table_name_only(self): - table_name = "table_name" - result = self._call_fut(table_name) - expected_result = _ReadRowsRequestPB(table_name=table_name) - self.assertEqual(result, expected_result) - - def test_row_range_row_set_conflict(self): - with self.assertRaises(ValueError): - self._call_fut(None, end_key=object(), row_set=object()) - - def test_row_range_start_key(self): - table_name = "table_name" - start_key = b"start_key" - result = self._call_fut(table_name, start_key=start_key) - expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add(start_key_closed=start_key) - self.assertEqual(result, expected_result) - - def test_row_range_end_key(self): - table_name = "table_name" - end_key = b"end_key" - result = self._call_fut(table_name, end_key=end_key) - expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add(end_key_open=end_key) - self.assertEqual(result, expected_result) - - def test_row_range_both_keys(self): - table_name = "table_name" - start_key = b"start_key" - end_key = b"end_key" - result = self._call_fut(table_name, start_key=start_key, end_key=end_key) - expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add( - start_key_closed=start_key, end_key_open=end_key - ) - self.assertEqual(result, expected_result) - - def test_row_range_both_keys_inclusive(self): - table_name = "table_name" - start_key = b"start_key" - end_key = b"end_key" - result = self._call_fut( - table_name, start_key=start_key, end_key=end_key, end_inclusive=True - ) - expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add( - start_key_closed=start_key, end_key_closed=end_key - ) - self.assertEqual(result, expected_result) - - def test_with_filter(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - - table_name = "table_name" - row_filter = RowSampleFilter(0.33) - result = self._call_fut(table_name, filter_=row_filter) - expected_result = _ReadRowsRequestPB( - table_name=table_name, filter=row_filter.to_pb() - ) - self.assertEqual(result, expected_result) - - def test_with_limit(self): - table_name = "table_name" - limit = 1337 - result = self._call_fut(table_name, limit=limit) - expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit) - self.assertEqual(result, expected_result) - - def test_with_row_set(self): - from google.cloud.bigtable.row_set import RowSet - - table_name = "table_name" - row_set = RowSet() - result = self._call_fut(table_name, row_set=row_set) - expected_result = _ReadRowsRequestPB(table_name=table_name) - self.assertEqual(result, expected_result) - - def test_with_app_profile_id(self): - table_name = 
"table_name" - limit = 1337 - app_profile_id = "app-profile-id" - result = self._call_fut(table_name, limit=limit, app_profile_id=app_profile_id) - expected_result = _ReadRowsRequestPB( - table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id - ) - self.assertEqual(result, expected_result) - - -def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - - return messages_v2_pb2.ReadRowsRequest(*args, **kw) - - -class Test_ClusterState(unittest.TestCase): - def test___eq__(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - READY = enum_table.ReplicationState.READY - state1 = ClusterState(READY) - state2 = ClusterState(READY) - self.assertEqual(state1, state2) - - def test___eq__type_differ(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - READY = enum_table.ReplicationState.READY - state1 = ClusterState(READY) - state2 = object() - self.assertNotEqual(state1, state2) - - def test___ne__same_value(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - READY = enum_table.ReplicationState.READY - state1 = ClusterState(READY) - state2 = ClusterState(READY) - comparison_val = state1 != state2 - self.assertFalse(comparison_val) - - def test___ne__(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - READY = enum_table.ReplicationState.READY - INITIALIZING = enum_table.ReplicationState.INITIALIZING - state1 = ClusterState(READY) - state2 = ClusterState(INITIALIZING) - self.assertNotEqual(state1, state2) - - def test__repr__(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN - INITIALIZING = enum_table.ReplicationState.INITIALIZING - PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE - UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE - READY = enum_table.ReplicationState.READY - - replication_dict = { - STATE_NOT_KNOWN: "STATE_NOT_KNOWN", - INITIALIZING: "INITIALIZING", - PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE", - UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE", - READY: "READY", - } - - self.assertEqual( - str(ClusterState(STATE_NOT_KNOWN)), replication_dict[STATE_NOT_KNOWN] - ) - self.assertEqual( - str(ClusterState(INITIALIZING)), replication_dict[INITIALIZING] - ) - self.assertEqual( - str(ClusterState(PLANNED_MAINTENANCE)), - replication_dict[PLANNED_MAINTENANCE], - ) - self.assertEqual( - str(ClusterState(UNPLANNED_MAINTENANCE)), - replication_dict[UNPLANNED_MAINTENANCE], - ) - self.assertEqual(str(ClusterState(READY)), replication_dict[READY]) - - self.assertEqual( - ClusterState(STATE_NOT_KNOWN).replication_state, STATE_NOT_KNOWN - ) - self.assertEqual(ClusterState(INITIALIZING).replication_state, INITIALIZING) - self.assertEqual( - ClusterState(PLANNED_MAINTENANCE).replication_state, PLANNED_MAINTENANCE - ) - self.assertEqual( - ClusterState(UNPLANNED_MAINTENANCE).replication_state, UNPLANNED_MAINTENANCE - ) - self.assertEqual(ClusterState(READY).replication_state, READY) - - -def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - - family_name = kw.pop("family_name") - 
qualifier = kw.pop("qualifier") - message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) - message.family_name.value = family_name - message.qualifier.value = qualifier - return message - - -def _ReadRowsResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - - return messages_v2_pb2.ReadRowsResponse(*args, **kw) - - -def _mutate_rows_request_pb(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 - - return data_messages_v2_pb2.MutateRowsRequest(*args, **kw) - - -class _MockReadRowsIterator(object): - def __init__(self, *values): - self.iter_values = iter(values) - - def next(self): - return next(self.iter_values) - - __next__ = next - - -class _MockFailureIterator_1(object): - def next(self): - raise DeadlineExceeded("Failed to read from server") - - __next__ = next - - -class _MockFailureIterator_2(object): - def __init__(self, *values): - self.iter_values = values[0] - self.calls = 0 - - def next(self): - self.calls += 1 - if self.calls == 1: - return self.iter_values[0] - else: - raise DeadlineExceeded("Failed to read from server") - - __next__ = next - - -class _ReadRowsResponseV2(object): - def __init__(self, chunks, last_scanned_row_key=""): - self.chunks = chunks - self.last_scanned_row_key = last_scanned_row_key - - -def _TablePB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 - - return table_v2_pb2.Table(*args, **kw) - - -def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 - - return table_v2_pb2.ColumnFamily(*args, **kw) - - -def _ClusterStatePB(replication_state): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 - - return table_v2_pb2.Table.ClusterState(replication_state=replication_state) - - -def _read_rows_retry_exception(exc): - return isinstance(exc, DeadlineExceeded) diff --git a/firestore/.coveragerc b/firestore/.coveragerc deleted file mode 100644 index b178b094aa1d..000000000000 --- a/firestore/.coveragerc +++ /dev/null @@ -1,19 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! -[run] -branch = True - -[report] -fail_under = 100 -show_missing = True -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore abstract methods - raise NotImplementedError -omit = - */gapic/*.py - */proto/*.py - */core/*.py - */site-packages/*.py \ No newline at end of file diff --git a/firestore/.flake8 b/firestore/.flake8 deleted file mode 100644 index 0268ecc9c55c..000000000000 --- a/firestore/.flake8 +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! -[flake8] -ignore = E203, E266, E501, W503 -exclude = - # Exclude generated code. - **/proto/** - **/gapic/** - *_pb2.py - - # Standard linting exemptions. 
- __pycache__, - .git, - *.pyc, - conf.py diff --git a/firestore/.repo-metadata.json b/firestore/.repo-metadata.json deleted file mode 100644 index 6a3e669fce83..000000000000 --- a/firestore/.repo-metadata.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name": "firestore", - "name_pretty": "Cloud Firestore", - "product_documentation": "https://cloud.google.com/firestore", - "client_documentation": "https://googleapis.dev/python/firestore/latest", - "issue_tracker": "https://issuetracker.google.com/savedsearches/5337669", - "release_level": "beta", - "language": "python", - "repo": "googleapis/google-cloud-python", - "distribution_name": "google-cloud-firestore", - "api_id": "firestore.googleapis.com", - "requires_billing": true -} \ No newline at end of file diff --git a/firestore/CHANGELOG.md b/firestore/CHANGELOG.md deleted file mode 100644 index a0841a07158b..000000000000 --- a/firestore/CHANGELOG.md +++ /dev/null @@ -1,335 +0,0 @@ -# Changelog - -[PyPI History][1] - -[1]: https://pypi.org/project/google-cloud-firestore/#history - - -## 1.6.1 - -01-02-2020 10:35 PST - - -### Implementation Changes -- Recover watch streams on more error types ([#9995](https://github.com/googleapis/google-cloud-python/pull/9995)) -- Simplify 'Collection.add' and avoid a spurious API call ([#9634](https://github.com/googleapis/google-cloud-python/pull/9634)) - -### Documentation -- Add new where operators to docstring ([#9789](https://github.com/googleapis/google-cloud-python/pull/9789)) -- Change spacing in docs templates (via synth) ([#9750](https://github.com/googleapis/google-cloud-python/pull/9750)) -- Add python 2 sunset banner to documentation ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036)) - -## 1.6.0 - -11-06-2019 13:49 PST - -### New Features -- Surface new 'IN' and 'ARRAY_CONTAINS_ANY' query operators. ([#9541](https://github.com/googleapis/google-cloud-python/pull/9541)) - -## 1.5.0 - -10-15-2019 06:45 PDT - - -### Implementation Changes -- Expand dotted keys in mappings used as cursors. ([#8568](https://github.com/googleapis/google-cloud-python/pull/8568)) -- Tweak GAPIC client configuration (via synth). ([#9173](https://github.com/googleapis/google-cloud-python/pull/9173)) - -### New Features -- Add `IN`, `ARRAY_CONTAINS_ANY` operators; update docstrings (via synth). ([#9439](https://github.com/googleapis/google-cloud-python/pull/9439)) - Add `COLLECTION_GROUP` to `Index.QueryScope` enum; update docstrings (via synth). ([#9253](https://github.com/googleapis/google-cloud-python/pull/9253)) -- Add `client_options` to v1 client. ([#9048](https://github.com/googleapis/google-cloud-python/pull/9048)) - -### Dependencies -- Pin 'google-cloud-core >= 1.0.3, < 2.0.0dev'. ([#9445](https://github.com/googleapis/google-cloud-python/pull/9445)) - -### Documentation -- Update README example to use non-deprecated `query.get`. ([#9235](https://github.com/googleapis/google-cloud-python/pull/9235)) -- Remove duplicated word in README. ([#9297](https://github.com/googleapis/google-cloud-python/pull/9297)) -- Fix intersphinx reference to `requests`. ([#9294](https://github.com/googleapis/google-cloud-python/pull/9294)) -- Remove CI for gh-pages, use googleapis.dev for `api_core refs`. ([#9085](https://github.com/googleapis/google-cloud-python/pull/9085)) -- Add license file. ([#9109](https://github.com/googleapis/google-cloud-python/pull/9109)) -- Fix reference to library name ([#9047](https://github.com/googleapis/google-cloud-python/pull/9047)) -- Remove compatibility badges from READMEs. 
([#9035](https://github.com/googleapis/google-cloud-python/pull/9035)) - -## 1.4.0 - -08-06-2019 11:43 PDT - -### New Features -- Support emulator in client. ([#8721](https://github.com/googleapis/google-cloud-python/pull/8721)) -- Add GAPIC client for Admin V1. ([#8667](https://github.com/googleapis/google-cloud-python/pull/8667)) -- Add `Transaction.get` / `Transaction.get_all`. ([#8628](https://github.com/googleapis/google-cloud-python/pull/8628)) - -### Implementation Changes -- Remove send/recv msg size limit (via synth). ([#8955](https://github.com/googleapis/google-cloud-python/pull/8955)) -- Deprecate `v1beta1` API / client. ([#8886](https://github.com/googleapis/google-cloud-python/pull/8886)) -- Allow snapshot cursors from other collections for collection group queries. ([#8882](https://github.com/googleapis/google-cloud-python/pull/8882)) -- Fix sorting `delete_changes` in `Watch._compute_snapshot`. ([#8809](https://github.com/googleapis/google-cloud-python/pull/8809)) -- Treat `None` as EOF in `Watch.on_snapshot`. ([#8687](https://github.com/googleapis/google-cloud-python/pull/8687)) -- Fix V1 `Client.collections` method. ([#8718](https://github.com/googleapis/google-cloud-python/pull/8718)) -- Avoid adding `prefix` to update mask for transforms used in `update`. ([#8701](https://github.com/googleapis/google-cloud-python/pull/8701)) -- Add `should_terminate` predicate for clean BiDi shutdown. ([#8650](https://github.com/googleapis/google-cloud-python/pull/8650)) - -### Dependencies -- Bump minimum version for google-api-core to 1.14.0. ([#8709](https://github.com/googleapis/google-cloud-python/pull/8709)) - -### Documentation -- Update intersphinx mapping for requests. ([#8805](https://github.com/googleapis/google-cloud-python/pull/8805)) -- Link to googleapis.dev documentation in READMEs. ([#8705](https://github.com/googleapis/google-cloud-python/pull/8705)) -- Add compatibility check badges to READMEs. ([#8288](https://github.com/googleapis/google-cloud-python/pull/8288)) - -### Internal / Testing Changes -- Preserve manual change in noxfile (run systests verbosely). ([#8744](https://github.com/googleapis/google-cloud-python/pull/8744)) -- Update V1 conformance tests to match new repo / format. ([#8689](https://github.com/googleapis/google-cloud-python/pull/8689)) -- Improve cleanups for `watch` system tests. ([#8638](https://github.com/googleapis/google-cloud-python/pull/8638)) -- Avoid sharing top-level collection across test cases / CI runs. ([#8637](https://github.com/googleapis/google-cloud-python/pull/8637)) - -## 1.3.0 - -07-09-2019 13:19 PDT - - -### Implementation Changes -- Add missing transforms to 'google.cloud.firestore' shim. ([#8481](https://github.com/googleapis/google-cloud-python/pull/8481)) -- Preserve reference to missing documents in 'Client.get_all'. ([#8472](https://github.com/googleapis/google-cloud-python/pull/8472)) -- Add gRPC keepalive to gapic client initialization. ([#8264](https://github.com/googleapis/google-cloud-python/pull/8264)) -- Add disclaimer to auto-generated template files. ([#8314](https://github.com/googleapis/google-cloud-python/pull/8314)) -- Use correct environment variable to guard the 'system' part. ([#7912](https://github.com/googleapis/google-cloud-python/pull/7912)) - -### New Features -- Add 'client_options' support, update list method docstrings (via synth). ([#8509](https://github.com/googleapis/google-cloud-python/pull/8509)) -- Allow kwargs to be passed to create_channel (via synth). 
([#8390](https://github.com/googleapis/google-cloud-python/pull/8390)) -- Add 'FieldPath.documentId()'. ([#8543](https://github.com/googleapis/google-cloud-python/pull/8543)) - -### Documentation -- Fix docstring example for 'Client.collection_group'. ([#8438](https://github.com/googleapis/google-cloud-python/pull/8438)) -- Normalize docstring class refs. ([#8102](https://github.com/googleapis/google-cloud-python/pull/8102)) - -### Internal / Testing Changes -- Pin black version (via synth). ([#8583](https://github.com/googleapis/google-cloud-python/pull/8583)) -- All: Add docs job to publish to googleapis.dev. ([#8464](https://github.com/googleapis/google-cloud-python/pull/8464)) -- Declare encoding as utf-8 in pb2 files (via synth). ([#8352](https://github.com/googleapis/google-cloud-python/pull/8352)) -- Suppress checking 'cov-fail-under' in nox default session (via synth). ([#8241](https://github.com/googleapis/google-cloud-python/pull/8241)) -- Blacken noxfile.py, setup.py (via synth). ([#8123](https://github.com/googleapis/google-cloud-python/pull/8123)) -- Add empty lines (via synth). ([#8058](https://github.com/googleapis/google-cloud-python/pull/8058)) - -## 1.2.0 - -05-16-2019 12:25 PDT - - -### New Features -- Add support for numeric transforms: `increment` / `maximum` / `minimum`. ([#7989](https://github.com/googleapis/google-cloud-python/pull/7989)) -- Add `client_info` support to V1 client. ([#7877](https://github.com/googleapis/google-cloud-python/pull/7877)) and ([#7898](https://github.com/googleapis/google-cloud-python/pull/7898)) - -### Dependencies -- Pin `google-cloud-core >= 1.0.0, < 2.0dev`. ([#7993](https://github.com/googleapis/google-cloud-python/pull/7993)) - -### Internal / Testing Changes -- Add nox session `docs`, add routing header to method metadata, reorder methods (via synth).. ([#7771](https://github.com/googleapis/google-cloud-python/pull/7771)) - -## 1.1.0 - -04-30-2019 12:29 PDT - - -### New Features -- Add support for CollectionGroup queries. ([#7758](https://github.com/googleapis/google-cloud-python/pull/7758)) - -## 1.0.0 - -04-30-2019 10:00 PDT - -### Implementation Changes -- Use parent path for watch on queries. ([#7752](https://github.com/googleapis/google-cloud-python/pull/7752)) -- Add routing header to method metadata (via synth). ([#7749](https://github.com/googleapis/google-cloud-python/pull/7749)) - -## 0.32.1 - -04-05-2019 10:51 PDT - - -### Dependencies -- Update google-api-core dependency - -## 0.32.0 - -04-01-2019 11:44 PDT - - -### Implementation Changes -- Allow passing metadata as part of creating a bidi ([#7514](https://github.com/googleapis/google-cloud-python/pull/7514)) -- Remove classifier for Python 3.4 for end-of-life. ([#7535](https://github.com/googleapis/google-cloud-python/pull/7535)) -- Rename 'Query.get' -> 'stream'. ([#7284](https://github.com/googleapis/google-cloud-python/pull/7284)) -- Remove bogus error checking of query response stream. ([#7206](https://github.com/googleapis/google-cloud-python/pull/7206)) --'increment' / 'minimum' / 'maximum' field transform attributes. ([#7129](https://github.com/googleapis/google-cloud-python/pull/7129)) -- Respect transform values passed into collection.add ([#7072](https://github.com/googleapis/google-cloud-python/pull/7072)) -- Protoc-generated serialization update. ([#7083](https://github.com/googleapis/google-cloud-python/pull/7083)) - -### New Features -- Firestore: Add v1 API version. 
([#7494](https://github.com/googleapis/google-cloud-python/pull/7494)) -- Add 'Collection.list_documents' method. ([#7221](https://github.com/googleapis/google-cloud-python/pull/7221)) -- Add 'DocumentReference.path' property. ([#7219](https://github.com/googleapis/google-cloud-python/pull/7219)) - -### Documentation -- Updated client library documentation URLs. ([#7307](https://github.com/googleapis/google-cloud-python/pull/7307)) -- Fix the docstring example for 'Query.on_snapshot'. ([#7281](https://github.com/googleapis/google-cloud-python/pull/7281)) -- Update copyright headers - -### Internal / Testing Changes -- Fix typo in proto comments (via synth). -- Prep firestore unit tests for generation from 'v1' protos. ([#7437](https://github.com/googleapis/google-cloud-python/pull/7437)) -- Copy lintified proto files (via synth). ([#7466](https://github.com/googleapis/google-cloud-python/pull/7466)) -- Add clarifying comment to blacken nox target. ([#7392](https://github.com/googleapis/google-cloud-python/pull/7392)) -- Add protos as an artifact to library ([#7205](https://github.com/googleapis/google-cloud-python/pull/7205)) - -## 0.31.0 - -12-18-2018 11:20 PST - - -### Implementation Changes -- Implement equality semantics for public types ([#6916](https://github.com/googleapis/google-cloud-python/pull/6916)) -- Pick up stub docstring fix in GAPIC generator. ([#6988](https://github.com/googleapis/google-cloud-python/pull/6988)) -- Use 'DatetimeWithNanos' for converting timestamp messages. ([#6920](https://github.com/googleapis/google-cloud-python/pull/6920)) -- Enable use of 'WriteBatch' as a context manager. ([#6912](https://github.com/googleapis/google-cloud-python/pull/6912)) -- Document timeouts for 'Query.get' / 'Collection.get'. ([#6853](https://github.com/googleapis/google-cloud-python/pull/6853)) -- Normalize FieldPath parsing / escaping ([#6904](https://github.com/googleapis/google-cloud-python/pull/6904)) -- For queries ordered on `__name__`, expand field values to full paths. ([#6829](https://github.com/googleapis/google-cloud-python/pull/6829)) -- Import `iam.policy` from `google.api_core`. ([#6741](https://github.com/googleapis/google-cloud-python/pull/6741)) -- Prevent use of transforms as values passed to 'Query.where'. ([#6703](https://github.com/googleapis/google-cloud-python/pull/6703)) -- 'Query.select([])' implies `__name__`. ([#6735](https://github.com/googleapis/google-cloud-python/pull/6735)) -- Reject invalid paths passed to 'Query.{select,where,order_by}' ([#6770](https://github.com/googleapis/google-cloud-python/pull/6770)) -- Prevent use of transforms as cursor values. ([#6706](https://github.com/googleapis/google-cloud-python/pull/6706)) -- Refactor 'Document.get' to use the 'GetDocument' API. ([#6534](https://github.com/googleapis/google-cloud-python/pull/6534)) -- Pick up enum fixes in the GAPIC generator. ([#6612](https://github.com/googleapis/google-cloud-python/pull/6612)) -- Pick up changes to GAPIC client config. ([#6589](https://github.com/googleapis/google-cloud-python/pull/6589)) -- Suppress deprecation warnings for 'assertRaisesRegexp'. ([#6543](https://github.com/googleapis/google-cloud-python/pull/6543)) -- Firestore: pick up fixes to GAPIC generator. ([#6523](https://github.com/googleapis/google-cloud-python/pull/6523)) -- Fix `client_info` bug, update docstrings. ([#6412](https://github.com/googleapis/google-cloud-python/pull/6412)) -- Block calling 'DocumentRef.get()' with a single string. 
([#6270](https://github.com/googleapis/google-cloud-python/pull/6270)) - -### New Features -- Impose required semantics for snapshots as cursors: ([#6837](https://github.com/googleapis/google-cloud-python/pull/6837)) -- Make cursor-related 'Query' methods accept lists ([#6697](https://github.com/googleapis/google-cloud-python/pull/6697)) -- Add 'Client.collections' method. ([#6650](https://github.com/googleapis/google-cloud-python/pull/6650)) -- Add support for 'ArrayRemove' / 'ArrayUnion' transforms ([#6651](https://github.com/googleapis/google-cloud-python/pull/6651)) -- Add support for `array_contains` query operator. ([#6481](https://github.com/googleapis/google-cloud-python/pull/6481)) -- Add Watch Support ([#6191](https://github.com/googleapis/google-cloud-python/pull/6191)) -- Remove use of deprecated 'channel' argument. ([#6271](https://github.com/googleapis/google-cloud-python/pull/6271)) - -### Dependencies -- Pin 'google-api_core >= 1.7.0'. ([#6937](https://github.com/googleapis/google-cloud-python/pull/6937)) -- Update dependency to google-cloud-core ([#6835](https://github.com/googleapis/google-cloud-python/pull/6835)) -- Bump minimum 'api_core' version for all GAPIC libs to 1.4.1. ([#6391](https://github.com/googleapis/google-cloud-python/pull/6391)) - -### Documentation -- Document Python 2 deprecation ([#6910](https://github.com/googleapis/google-cloud-python/pull/6910)) -- Normalize docs for `page_size` / `max_results` / `page_token`. ([#6842](https://github.com/googleapis/google-cloud-python/pull/6842)) -- Port changelog from 30.1 branch to master ([#6903](https://github.com/googleapis/google-cloud-python/pull/6903)) -- Normalize use of support level badges ([#6159](https://github.com/googleapis/google-cloud-python/pull/6159)) - -### Internal / Testing Changes -- Add driver for listen conformance tests. ([#6935](https://github.com/googleapis/google-cloud-python/pull/6935)) -- Add driver for query conformance tests. ([#6839](https://github.com/googleapis/google-cloud-python/pull/6839)) -- Update noxfile. -- Blacken libraries ([#6794](https://github.com/googleapis/google-cloud-python/pull/6794)) -- Omit local deps ([#6701](https://github.com/googleapis/google-cloud-python/pull/6701)) -- Run black at end of synth.py ([#6698](https://github.com/googleapis/google-cloud-python/pull/6698)) -- Add templates for flake8, coveragerc, noxfile, and black. ([#6642](https://github.com/googleapis/google-cloud-python/pull/6642)) -- Fix delete conformance ([#6559](https://github.com/googleapis/google-cloud-python/pull/6559)) -- Add synth metadata. ([#6567](https://github.com/googleapis/google-cloud-python/pull/6567)) -- Refactor conformance tests. ([#6291](https://github.com/googleapis/google-cloud-python/pull/6291)) -- Import stdlib ABCs from 'collections.abc' rather than 'collections'. ([#6451](https://github.com/googleapis/google-cloud-python/pull/6451)) -- Fix path of tests-to-include in MANIFEST.in ([#6381](https://github.com/googleapis/google-cloud-python/pull/6381)) -- Fix error from new flake8 version. ([#6320](https://github.com/googleapis/google-cloud-python/pull/6320)) - -## 0.30.1 - -12-11-2018 10:49 PDT - - -### Dependencies -- Update `core` and `api_core` dependencies to latest versions. - -## 0.30.0 - -10-15-2018 09:04 PDT - - -### New Features -- Add `Document.collections` method. 
([#5613](https://github.com/googleapis/google-cloud-python/pull/5613)) -- Add `merge` as an option to `DocumentReference.set()` ([#4851](https://github.com/googleapis/google-cloud-python/pull/4851)) -- Return empty snapshot instead of raising NotFound exception ([#5007](https://github.com/googleapis/google-cloud-python/pull/5007)) -- Add Field path class ([#4392](https://github.com/googleapis/google-cloud-python/pull/4392)) - -### Implementation Changes -- Avoid overwriting `__module__` of messages from shared modules. ([#5364](https://github.com/googleapis/google-cloud-python/pull/5364)) -- Don't omit originally-empty map values when processing timestamps. ([#6050](https://github.com/googleapis/google-cloud-python/pull/6050)) - -### Documentation -- Prep docs for repo split. ([#6000](https://github.com/googleapis/google-cloud-python/pull/6000)) -- Docs: Replace links to `/stable/` with `/latest/`. ([#5901](https://github.com/googleapis/google-cloud-python/pull/5901)) -- Document `FieldPath.from_string` ([#5121](https://github.com/googleapis/google-cloud-python/pull/5121)) - -### Internal / Testing Changes -- Use new Nox ([#6175](https://github.com/googleapis/google-cloud-python/pull/6175)) -- Add new conformance tests. ([#6124](https://github.com/googleapis/google-cloud-python/pull/6124)) -- Add `synth.py`. ([#6079](https://github.com/googleapis/google-cloud-python/pull/6079)) -- Test document update w/ integer ids ([#5895](https://github.com/googleapis/google-cloud-python/pull/5895)) -- Nox: use inplace installs ([#5865](https://github.com/googleapis/google-cloud-python/pull/5865)) -- Re-sync with .proto / .textproto files from google-cloud-common. ([#5351](https://github.com/googleapis/google-cloud-python/pull/5351)) -- Modify system tests to use prerelease versions of grpcio ([#5304](https://github.com/googleapis/google-cloud-python/pull/5304)) -- Add test runs for Python 3.7 and remove 3.4 ([#5295](https://github.com/googleapis/google-cloud-python/pull/5295)) -- Fix over-long line. ([#5129](https://github.com/googleapis/google-cloud-python/pull/5129)) -- Distinguish `FieldPath` classes from field path strings ([#4466](https://github.com/googleapis/google-cloud-python/pull/4466)) -- Fix bad trove classifier -- Cleanup `FieldPath` ([#4996](https://github.com/googleapis/google-cloud-python/pull/4996)) -- Fix typo in `Document.collections` docstring. ([#5669](https://github.com/googleapis/google-cloud-python/pull/5669)) -- Implement `FieldPath.__add__` ([#5149](https://github.com/googleapis/google-cloud-python/pull/5149)) - -## 0.29.0 - -### New features - -- All non-simple field names are converted into unicode (#4859) - -### Implementation changes - -- The underlying generated code has been re-generated to pick up new features and bugfixes. (#4916) -- The `Admin` API interface has been temporarily removed. - -### Dependencies - -- Update dependency range for api-core to include v1.0.0 releases (#4944) -- The minimum version for `google-api-core` has been updated to version 1.0.0. This may cause some incompatibility with older google-cloud libraries, you will need to update those libraries if you have a dependency conflict. (#4944, #4946) - -### Documentation - -- Fixing "Fore" -> "For" typo in README docs. 
(#4317) - -### Testing and internal changes - -- Install local dependencies when running lint (#4936) -- Re-enable lint for tests, remove usage of pylint (#4921) -- Normalize all setup.py files (#4909) -- System test fix, changed ALREADY_EXISTS and MISSING_ENTITY to DOCUMENT_EXISTS and MISSING_DOCUMENT and updated wording (#4803) -- Cross-language tests (#4359) -- Fix import column lengths pass 79 (#4464) -- Making a `nox -s default` session for all packages. (#4324) -- Shorten test names (#4321) - -## 0.28.0 - -### Documentation - -- Added link to "Python Development Environment Setup Guide" in - project README (#4187, h/t to @michaelawyu) - -### Dependencies - -- Upgrading to `google-cloud-core >= 0.28.0` and adding dependency - on `google-api-core` (#4221, #4280) - -PyPI: https://pypi.org/project/google-cloud-firestore/0.28.0/ diff --git a/firestore/LICENSE b/firestore/LICENSE deleted file mode 100644 index d64569567334..000000000000 --- a/firestore/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/firestore/MANIFEST.in b/firestore/MANIFEST.in deleted file mode 100644 index 9cbf175afe6b..000000000000 --- a/firestore/MANIFEST.in +++ /dev/null @@ -1,5 +0,0 @@ -include README.rst LICENSE -recursive-include google *.json *.proto -recursive-include tests * -global-exclude *.py[co] -global-exclude __pycache__ diff --git a/firestore/Makefile_v1 b/firestore/Makefile_v1 deleted file mode 100644 index af193e3e819b..000000000000 --- a/firestore/Makefile_v1 +++ /dev/null @@ -1,40 +0,0 @@ -# This makefile builds the protos needed for cross-language Firestore tests. - -# Assume protoc is on the path. The proto compiler must be one that -# supports proto3 syntax. -PROTOC = protoc - -# Dependent repos. -REPO_DIR=$(HOME)/git-repos -PROTOBUF_REPO = $(REPO_DIR)/protobuf -GOOGLEAPIS_REPO = $(REPO_DIR)/googleapis -TESTS_REPO = $(REPO_DIR)/conformance-tests -TEST_PROTO_DIR = $(TESTS_REPO)/firestore/v1 -TEST_PROTO_SRC = $(TEST_PROTO_DIR)/proto/google/cloud/conformance/firestore/v1/tests.proto - -TMPDIR = /tmp/python-fs-proto -TMPDIR_FS = $(TMPDIR)/google/cloud/firestore_v1/proto -TEST_PROTO_COPY = $(TMPDIR_FS)/tests.proto - -.PHONY: sync-protos gen-protos - -gen-protos: sync-protos tweak-protos - # TODO(jba): Put the generated proto somewhere more suitable. - $(PROTOC) --python_out=. \ - -I $(TMPDIR) \ - -I $(PROTOBUF_REPO)/src \ - -I $(GOOGLEAPIS_REPO) \ - $(TEST_PROTO_COPY) - -tweak-protos: - mkdir -p $(TMPDIR_FS) - cp $(GOOGLEAPIS_REPO)/google/firestore/v1/*.proto $(TMPDIR_FS) - sed -i -e 's@google/firestore/v1@google/cloud/firestore_v1/proto@' $(TMPDIR_FS)/*.proto - cp $(TEST_PROTO_SRC) $(TEST_PROTO_COPY) - sed -i -e 's@package google.cloud.conformance.firestore.v1@package google.cloud.firestore_v1.proto@' $(TEST_PROTO_COPY) - sed -i -e 's@google/firestore/v1@google/cloud/firestore_v1/proto@' $(TEST_PROTO_COPY) - -sync-protos: - cd $(PROTOBUF_REPO); git pull - cd $(GOOGLEAPIS_REPO); git pull - cd $(TESTS_REPO); git pull diff --git a/firestore/Makefile_v1beta1 b/firestore/Makefile_v1beta1 deleted file mode 100644 index 69cf87f41a36..000000000000 --- a/firestore/Makefile_v1beta1 +++ /dev/null @@ -1,37 +0,0 @@ -# This makefile builds the protos needed for cross-language Firestore tests. - -# Assume protoc is on the path. The proto compiler must be one that -# supports proto3 syntax. -PROTOC = protoc - -# Dependent repos. 
-REPO_DIR = $(HOME)/git-repos -PROTOBUF_REPO = $(REPO_DIR)/protobuf -GOOGLEAPIS_REPO = $(REPO_DIR)/googleapis -TESTS_REPO = $(REPO_DIR)/gcp/google-cloud-common - -TMPDIR = /tmp/python-fs-proto -TMPDIR_FS = $(TMPDIR)/google/cloud/firestore_v1beta1/proto - -.PHONY: sync-protos gen-protos - -gen-protos: sync-protos tweak-protos - # TODO(jba): Put the generated proto somewhere more suitable. - $(PROTOC) --python_out=google/cloud/firestore_v1beta1/proto \ - -I $(TMPDIR) \ - -I $(PROTOBUF_REPO)/src \ - -I $(GOOGLEAPIS_REPO) \ - $(TMPDIR)/test_v1beta1.proto - -tweak-protos: - mkdir -p $(TMPDIR_FS) - cp $(GOOGLEAPIS_REPO)/google/firestore/v1beta1/*.proto $(TMPDIR_FS) - sed -i -e 's@google/firestore/v1beta1@google/cloud/firestore_v1beta1/proto@' $(TMPDIR_FS)/*.proto - cp $(TESTS_REPO)/testing/firestore/proto/test_v1beta1.proto $(TMPDIR) - sed -i -e 's@package tests@package tests.v1beta1@' $(TMPDIR)/test_v1beta1.proto - sed -i -e 's@google/firestore/v1beta1@google/cloud/firestore_v1beta1/proto@' $(TMPDIR)/test_v1beta1.proto - -sync-protos: - cd $(PROTOBUF_REPO); git pull - cd $(GOOGLEAPIS_REPO); git pull - #cd $(TESTS_REPO); git pull diff --git a/firestore/README.rst b/firestore/README.rst deleted file mode 100644 index bb109a0efcd7..000000000000 --- a/firestore/README.rst +++ /dev/null @@ -1,115 +0,0 @@ -Python Client for Google Cloud Firestore -======================================== - -|beta| |pypi| |versions| - -The `Google Cloud Firestore`_ API is a flexible, scalable -database for mobile, web, and server development from Firebase and Google -Cloud Platform. Like Firebase Realtime Database, it keeps your data in -sync across client apps through realtime listeners and offers offline support -for mobile and web so you can build responsive apps that work regardless of -network latency or Internet connectivity. Cloud Firestore also offers seamless -integration with other Firebase and Google Cloud Platform products, -including Cloud Functions. - -- `Product Documentation`_ -- `Client Library Documentation`_ - -.. |beta| image:: https://img.shields.io/badge/support-beta-silver.svg - :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#beta-support -.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-firestore.svg - :target: https://pypi.org/project/google-cloud-firestore/ -.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-firestore.svg -.. _Google Cloud Firestore: https://cloud.google.com/firestore/ -.. _Product Documentation: https://cloud.google.com/firestore/docs/ -.. _Client Library Documentation: https://googleapis.dev/python/firestore/latest - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. `Enable the Google Cloud Firestore API.`_ -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Enable the Google Cloud Firestore API.: https://cloud.google.com/firestore -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. 
The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Supported Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 - -Deprecated Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv <your-env> - source <your-env>/bin/activate - <your-env>/bin/pip install google-cloud-firestore - - -Windows -^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install google-cloud-firestore - - -Example Usage -~~~~~~~~~~~~~ - -.. code:: python - - from google.cloud import firestore - - # Add a new document - db = firestore.Client() - doc_ref = db.collection(u'users').document(u'alovelace') - doc_ref.set({ - u'first': u'Ada', - u'last': u'Lovelace', - u'born': 1815 - }) - - # Then query for documents - users_ref = db.collection(u'users') - - for doc in users_ref.stream(): - print(u'{} => {}'.format(doc.id, doc.to_dict())) - -Next Steps -~~~~~~~~~~ - -- Read the `Client Library Documentation`_ for Google Cloud Firestore API - to see other available methods on the client. -- Read the `Product Documentation`_ to learn - more about the product and see How-to Guides. diff --git a/firestore/docs/README.rst b/firestore/docs/README.rst deleted file mode 120000 index 89a0106941ff..000000000000 --- a/firestore/docs/README.rst +++ /dev/null @@ -1 +0,0 @@ -../README.rst \ No newline at end of file diff --git a/firestore/docs/_static/custom.css b/firestore/docs/_static/custom.css deleted file mode 100644 index 0abaf229fce3..000000000000 --- a/firestore/docs/_static/custom.css +++ /dev/null @@ -1,4 +0,0 @@ -div#python2-eol { - border-color: red; - border-width: medium; -} \ No newline at end of file diff --git a/firestore/docs/_templates/layout.html b/firestore/docs/_templates/layout.html deleted file mode 100644 index 228529efe2d2..000000000000 --- a/firestore/docs/_templates/layout.html +++ /dev/null @@ -1,50 +0,0 @@ - -{% extends "!layout.html" %} -{%- block content %} -{%- if theme_fixed_sidebar|lower == 'true' %} -
- {{ sidebar() }} - {%- block document %} -
- {%- if render_sidebar %} -
- {%- endif %} - - {%- block relbar_top %} - {%- if theme_show_relbar_top|tobool %} - - {%- endif %} - {% endblock %} - -
-
- On January 1, 2020 this library will no longer support Python 2 on the latest released version. - Previously released library versions will continue to be available. For more information please - visit Python 2 support on Google Cloud. -
- {% block body %} {% endblock %} -
- - {%- block relbar_bottom %} - {%- if theme_show_relbar_bottom|tobool %} - - {%- endif %} - {% endblock %} - - {%- if render_sidebar %} -
- {%- endif %} -
- {%- endblock %} -
-
-{%- else %} -{{ super() }} -{%- endif %} -{%- endblock %} diff --git a/firestore/docs/batch.rst b/firestore/docs/batch.rst deleted file mode 100644 index d130d0379170..000000000000 --- a/firestore/docs/batch.rst +++ /dev/null @@ -1,6 +0,0 @@ -Batches -~~~~~~~ - -.. automodule:: google.cloud.firestore_v1.batch - :members: - :show-inheritance: diff --git a/firestore/docs/changelog.md b/firestore/docs/changelog.md deleted file mode 120000 index 04c99a55caae..000000000000 --- a/firestore/docs/changelog.md +++ /dev/null @@ -1 +0,0 @@ -../CHANGELOG.md \ No newline at end of file diff --git a/firestore/docs/client.rst b/firestore/docs/client.rst deleted file mode 100644 index c42eb434706c..000000000000 --- a/firestore/docs/client.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client -~~~~~~ - -.. automodule:: google.cloud.firestore_v1.client - :members: - :show-inheritance: diff --git a/firestore/docs/collection.rst b/firestore/docs/collection.rst deleted file mode 100644 index 22d4d8243e69..000000000000 --- a/firestore/docs/collection.rst +++ /dev/null @@ -1,6 +0,0 @@ -Collections -~~~~~~~~~~~ - -.. automodule:: google.cloud.firestore_v1.collection - :members: - :show-inheritance: diff --git a/firestore/docs/conf.py b/firestore/docs/conf.py deleted file mode 100644 index 747de70b44ff..000000000000 --- a/firestore/docs/conf.py +++ /dev/null @@ -1,356 +0,0 @@ -# -*- coding: utf-8 -*- -# -# google-cloud-firestore documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. 
-project = u"google-cloud-firestore" -copyright = u"2017, Google" -author = u"Google APIs" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. 
They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-firestore-doc" - -# -- Options for warnings ------------------------------------------------------ - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - #'preamble': '', - # Latex figure (float) alignment - #'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). 
-latex_documents = [ - ( - master_doc, - "google-cloud-firestore.tex", - u"google-cloud-firestore Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-firestore", - u"google-cloud-firestore Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-firestore", - u"google-cloud-firestore Documentation", - author, - "google-cloud-firestore", - "GAPIC library for the {metadata.shortName}", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("https://requests.kennethreitz.org/en/stable/", None), - "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), - "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), -} - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/firestore/docs/document.rst b/firestore/docs/document.rst deleted file mode 100644 index bc04dd4443b5..000000000000 --- a/firestore/docs/document.rst +++ /dev/null @@ -1,6 +0,0 @@ -Documents -~~~~~~~~~ - -.. 
automodule:: google.cloud.firestore_v1.document - :members: - :show-inheritance: diff --git a/firestore/docs/field_path.rst b/firestore/docs/field_path.rst deleted file mode 100644 index 006aacf193b7..000000000000 --- a/firestore/docs/field_path.rst +++ /dev/null @@ -1,7 +0,0 @@ -Field Paths -~~~~~~~~~~~ - -.. automodule:: google.cloud.firestore_v1.field_path - :members: - :show-inheritance: - diff --git a/firestore/docs/index.rst b/firestore/docs/index.rst deleted file mode 100644 index b8157df9bd0c..000000000000 --- a/firestore/docs/index.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. include:: README.rst - -.. note:: - - Because the firestore client uses :mod:`grpcio` library, it is safe to - share instances across threads. In multiprocessing scenarios, the best - practice is to create client instances *after* the invocation of - :func:`os.fork` by :class:`multiprocessing.Pool` or - :class:`multiprocessing.Process`. - -API Reference -------------- - -.. toctree:: - :maxdepth: 2 - - client - collection - document - field_path - query - batch - transaction - transforms - types - - -Changelog ---------- - -For a list of all ``google-cloud-firestore`` releases: - -.. toctree:: - :maxdepth: 2 - - changelog diff --git a/firestore/docs/query.rst b/firestore/docs/query.rst deleted file mode 100644 index 8f4117671ced..000000000000 --- a/firestore/docs/query.rst +++ /dev/null @@ -1,6 +0,0 @@ -Queries -~~~~~~~ - -.. automodule:: google.cloud.firestore_v1.query - :members: - :show-inheritance: diff --git a/firestore/docs/transaction.rst b/firestore/docs/transaction.rst deleted file mode 100644 index 97e670a3493e..000000000000 --- a/firestore/docs/transaction.rst +++ /dev/null @@ -1,7 +0,0 @@ -Transactions -~~~~~~~~~~~~ - -.. automodule:: google.cloud.firestore_v1.transaction - :inherited-members: - :members: - :show-inheritance: diff --git a/firestore/docs/transforms.rst b/firestore/docs/transforms.rst deleted file mode 100644 index b3051ca151c3..000000000000 --- a/firestore/docs/transforms.rst +++ /dev/null @@ -1,6 +0,0 @@ -Transforms -~~~~~~~~~~ - -.. automodule:: google.cloud.firestore_v1.transforms - :members: - :show-inheritance: diff --git a/firestore/docs/types.rst b/firestore/docs/types.rst deleted file mode 100644 index ce74845203ac..000000000000 --- a/firestore/docs/types.rst +++ /dev/null @@ -1,6 +0,0 @@ -Types -~~~~~ - -.. automodule:: google.cloud.firestore_v1.types - :members: - :show-inheritance: diff --git a/firestore/google/__init__.py b/firestore/google/__init__.py deleted file mode 100644 index aa5aeae602bc..000000000000 --- a/firestore/google/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/firestore/google/cloud/__init__.py b/firestore/google/cloud/__init__.py deleted file mode 100644 index aa5aeae602bc..000000000000 --- a/firestore/google/cloud/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/firestore/google/cloud/firestore.py b/firestore/google/cloud/firestore.py deleted file mode 100644 index 3bdb9af565b3..000000000000 --- a/firestore/google/cloud/firestore.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Python idiomatic client for Google Cloud Firestore.""" - - -from google.cloud.firestore_v1 import __version__ -from google.cloud.firestore_v1 import ArrayRemove -from google.cloud.firestore_v1 import ArrayUnion -from google.cloud.firestore_v1 import Client -from google.cloud.firestore_v1 import CollectionReference -from google.cloud.firestore_v1 import DELETE_FIELD -from google.cloud.firestore_v1 import DocumentReference -from google.cloud.firestore_v1 import DocumentSnapshot -from google.cloud.firestore_v1 import enums -from google.cloud.firestore_v1 import ExistsOption -from google.cloud.firestore_v1 import GeoPoint -from google.cloud.firestore_v1 import Increment -from google.cloud.firestore_v1 import LastUpdateOption -from google.cloud.firestore_v1 import Maximum -from google.cloud.firestore_v1 import Minimum -from google.cloud.firestore_v1 import Query -from google.cloud.firestore_v1 import ReadAfterWriteError -from google.cloud.firestore_v1 import SERVER_TIMESTAMP -from google.cloud.firestore_v1 import Transaction -from google.cloud.firestore_v1 import transactional -from google.cloud.firestore_v1 import types -from google.cloud.firestore_v1 import Watch -from google.cloud.firestore_v1 import WriteBatch -from google.cloud.firestore_v1 import WriteOption - - -__all__ = [ - "__version__", - "ArrayRemove", - "ArrayUnion", - "Client", - "CollectionReference", - "DELETE_FIELD", - "DocumentReference", - "DocumentSnapshot", - "enums", - "ExistsOption", - "GeoPoint", - "Increment", - "LastUpdateOption", - "Maximum", - "Minimum", - "Query", - "ReadAfterWriteError", - "SERVER_TIMESTAMP", - "Transaction", - "transactional", - "types", - "Watch", - "WriteBatch", - "WriteOption", -] diff --git a/firestore/google/cloud/firestore_admin_v1/__init__.py b/firestore/google/cloud/firestore_admin_v1/__init__.py deleted file mode 100644 index 23f844b617d9..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.firestore_admin_v1 import types -from google.cloud.firestore_admin_v1.gapic import enums -from google.cloud.firestore_admin_v1.gapic import firestore_admin_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7." 
- "More details about Python 2 support for Google Cloud Client Libraries" - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class FirestoreAdminClient(firestore_admin_client.FirestoreAdminClient): - __doc__ = firestore_admin_client.FirestoreAdminClient.__doc__ - enums = enums - - -__all__ = ("enums", "types", "FirestoreAdminClient") diff --git a/firestore/google/cloud/firestore_admin_v1/gapic/__init__.py b/firestore/google/cloud/firestore_admin_v1/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_admin_v1/gapic/enums.py b/firestore/google/cloud/firestore_admin_v1/gapic/enums.py deleted file mode 100644 index 09acf6c3ef02..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/gapic/enums.py +++ /dev/null @@ -1,142 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class OperationState(enum.IntEnum): - """ - Describes the state of the operation. - - Attributes: - OPERATION_STATE_UNSPECIFIED (int): Unspecified. - INITIALIZING (int): Request is being prepared for processing. - PROCESSING (int): Request is actively being processed. - CANCELLING (int): Request is in the process of being cancelled after user called - google.longrunning.Operations.CancelOperation on the operation. - FINALIZING (int): Request has been processed and is in its finalization stage. - SUCCESSFUL (int): Request has completed successfully. - FAILED (int): Request has finished being processed, but encountered an error. - CANCELLED (int): Request has finished being cancelled after user called - google.longrunning.Operations.CancelOperation. - """ - - OPERATION_STATE_UNSPECIFIED = 0 - INITIALIZING = 1 - PROCESSING = 2 - CANCELLING = 3 - FINALIZING = 4 - SUCCESSFUL = 5 - FAILED = 6 - CANCELLED = 7 - - -class FieldOperationMetadata(object): - class IndexConfigDelta(object): - class ChangeType(enum.IntEnum): - """ - Specifies how the index is changing. - - Attributes: - CHANGE_TYPE_UNSPECIFIED (int): The type of change is not specified or known. - ADD (int): The single field index is being added. - REMOVE (int): The single field index is being removed. - """ - - CHANGE_TYPE_UNSPECIFIED = 0 - ADD = 1 - REMOVE = 2 - - -class Index(object): - class QueryScope(enum.IntEnum): - """ - Query Scope defines the scope at which a query is run. This is specified - on a StructuredQuery's ``from`` field. - - Attributes: - QUERY_SCOPE_UNSPECIFIED (int): The query scope is unspecified. Not a valid option. - COLLECTION (int): Indexes with a collection query scope specified allow queries - against a collection that is the child of a specific document, specified - at query time, and that has the collection id specified by the index. 
- COLLECTION_GROUP (int): Indexes with a collection group query scope specified allow queries - against all collections that has the collection id specified by the - index. - """ - - QUERY_SCOPE_UNSPECIFIED = 0 - COLLECTION = 1 - COLLECTION_GROUP = 2 - - class State(enum.IntEnum): - """ - The state of an index. During index creation, an index will be in the - ``CREATING`` state. If the index is created successfully, it will - transition to the ``READY`` state. If the index creation encounters a - problem, the index will transition to the ``NEEDS_REPAIR`` state. - - Attributes: - STATE_UNSPECIFIED (int): The state is unspecified. - CREATING (int): The index is being created. - There is an active long-running operation for the index. - The index is updated when writing a document. - Some index data may exist. - READY (int): The index is ready to be used. - The index is updated when writing a document. - The index is fully populated from all stored documents it applies to. - NEEDS_REPAIR (int): The index was being created, but something went wrong. - There is no active long-running operation for the index, - and the most recently finished long-running operation failed. - The index is not updated when writing a document. - Some index data may exist. - Use the google.longrunning.Operations API to determine why the operation - that last attempted to create this index failed, then re-create the - index. - """ - - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 - NEEDS_REPAIR = 3 - - class IndexField(object): - class ArrayConfig(enum.IntEnum): - """ - The supported array value configurations. - - Attributes: - ARRAY_CONFIG_UNSPECIFIED (int): The index does not support additional array queries. - CONTAINS (int): The index supports array containment queries. - """ - - ARRAY_CONFIG_UNSPECIFIED = 0 - CONTAINS = 1 - - class Order(enum.IntEnum): - """ - The supported orderings. - - Attributes: - ORDER_UNSPECIFIED (int): The ordering is unspecified. Not a valid option. - ASCENDING (int): The field is ordered by ascending field value. - DESCENDING (int): The field is ordered by descending field value. - """ - - ORDER_UNSPECIFIED = 0 - ASCENDING = 1 - DESCENDING = 2 diff --git a/firestore/google/cloud/firestore_admin_v1/gapic/firestore_admin_client.py b/firestore/google/cloud/firestore_admin_v1/gapic/firestore_admin_client.py deleted file mode 100644 index 9b80814f9f85..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/gapic/firestore_admin_client.py +++ /dev/null @@ -1,1016 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.firestore.admin.v1 FirestoreAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.firestore_admin_v1.gapic import enums -from google.cloud.firestore_admin_v1.gapic import firestore_admin_client_config -from google.cloud.firestore_admin_v1.gapic.transports import ( - firestore_admin_grpc_transport, -) -from google.cloud.firestore_admin_v1.proto import field_pb2 -from google.cloud.firestore_admin_v1.proto import firestore_admin_pb2 -from google.cloud.firestore_admin_v1.proto import firestore_admin_pb2_grpc -from google.cloud.firestore_admin_v1.proto import index_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-firestore" -).version - - -class FirestoreAdminClient(object): - """ - Operations are created by service ``FirestoreAdmin``, but are accessed - via service ``google.longrunning.Operations``. - """ - - SERVICE_ADDRESS = "firestore.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.firestore.admin.v1.FirestoreAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FirestoreAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def database_path(cls, project, database): - """Return a fully-qualified database string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}", - project=project, - database=database, - ) - - @classmethod - def field_path(cls, project, database, collection_id, field_id): - """Return a fully-qualified field string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/collectionGroups/{collection_id}/fields/{field_id}", - project=project, - database=database, - collection_id=collection_id, - field_id=field_id, - ) - - @classmethod - def index_path(cls, project, database, collection_id, index_id): - """Return a fully-qualified index string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/collectionGroups/{collection_id}/indexes/{index_id}", - project=project, - database=database, - collection_id=collection_id, - index_id=index_id, - ) - - @classmethod - def parent_path(cls, project, database, collection_id): - """Return a fully-qualified parent string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/collectionGroups/{collection_id}", - project=project, - database=database, - collection_id=collection_id, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.FirestoreAdminGrpcTransport, - Callable[[~.Credentials, type], ~.FirestoreAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = firestore_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=firestore_admin_grpc_transport.FirestoreAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = firestore_admin_grpc_transport.FirestoreAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_index( - self, - parent, - index, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a composite index. This returns a - ``google.longrunning.Operation`` which may be used to track the status - of the creation. The metadata for the operation will be the type - ``IndexOperationMetadata``. - - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> parent = client.parent_path('[PROJECT]', '[DATABASE]', '[COLLECTION_ID]') - >>> - >>> # TODO: Initialize `index`: - >>> index = {} - >>> - >>> response = client.create_index(parent, index) - - Args: - parent (str): Required. A parent name of the form - ``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}`` - index (Union[dict, ~google.cloud.firestore_admin_v1.types.Index]): Required. The composite index to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_admin_v1.types.Index` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_admin_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_index" not in self._inner_api_calls: - self._inner_api_calls[ - "create_index" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_index, - default_retry=self._method_configs["CreateIndex"].retry, - default_timeout=self._method_configs["CreateIndex"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.CreateIndexRequest(parent=parent, index=index) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_index"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_indexes( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists composite indexes. - - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> parent = client.parent_path('[PROJECT]', '[DATABASE]', '[COLLECTION_ID]') - >>> - >>> # Iterate over all results - >>> for element in client.list_indexes(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_indexes(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. A parent name of the form - ``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}`` - filter_ (str): The filter to apply to list results. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.firestore_admin_v1.types.Index` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_indexes" not in self._inner_api_calls: - self._inner_api_calls[ - "list_indexes" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_indexes, - default_retry=self._method_configs["ListIndexes"].retry, - default_timeout=self._method_configs["ListIndexes"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.ListIndexesRequest( - parent=parent, filter=filter_, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_indexes"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="indexes", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_index( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a composite index. - - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> name = client.index_path('[PROJECT]', '[DATABASE]', '[COLLECTION_ID]', '[INDEX_ID]') - >>> - >>> response = client.get_index(name) - - Args: - name (str): Required. A name of the form - ``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_admin_v1.types.Index` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_index" not in self._inner_api_calls: - self._inner_api_calls[ - "get_index" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_index, - default_retry=self._method_configs["GetIndex"].retry, - default_timeout=self._method_configs["GetIndex"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.GetIndexRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_index"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_index( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a composite index. - - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> name = client.index_path('[PROJECT]', '[DATABASE]', '[COLLECTION_ID]', '[INDEX_ID]') - >>> - >>> client.delete_index(name) - - Args: - name (str): Required. A name of the form - ``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_index" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_index" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_index, - default_retry=self._method_configs["DeleteIndex"].retry, - default_timeout=self._method_configs["DeleteIndex"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.DeleteIndexRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_index"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def import_documents( - self, - name, - collection_ids=None, - input_uri_prefix=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Imports documents into Google Cloud Firestore. Existing documents with the - same name are overwritten. The import occurs in the background and its - progress can be monitored and managed via the Operation resource that is - created. 
If an ImportDocuments operation is cancelled, it is possible - that a subset of the data has already been imported to Cloud Firestore. - - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> name = client.database_path('[PROJECT]', '[DATABASE]') - >>> - >>> response = client.import_documents(name) - - Args: - name (str): Required. Database to import into. Should be of the form: - ``projects/{project_id}/databases/{database_id}``. - collection_ids (list[str]): Which collection ids to import. Unspecified means all collections included - in the import. - input_uri_prefix (str): Location of the exported files. This must match the output\_uri\_prefix - of an ExportDocumentsResponse from an export that has completed - successfully. See: - ``google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_admin_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "import_documents" not in self._inner_api_calls: - self._inner_api_calls[ - "import_documents" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.import_documents, - default_retry=self._method_configs["ImportDocuments"].retry, - default_timeout=self._method_configs["ImportDocuments"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.ImportDocumentsRequest( - name=name, collection_ids=collection_ids, input_uri_prefix=input_uri_prefix - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["import_documents"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def export_documents( - self, - name, - collection_ids=None, - output_uri_prefix=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Exports a copy of all or a subset of documents from Google Cloud Firestore - to another storage system, such as Google Cloud Storage. Recent updates to - documents may not be reflected in the export. The export occurs in the - background and its progress can be monitored and managed via the - Operation resource that is created. The output of an export may only be - used once the associated operation is done. If an export operation is - cancelled before completion it may leave partial data behind in Google - Cloud Storage. 
- - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> name = client.database_path('[PROJECT]', '[DATABASE]') - >>> - >>> response = client.export_documents(name) - - Args: - name (str): Required. Database to export. Should be of the form: - ``projects/{project_id}/databases/{database_id}``. - collection_ids (list[str]): Which collection ids to export. Unspecified means all collections. - output_uri_prefix (str): The output URI. Currently only supports Google Cloud Storage URIs of the - form: ``gs://BUCKET_NAME[/NAMESPACE_PATH]``, where ``BUCKET_NAME`` is - the name of the Google Cloud Storage bucket and ``NAMESPACE_PATH`` is an - optional Google Cloud Storage namespace path. When choosing a name, be - sure to consider Google Cloud Storage naming guidelines: - https://cloud.google.com/storage/docs/naming. If the URI is a bucket - (without a namespace path), a prefix will be generated based on the - start time. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_admin_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "export_documents" not in self._inner_api_calls: - self._inner_api_calls[ - "export_documents" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_documents, - default_retry=self._method_configs["ExportDocuments"].retry, - default_timeout=self._method_configs["ExportDocuments"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.ExportDocumentsRequest( - name=name, - collection_ids=collection_ids, - output_uri_prefix=output_uri_prefix, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["export_documents"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_field( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the metadata and configuration for a Field. - - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> name = client.field_path('[PROJECT]', '[DATABASE]', '[COLLECTION_ID]', '[FIELD_ID]') - >>> - >>> response = client.get_field(name) - - Args: - name (str): Required. A name of the form - ``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_id}`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_admin_v1.types.Field` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_field" not in self._inner_api_calls: - self._inner_api_calls[ - "get_field" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_field, - default_retry=self._method_configs["GetField"].retry, - default_timeout=self._method_configs["GetField"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.GetFieldRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_field"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_fields( - self, - parent, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the field configuration and metadata for this database. - - Currently, ``FirestoreAdmin.ListFields`` only supports listing fields - that have been explicitly overridden. To issue this query, call - ``FirestoreAdmin.ListFields`` with the filter set to - ``indexConfig.usesAncestorConfig:false``. - - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> parent = client.parent_path('[PROJECT]', '[DATABASE]', '[COLLECTION_ID]') - >>> - >>> # Iterate over all results - >>> for element in client.list_fields(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_fields(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. A parent name of the form - ``projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}`` - filter_ (str): The filter to apply to list results. Currently, - ``FirestoreAdmin.ListFields`` only supports listing fields that have - been explicitly overridden. To issue this query, call - ``FirestoreAdmin.ListFields`` with the filter set to - ``indexConfig.usesAncestorConfig:false``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.firestore_admin_v1.types.Field` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_fields" not in self._inner_api_calls: - self._inner_api_calls[ - "list_fields" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_fields, - default_retry=self._method_configs["ListFields"].retry, - default_timeout=self._method_configs["ListFields"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.ListFieldsRequest( - parent=parent, filter=filter_, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_fields"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="fields", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_field( - self, - field, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a field configuration. Currently, field updates apply only to - single field index configuration. However, calls to - ``FirestoreAdmin.UpdateField`` should provide a field mask to avoid - changing any configuration that the caller isn't aware of. The field - mask should be specified as: ``{ paths: "index_config" }``. - - This call returns a ``google.longrunning.Operation`` which may be used - to track the status of the field update. The metadata for the operation - will be the type ``FieldOperationMetadata``. - - To configure the default field settings for the database, use the - special ``Field`` with resource name: - ``projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*``. - - Example: - >>> from google.cloud import firestore_admin_v1 - >>> - >>> client = firestore_admin_v1.FirestoreAdminClient() - >>> - >>> # TODO: Initialize `field`: - >>> field = {} - >>> - >>> response = client.update_field(field) - - Args: - field (Union[dict, ~google.cloud.firestore_admin_v1.types.Field]): Required. The field to be updated. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_admin_v1.types.Field` - update_mask (Union[dict, ~google.cloud.firestore_admin_v1.types.FieldMask]): A mask, relative to the field. If specified, only configuration - specified by this field\_mask will be updated in the field. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_admin_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_admin_v1.types.Operation` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_field" not in self._inner_api_calls: - self._inner_api_calls[ - "update_field" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_field, - default_retry=self._method_configs["UpdateField"].retry, - default_timeout=self._method_configs["UpdateField"].timeout, - client_info=self._client_info, - ) - - request = firestore_admin_pb2.UpdateFieldRequest( - field=field, update_mask=update_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("field.name", field.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_field"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/firestore/google/cloud/firestore_admin_v1/gapic/firestore_admin_client_config.py b/firestore/google/cloud/firestore_admin_v1/gapic/firestore_admin_client_config.py deleted file mode 100644 index f073ae4566ac..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/gapic/firestore_admin_client_config.py +++ /dev/null @@ -1,68 +0,0 @@ -config = { - "interfaces": { - "google.firestore.admin.v1.FirestoreAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "CreateIndex": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ListIndexes": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetIndex": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteIndex": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ImportDocuments": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ExportDocuments": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetField": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - 
"retry_params_name": "default", - }, - "ListFields": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "UpdateField": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/firestore/google/cloud/firestore_admin_v1/gapic/transports/__init__.py b/firestore/google/cloud/firestore_admin_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_admin_v1/gapic/transports/firestore_admin_grpc_transport.py b/firestore/google/cloud/firestore_admin_v1/gapic/transports/firestore_admin_grpc_transport.py deleted file mode 100644 index f1bdc01711f0..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/gapic/transports/firestore_admin_grpc_transport.py +++ /dev/null @@ -1,259 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.firestore_admin_v1.proto import firestore_admin_pb2_grpc - - -class FirestoreAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.firestore.admin.v1 FirestoreAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/datastore", - ) - - def __init__( - self, channel=None, credentials=None, address="firestore.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. 
- self._stubs = { - "firestore_admin_stub": firestore_admin_pb2_grpc.FirestoreAdminStub(channel) - } - - @classmethod - def create_channel( - cls, address="firestore.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_index(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.create_index`. - - Creates a composite index. This returns a - ``google.longrunning.Operation`` which may be used to track the status - of the creation. The metadata for the operation will be the type - ``IndexOperationMetadata``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].CreateIndex - - @property - def list_indexes(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.list_indexes`. - - Lists composite indexes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].ListIndexes - - @property - def get_index(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.get_index`. - - Gets a composite index. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].GetIndex - - @property - def delete_index(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.delete_index`. - - Deletes a composite index. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].DeleteIndex - - @property - def import_documents(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.import_documents`. - - Imports documents into Google Cloud Firestore. Existing documents with the - same name are overwritten. The import occurs in the background and its - progress can be monitored and managed via the Operation resource that is - created. If an ImportDocuments operation is cancelled, it is possible - that a subset of the data has already been imported to Cloud Firestore. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].ImportDocuments - - @property - def export_documents(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.export_documents`. - - Exports a copy of all or a subset of documents from Google Cloud Firestore - to another storage system, such as Google Cloud Storage. 
Recent updates to - documents may not be reflected in the export. The export occurs in the - background and its progress can be monitored and managed via the - Operation resource that is created. The output of an export may only be - used once the associated operation is done. If an export operation is - cancelled before completion it may leave partial data behind in Google - Cloud Storage. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].ExportDocuments - - @property - def get_field(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.get_field`. - - Gets the metadata and configuration for a Field. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].GetField - - @property - def list_fields(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.list_fields`. - - Lists the field configuration and metadata for this database. - - Currently, ``FirestoreAdmin.ListFields`` only supports listing fields - that have been explicitly overridden. To issue this query, call - ``FirestoreAdmin.ListFields`` with the filter set to - ``indexConfig.usesAncestorConfig:false``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].ListFields - - @property - def update_field(self): - """Return the gRPC stub for :meth:`FirestoreAdminClient.update_field`. - - Updates a field configuration. Currently, field updates apply only to - single field index configuration. However, calls to - ``FirestoreAdmin.UpdateField`` should provide a field mask to avoid - changing any configuration that the caller isn't aware of. The field - mask should be specified as: ``{ paths: "index_config" }``. - - This call returns a ``google.longrunning.Operation`` which may be used - to track the status of the field update. The metadata for the operation - will be the type ``FieldOperationMetadata``. - - To configure the default field settings for the database, use the - special ``Field`` with resource name: - ``projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_admin_stub"].UpdateField diff --git a/firestore/google/cloud/firestore_admin_v1/proto/__init__.py b/firestore/google/cloud/firestore_admin_v1/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_admin_v1/proto/field.proto b/firestore/google/cloud/firestore_admin_v1/proto/field.proto deleted file mode 100644 index 48430d87c1be..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/field.proto +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/api/resource.proto"; -import "google/firestore/admin/v1/index.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "FieldProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; - -// Represents a single field in the database. -// -// Fields are grouped by their "Collection Group", which represent all -// collections in the database with the same id. -message Field { - option (google.api.resource) = { - type: "firestore.googleapis.com/Field" - pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}/fields/{field}" - }; - - // The index configuration for this field. - message IndexConfig { - // The indexes supported for this field. - repeated Index indexes = 1; - - // Output only. When true, the `Field`'s index configuration is set from the - // configuration specified by the `ancestor_field`. - // When false, the `Field`'s index configuration is defined explicitly. - bool uses_ancestor_config = 2; - - // Output only. Specifies the resource name of the `Field` from which this field's - // index configuration is set (when `uses_ancestor_config` is true), - // or from which it *would* be set if this field had no index configuration - // (when `uses_ancestor_config` is false). - string ancestor_field = 3; - - // Output only - // When true, the `Field`'s index configuration is in the process of being - // reverted. Once complete, the index config will transition to the same - // state as the field specified by `ancestor_field`, at which point - // `uses_ancestor_config` will be `true` and `reverting` will be `false`. - bool reverting = 4; - } - - // A field name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` - // - // A field path may be a simple field name, e.g. `address` or a path to fields - // within map_value , e.g. `address.city`, - // or a special field path. The only valid special field is `*`, which - // represents any field. - // - // Field paths may be quoted using ` (backtick). The only character that needs - // to be escaped within a quoted field path is the backtick character itself, - // escaped using a backslash. Special characters in field paths that - // must be quoted include: `*`, `.`, - // ``` (backtick), `[`, `]`, as well as any ascii symbolic characters. - // - // Examples: - // (Note: Comments here are written in markdown syntax, so there is an - // additional layer of backticks to represent a code block) - // `\`address.city\`` represents a field named `address.city`, not the map key - // `city` in the field `address`. - // `\`*\`` represents a field named `*`, not any field. - // - // A special `Field` contains the default indexing settings for all fields. 
- // This field's resource name is: - // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*` - // Indexes defined on this `Field` will be applied to all fields which do not - // have their own `Field` index configuration. - string name = 1; - - // The index configuration for this field. If unset, field indexing will - // revert to the configuration defined by the `ancestor_field`. To - // explicitly remove all indexes for this field, specify an index config - // with an empty list of indexes. - IndexConfig index_config = 2; -} diff --git a/firestore/google/cloud/firestore_admin_v1/proto/field_pb2.py b/firestore/google/cloud/firestore_admin_v1/proto/field_pb2.py deleted file mode 100644 index 281ac78d874a..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/field_pb2.py +++ /dev/null @@ -1,288 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/firestore/admin_v1/proto/field.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.firestore_admin_v1.proto import ( - index_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore/admin_v1/proto/field.proto", - package="google.firestore.admin.v1", - syntax="proto3", - serialized_options=_b( - "\n\035com.google.firestore.admin.v1B\nFieldProtoP\001Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\242\002\004GCFS\252\002\037Google.Cloud.Firestore.Admin.V1\312\002\037Google\\Cloud\\Firestore\\Admin\\V1" - ), - serialized_pb=_b( - '\n1google/cloud/firestore/admin_v1/proto/field.proto\x12\x19google.firestore.admin.v1\x1a\x19google/api/resource.proto\x1a\x31google/cloud/firestore/admin_v1/proto/index.proto\x1a\x1cgoogle/api/annotations.proto"\xe0\x02\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x42\n\x0cindex_config\x18\x02 \x01(\x0b\x32,.google.firestore.admin.v1.Field.IndexConfig\x1a\x89\x01\n\x0bIndexConfig\x12\x31\n\x07indexes\x18\x01 \x03(\x0b\x32 .google.firestore.admin.v1.Index\x12\x1c\n\x14uses_ancestor_config\x18\x02 \x01(\x08\x12\x16\n\x0e\x61ncestor_field\x18\x03 \x01(\t\x12\x11\n\treverting\x18\x04 \x01(\x08:y\xea\x41v\n\x1e\x66irestore.googleapis.com/Field\x12Tprojects/{project}/databases/{database}/collectionGroups/{collection}/fields/{field}B\xb8\x01\n\x1d\x63om.google.firestore.admin.v1B\nFieldProtoP\x01Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\xa2\x02\x04GCFS\xaa\x02\x1fGoogle.Cloud.Firestore.Admin.V1\xca\x02\x1fGoogle\\Cloud\\Firestore\\Admin\\V1b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_FIELD_INDEXCONFIG = _descriptor.Descriptor( - name="IndexConfig", - full_name="google.firestore.admin.v1.Field.IndexConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ 
- _descriptor.FieldDescriptor( - name="indexes", - full_name="google.firestore.admin.v1.Field.IndexConfig.indexes", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="uses_ancestor_config", - full_name="google.firestore.admin.v1.Field.IndexConfig.uses_ancestor_config", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="ancestor_field", - full_name="google.firestore.admin.v1.Field.IndexConfig.ancestor_field", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="reverting", - full_name="google.firestore.admin.v1.Field.IndexConfig.reverting", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=281, - serialized_end=418, -) - -_FIELD = _descriptor.Descriptor( - name="Field", - full_name="google.firestore.admin.v1.Field", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1.Field.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="index_config", - full_name="google.firestore.admin.v1.Field.index_config", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_FIELD_INDEXCONFIG], - enum_types=[], - serialized_options=_b( - "\352Av\n\036firestore.googleapis.com/Field\022Tprojects/{project}/databases/{database}/collectionGroups/{collection}/fields/{field}" - ), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=189, - serialized_end=541, -) - -_FIELD_INDEXCONFIG.fields_by_name[ - "indexes" -].message_type = ( - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2._INDEX -) -_FIELD_INDEXCONFIG.containing_type = _FIELD -_FIELD.fields_by_name["index_config"].message_type = _FIELD_INDEXCONFIG -DESCRIPTOR.message_types_by_name["Field"] = _FIELD -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Field = _reflection.GeneratedProtocolMessageType( - "Field", - 
(_message.Message,), - dict( - IndexConfig=_reflection.GeneratedProtocolMessageType( - "IndexConfig", - (_message.Message,), - dict( - DESCRIPTOR=_FIELD_INDEXCONFIG, - __module__="google.cloud.firestore.admin_v1.proto.field_pb2", - __doc__="""The index configuration for this field. - - - Attributes: - indexes: - The indexes supported for this field. - uses_ancestor_config: - Output only. When true, the ``Field``'s index configuration is - set from the configuration specified by the - ``ancestor_field``. When false, the ``Field``'s index - configuration is defined explicitly. - ancestor_field: - Output only. Specifies the resource name of the ``Field`` from - which this field's index configuration is set (when - ``uses_ancestor_config`` is true), or from which it *would* be - set if this field had no index configuration (when - ``uses_ancestor_config`` is false). - reverting: - Output only When true, the ``Field``'s index configuration is - in the process of being reverted. Once complete, the index - config will transition to the same state as the field - specified by ``ancestor_field``, at which point - ``uses_ancestor_config`` will be ``true`` and ``reverting`` - will be ``false``. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.Field.IndexConfig) - ), - ), - DESCRIPTOR=_FIELD, - __module__="google.cloud.firestore.admin_v1.proto.field_pb2", - __doc__="""Represents a single field in the database. - - Fields are grouped by their "Collection Group", which represent all - collections in the database with the same id. - - - Attributes: - name: - A field name of the form ``projects/{project_id}/databases/{da - tabase_id}/collectionGroups/{collection_id}/fields/{field_path - }`` A field path may be a simple field name, e.g. ``address`` - or a path to fields within map\_value , e.g. ``address.city``, - or a special field path. The only valid special field is - ``*``, which represents any field. Field paths may be quoted - using ``(backtick). The only character that needs to be - escaped within a quoted field path is the backtick character - itself, escaped using a backslash. Special characters in field - paths that must be quoted include:``\ \*\ ``,``.\ ``, ``` - (backtick),``\ [``,``]\`, as well as any ascii symbolic - characters. Examples: (Note: Comments here are written in - markdown syntax, so there is an additional layer of backticks - to represent a code block) ``\``\ address.city\`\ ``represents - a field named``\ address.city\ ``, not the map key``\ city\ - ``in the field``\ address\ ``.``\ \`\ *\`\ ``represents a - field named``*\ \`, not any field. A special ``Field`` - contains the default indexing settings for all fields. This - field's resource name is: ``projects/{project_id}/databases/{d - atabase_id}/collectionGroups/__default__/fields/*`` Indexes - defined on this ``Field`` will be applied to all fields which - do not have their own ``Field`` index configuration. - index_config: - The index configuration for this field. If unset, field - indexing will revert to the configuration defined by the - ``ancestor_field``. To explicitly remove all indexes for this - field, specify an index config with an empty list of indexes. 
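[Editor's aside, not part of the deleted file: a hypothetical sketch tying the index_config behavior described above to the update_field method removed earlier in this diff. The project, database, collection, and field ids are placeholders; the { paths: "index_config" } mask follows the UpdateField guidance, and an empty index list explicitly removes all single-field indexes.]

from google.cloud import firestore_admin_v1

client = firestore_admin_v1.FirestoreAdminClient()

# Placeholder resource name for the field whose indexing is being changed.
field_name = client.field_path('my-project', '(default)', 'users', 'last_login')

# Empty index list -> remove all single-field indexes; the mask limits the
# update to index_config so no other configuration is touched.
field = {'name': field_name, 'index_config': {'indexes': []}}
operation = client.update_field(field, update_mask={'paths': ['index_config']})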
- """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.Field) - ), -) -_sym_db.RegisterMessage(Field) -_sym_db.RegisterMessage(Field.IndexConfig) - - -DESCRIPTOR._options = None -_FIELD._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_admin_v1/proto/field_pb2_grpc.py b/firestore/google/cloud/firestore_admin_v1/proto/field_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/field_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin.proto b/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin.proto deleted file mode 100644 index 75dd2d3113eb..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin.proto +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/firestore/admin/v1/field.proto"; -import "google/firestore/admin/v1/index.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "FirestoreAdminProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; -option (google.api.resource_definition) = { - type: "firestore.googleapis.com/Database" - pattern: "projects/{project}/databases/{database}" -}; -option (google.api.resource_definition) = { - type: "firestore.googleapis.com/CollectionGroup" - pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}" -}; - -// Operations are created by service `FirestoreAdmin`, but are accessed via -// service `google.longrunning.Operations`. -service FirestoreAdmin { - option (google.api.default_host) = "firestore.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/datastore"; - - // Creates a composite index. This returns a [google.longrunning.Operation][google.longrunning.Operation] - // which may be used to track the status of the creation. The metadata for - // the operation will be the type [IndexOperationMetadata][google.firestore.admin.v1.IndexOperationMetadata]. 
- rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes" - body: "index" - }; - option (google.api.method_signature) = "parent,index"; - option (google.longrunning.operation_info) = { - response_type: "Index" - metadata_type: "IndexOperationMetadata" - }; - } - - // Lists composite indexes. - rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes" - }; - option (google.api.method_signature) = "parent"; - } - - // Gets a composite index. - rpc GetIndex(GetIndexRequest) returns (Index) { - option (google.api.http) = { - get: "/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Deletes a composite index. - rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Gets the metadata and configuration for a Field. - rpc GetField(GetFieldRequest) returns (Field) { - option (google.api.http) = { - get: "/v1/{name=projects/*/databases/*/collectionGroups/*/fields/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Updates a field configuration. Currently, field updates apply only to - // single field index configuration. However, calls to - // [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField] should provide a field mask to avoid - // changing any configuration that the caller isn't aware of. The field mask - // should be specified as: `{ paths: "index_config" }`. - // - // This call returns a [google.longrunning.Operation][google.longrunning.Operation] which may be used to - // track the status of the field update. The metadata for - // the operation will be the type [FieldOperationMetadata][google.firestore.admin.v1.FieldOperationMetadata]. - // - // To configure the default field settings for the database, use - // the special `Field` with resource name: - // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`. - rpc UpdateField(UpdateFieldRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{field.name=projects/*/databases/*/collectionGroups/*/fields/*}" - body: "field" - }; - option (google.api.method_signature) = "field"; - option (google.longrunning.operation_info) = { - response_type: "Field" - metadata_type: "FieldOperationMetadata" - }; - } - - // Lists the field configuration and metadata for this database. - // - // Currently, [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] only supports listing fields - // that have been explicitly overridden. To issue this query, call - // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] with the filter set to - // `indexConfig.usesAncestorConfig:false`. - rpc ListFields(ListFieldsRequest) returns (ListFieldsResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/databases/*/collectionGroups/*}/fields" - }; - option (google.api.method_signature) = "parent"; - } - - // Exports a copy of all or a subset of documents from Google Cloud Firestore - // to another storage system, such as Google Cloud Storage. 
Recent updates to - // documents may not be reflected in the export. The export occurs in the - // background and its progress can be monitored and managed via the - // Operation resource that is created. The output of an export may only be - // used once the associated operation is done. If an export operation is - // cancelled before completion it may leave partial data behind in Google - // Cloud Storage. - rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{name=projects/*/databases/*}:exportDocuments" - body: "*" - }; - option (google.api.method_signature) = "name"; - option (google.longrunning.operation_info) = { - response_type: "ExportDocumentsResponse" - metadata_type: "ExportDocumentsMetadata" - }; - } - - // Imports documents into Google Cloud Firestore. Existing documents with the - // same name are overwritten. The import occurs in the background and its - // progress can be monitored and managed via the Operation resource that is - // created. If an ImportDocuments operation is cancelled, it is possible - // that a subset of the data has already been imported to Cloud Firestore. - rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{name=projects/*/databases/*}:importDocuments" - body: "*" - }; - option (google.api.method_signature) = "name"; - option (google.longrunning.operation_info) = { - response_type: "google.protobuf.Empty" - metadata_type: "ImportDocumentsMetadata" - }; - } -} - -// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex]. -message CreateIndexRequest { - // Required. A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/CollectionGroup" - } - ]; - - // Required. The composite index to create. - Index index = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes]. -message ListIndexesRequest { - // Required. A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/CollectionGroup" - } - ]; - - // The filter to apply to list results. - string filter = 2; - - // The number of results to return. - int32 page_size = 3; - - // A page token, returned from a previous call to - // [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes], that may be used to get the next - // page of results. - string page_token = 4; -} - -// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes]. -message ListIndexesResponse { - // The requested indexes. - repeated Index indexes = 1; - - // A page token that may be used to request another page of results. If blank, - // this is the last page. - string next_page_token = 2; -} - -// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1.FirestoreAdmin.GetIndex]. -message GetIndexRequest { - // Required. 
A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Index" - } - ]; -} - -// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1.FirestoreAdmin.DeleteIndex]. -message DeleteIndexRequest { - // Required. A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Index" - } - ]; -} - -// The request for [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]. -message UpdateFieldRequest { - // Required. The field to be updated. - Field field = 1 [(google.api.field_behavior) = REQUIRED]; - - // A mask, relative to the field. If specified, only configuration specified - // by this field_mask will be updated in the field. - google.protobuf.FieldMask update_mask = 2; -} - -// The request for [FirestoreAdmin.GetField][google.firestore.admin.v1.FirestoreAdmin.GetField]. -message GetFieldRequest { - // Required. A name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_id}` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Field" - } - ]; -} - -// The request for [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]. -message ListFieldsRequest { - // Required. A parent name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}` - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/CollectionGroup" - } - ]; - - // The filter to apply to list results. Currently, - // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] only supports listing fields - // that have been explicitly overridden. To issue this query, call - // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] with the filter set to - // `indexConfig.usesAncestorConfig:false`. - string filter = 2; - - // The number of results to return. - int32 page_size = 3; - - // A page token, returned from a previous call to - // [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields], that may be used to get the next - // page of results. - string page_token = 4; -} - -// The response for [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]. -message ListFieldsResponse { - // The requested fields. - repeated Field fields = 1; - - // A page token that may be used to request another page of results. If blank, - // this is the last page. - string next_page_token = 2; -} - -// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments]. -message ExportDocumentsRequest { - // Required. Database to export. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Database" - } - ]; - - // Which collection ids to export. Unspecified means all collections. - repeated string collection_ids = 2; - - // The output URI. 
Currently only supports Google Cloud Storage URIs of the - // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name - // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional - // Google Cloud Storage namespace path. When - // choosing a name, be sure to consider Google Cloud Storage naming - // guidelines: https://cloud.google.com/storage/docs/naming. - // If the URI is a bucket (without a namespace path), a prefix will be - // generated based on the start time. - string output_uri_prefix = 3; -} - -// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsRequest { - // Required. Database to import into. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "firestore.googleapis.com/Database" - } - ]; - - // Which collection ids to import. Unspecified means all collections included - // in the import. - repeated string collection_ids = 2; - - // Location of the exported files. - // This must match the output_uri_prefix of an ExportDocumentsResponse from - // an export that has completed successfully. - // See: - // [google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix]. - string input_uri_prefix = 3; -} diff --git a/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin_pb2.py b/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin_pb2.py deleted file mode 100644 index 0737cfd86e91..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin_pb2.py +++ /dev/null @@ -1,1196 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/firestore/admin_v1/proto/firestore_admin.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.firestore_admin_v1.proto import ( - field_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2, -) -from google.cloud.firestore_admin_v1.proto import ( - index_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore/admin_v1/proto/firestore_admin.proto", - package="google.firestore.admin.v1", - syntax="proto3", - serialized_options=_b( - "\n\035com.google.firestore.admin.v1B\023FirestoreAdminProtoP\001Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\242\002\004GCFS\252\002\037Google.Cloud.Firestore.Admin.V1\312\002\037Google\\Cloud\\Firestore\\Admin\\V1\352AL\n!firestore.googleapis.com/Database\022'projects/{project}/databases/{database}\352Aq\n(firestore.googleapis.com/CollectionGroup\022Eprojects/{project}/databases/{database}/collectionGroups/{collection}" - ), - serialized_pb=_b( - '\n;google/cloud/firestore/admin_v1/proto/firestore_admin.proto\x12\x19google.firestore.admin.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/firestore/admin_v1/proto/field.proto\x1a\x31google/cloud/firestore/admin_v1/proto/index.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto"\x8c\x01\n\x12\x43reateIndexRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(firestore.googleapis.com/CollectionGroup\x12\x34\n\x05index\x18\x02 \x01(\x0b\x32 .google.firestore.admin.v1.IndexB\x03\xe0\x41\x02"\x8d\x01\n\x12ListIndexesRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(firestore.googleapis.com/CollectionGroup\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"a\n\x13ListIndexesResponse\x12\x31\n\x07indexes\x18\x01 \x03(\x0b\x32 .google.firestore.admin.v1.Index\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"G\n\x0fGetIndexRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x66irestore.googleapis.com/Index"J\n\x12\x44\x65leteIndexRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x66irestore.googleapis.com/Index"{\n\x12UpdateFieldRequest\x12\x34\n\x05\x66ield\x18\x01 \x01(\x0b\x32 .google.firestore.admin.v1.FieldB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.FieldMask"G\n\x0fGetFieldRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x66irestore.googleapis.com/Field"\x8c\x01\n\x11ListFieldsRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(firestore.googleapis.com/CollectionGroup\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"_\n\x12ListFieldsResponse\x12\x30\n\x06\x66ields\x18\x01 \x03(\x0b\x32 .google.firestore.admin.v1.Field\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x84\x01\n\x16\x45xportDocumentsRequest\x12\x37\n\x04name\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!firestore.googleapis.com/Database\x12\x16\n\x0e\x63ollection_ids\x18\x02 \x03(\t\x12\x19\n\x11output_uri_prefix\x18\x03 \x01(\t"\x83\x01\n\x16ImportDocumentsRequest\x12\x37\n\x04name\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!firestore.googleapis.com/Database\x12\x16\n\x0e\x63ollection_ids\x18\x02 \x03(\t\x12\x18\n\x10input_uri_prefix\x18\x03 \x01(\t2\xf5\x0e\n\x0e\x46irestoreAdmin\x12\xdb\x01\n\x0b\x43reateIndex\x12-.google.firestore.admin.v1.CreateIndexRequest\x1a\x1d.google.longrunning.Operation"~\x82\xd3\xe4\x93\x02G">/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes:\x05index\xda\x41\x0cparent,index\xca\x41\x1f\n\x05Index\x12\x16IndexOperationMetadata\x12\xbd\x01\n\x0bListIndexes\x12-.google.firestore.admin.v1.ListIndexesRequest\x1a..google.firestore.admin.v1.ListIndexesResponse"O\x82\xd3\xe4\x93\x02@\x12>/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes\xda\x41\x06parent\x12\xa7\x01\n\x08GetIndex\x12*.google.firestore.admin.v1.GetIndexRequest\x1a .google.firestore.admin.v1.Index"M\x82\xd3\xe4\x93\x02@\x12>/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}\xda\x41\x04name\x12\xa3\x01\n\x0b\x44\x65leteIndex\x12-.google.firestore.admin.v1.DeleteIndexRequest\x1a\x16.google.protobuf.Empty"M\x82\xd3\xe4\x93\x02@*>/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}\xda\x41\x04name\x12\xa6\x01\n\x08GetField\x12*.google.firestore.admin.v1.GetFieldRequest\x1a 
.google.firestore.admin.v1.Field"L\x82\xd3\xe4\x93\x02?\x12=/v1/{name=projects/*/databases/*/collectionGroups/*/fields/*}\xda\x41\x04name\x12\xd9\x01\n\x0bUpdateField\x12-.google.firestore.admin.v1.UpdateFieldRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02L2C/v1/{field.name=projects/*/databases/*/collectionGroups/*/fields/*}:\x05\x66ield\xda\x41\x05\x66ield\xca\x41\x1f\n\x05\x46ield\x12\x16\x46ieldOperationMetadata\x12\xb9\x01\n\nListFields\x12,.google.firestore.admin.v1.ListFieldsRequest\x1a-.google.firestore.admin.v1.ListFieldsResponse"N\x82\xd3\xe4\x93\x02?\x12=/v1/{parent=projects/*/databases/*/collectionGroups/*}/fields\xda\x41\x06parent\x12\xdd\x01\n\x0f\x45xportDocuments\x12\x31.google.firestore.admin.v1.ExportDocumentsRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/databases/*}:exportDocuments:\x01*\xda\x41\x04name\xca\x41\x32\n\x17\x45xportDocumentsResponse\x12\x17\x45xportDocumentsMetadata\x12\xdb\x01\n\x0fImportDocuments\x12\x31.google.firestore.admin.v1.ImportDocumentsRequest\x1a\x1d.google.longrunning.Operation"v\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/databases/*}:importDocuments:\x01*\xda\x41\x04name\xca\x41\x30\n\x15google.protobuf.Empty\x12\x17ImportDocumentsMetadata\x1av\xca\x41\x18\x66irestore.googleapis.com\xd2\x41Xhttps://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastoreB\x84\x03\n\x1d\x63om.google.firestore.admin.v1B\x13\x46irestoreAdminProtoP\x01Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\xa2\x02\x04GCFS\xaa\x02\x1fGoogle.Cloud.Firestore.Admin.V1\xca\x02\x1fGoogle\\Cloud\\Firestore\\Admin\\V1\xea\x41L\n!firestore.googleapis.com/Database\x12\'projects/{project}/databases/{database}\xea\x41q\n(firestore.googleapis.com/CollectionGroup\x12\x45projects/{project}/databases/{database}/collectionGroups/{collection}b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - ], -) - - -_CREATEINDEXREQUEST = _descriptor.Descriptor( - name="CreateIndexRequest", - full_name="google.firestore.admin.v1.CreateIndexRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.admin.v1.CreateIndexRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A*\n(firestore.googleapis.com/CollectionGroup" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="index", - full_name="google.firestore.admin.v1.CreateIndexRequest.index", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - ], - extensions=[], - 
nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=408, - serialized_end=548, -) - - -_LISTINDEXESREQUEST = _descriptor.Descriptor( - name="ListIndexesRequest", - full_name="google.firestore.admin.v1.ListIndexesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.admin.v1.ListIndexesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A*\n(firestore.googleapis.com/CollectionGroup" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.firestore.admin.v1.ListIndexesRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.firestore.admin.v1.ListIndexesRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.firestore.admin.v1.ListIndexesRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=551, - serialized_end=692, -) - - -_LISTINDEXESRESPONSE = _descriptor.Descriptor( - name="ListIndexesResponse", - full_name="google.firestore.admin.v1.ListIndexesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="indexes", - full_name="google.firestore.admin.v1.ListIndexesResponse.indexes", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.firestore.admin.v1.ListIndexesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=694, - serialized_end=791, -) - - -_GETINDEXREQUEST = _descriptor.Descriptor( - 
name="GetIndexRequest", - full_name="google.firestore.admin.v1.GetIndexRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1.GetIndexRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A \n\036firestore.googleapis.com/Index" - ), - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=793, - serialized_end=864, -) - - -_DELETEINDEXREQUEST = _descriptor.Descriptor( - name="DeleteIndexRequest", - full_name="google.firestore.admin.v1.DeleteIndexRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1.DeleteIndexRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A \n\036firestore.googleapis.com/Index" - ), - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=866, - serialized_end=940, -) - - -_UPDATEFIELDREQUEST = _descriptor.Descriptor( - name="UpdateFieldRequest", - full_name="google.firestore.admin.v1.UpdateFieldRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field", - full_name="google.firestore.admin.v1.UpdateFieldRequest.field", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.firestore.admin.v1.UpdateFieldRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=942, - serialized_end=1065, -) - - -_GETFIELDREQUEST = _descriptor.Descriptor( - name="GetFieldRequest", - full_name="google.firestore.admin.v1.GetFieldRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1.GetFieldRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A \n\036firestore.googleapis.com/Field" - ), - file=DESCRIPTOR, - 
) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1067, - serialized_end=1138, -) - - -_LISTFIELDSREQUEST = _descriptor.Descriptor( - name="ListFieldsRequest", - full_name="google.firestore.admin.v1.ListFieldsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.admin.v1.ListFieldsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A*\n(firestore.googleapis.com/CollectionGroup" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.firestore.admin.v1.ListFieldsRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.firestore.admin.v1.ListFieldsRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.firestore.admin.v1.ListFieldsRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1141, - serialized_end=1281, -) - - -_LISTFIELDSRESPONSE = _descriptor.Descriptor( - name="ListFieldsResponse", - full_name="google.firestore.admin.v1.ListFieldsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="google.firestore.admin.v1.ListFieldsResponse.fields", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.firestore.admin.v1.ListFieldsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1283, - serialized_end=1378, -) - - -_EXPORTDOCUMENTSREQUEST = 
_descriptor.Descriptor( - name="ExportDocumentsRequest", - full_name="google.firestore.admin.v1.ExportDocumentsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1.ExportDocumentsRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A#\n!firestore.googleapis.com/Database" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="collection_ids", - full_name="google.firestore.admin.v1.ExportDocumentsRequest.collection_ids", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="output_uri_prefix", - full_name="google.firestore.admin.v1.ExportDocumentsRequest.output_uri_prefix", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1381, - serialized_end=1513, -) - - -_IMPORTDOCUMENTSREQUEST = _descriptor.Descriptor( - name="ImportDocumentsRequest", - full_name="google.firestore.admin.v1.ImportDocumentsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1.ImportDocumentsRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A#\n!firestore.googleapis.com/Database" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="collection_ids", - full_name="google.firestore.admin.v1.ImportDocumentsRequest.collection_ids", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="input_uri_prefix", - full_name="google.firestore.admin.v1.ImportDocumentsRequest.input_uri_prefix", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1516, - serialized_end=1647, -) - -_CREATEINDEXREQUEST.fields_by_name[ - "index" -].message_type = ( - 
google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2._INDEX -) -_LISTINDEXESRESPONSE.fields_by_name[ - "indexes" -].message_type = ( - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2._INDEX -) -_UPDATEFIELDREQUEST.fields_by_name[ - "field" -].message_type = ( - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2._FIELD -) -_UPDATEFIELDREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTFIELDSRESPONSE.fields_by_name[ - "fields" -].message_type = ( - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2._FIELD -) -DESCRIPTOR.message_types_by_name["CreateIndexRequest"] = _CREATEINDEXREQUEST -DESCRIPTOR.message_types_by_name["ListIndexesRequest"] = _LISTINDEXESREQUEST -DESCRIPTOR.message_types_by_name["ListIndexesResponse"] = _LISTINDEXESRESPONSE -DESCRIPTOR.message_types_by_name["GetIndexRequest"] = _GETINDEXREQUEST -DESCRIPTOR.message_types_by_name["DeleteIndexRequest"] = _DELETEINDEXREQUEST -DESCRIPTOR.message_types_by_name["UpdateFieldRequest"] = _UPDATEFIELDREQUEST -DESCRIPTOR.message_types_by_name["GetFieldRequest"] = _GETFIELDREQUEST -DESCRIPTOR.message_types_by_name["ListFieldsRequest"] = _LISTFIELDSREQUEST -DESCRIPTOR.message_types_by_name["ListFieldsResponse"] = _LISTFIELDSRESPONSE -DESCRIPTOR.message_types_by_name["ExportDocumentsRequest"] = _EXPORTDOCUMENTSREQUEST -DESCRIPTOR.message_types_by_name["ImportDocumentsRequest"] = _IMPORTDOCUMENTSREQUEST -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateIndexRequest = _reflection.GeneratedProtocolMessageType( - "CreateIndexRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEINDEXREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex]. - - - Attributes: - parent: - Required. A parent name of the form ``projects/{project_id}/da - tabases/{database_id}/collectionGroups/{collection_id}`` - index: - Required. The composite index to create. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.CreateIndexRequest) - ), -) -_sym_db.RegisterMessage(CreateIndexRequest) - -ListIndexesRequest = _reflection.GeneratedProtocolMessageType( - "ListIndexesRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINDEXESREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes]. - - - Attributes: - parent: - Required. A parent name of the form ``projects/{project_id}/da - tabases/{database_id}/collectionGroups/{collection_id}`` - filter: - The filter to apply to list results. - page_size: - The number of results to return. - page_token: - A page token, returned from a previous call to [FirestoreAdmin - .ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListInd - exes], that may be used to get the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ListIndexesRequest) - ), -) -_sym_db.RegisterMessage(ListIndexesRequest) - -ListIndexesResponse = _reflection.GeneratedProtocolMessageType( - "ListIndexesResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINDEXESRESPONSE, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The response for - [FirestoreAdmin.ListIndexes][google.firestore.admin.v1.FirestoreAdmin.ListIndexes]. 
- - - Attributes: - indexes: - The requested indexes. - next_page_token: - A page token that may be used to request another page of - results. If blank, this is the last page. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ListIndexesResponse) - ), -) -_sym_db.RegisterMessage(ListIndexesResponse) - -GetIndexRequest = _reflection.GeneratedProtocolMessageType( - "GetIndexRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETINDEXREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.GetIndex][google.firestore.admin.v1.FirestoreAdmin.GetIndex]. - - - Attributes: - name: - Required. A name of the form ``projects/{project_id}/databases - /{database_id}/collectionGroups/{collection_id}/indexes/{index - _id}`` - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.GetIndexRequest) - ), -) -_sym_db.RegisterMessage(GetIndexRequest) - -DeleteIndexRequest = _reflection.GeneratedProtocolMessageType( - "DeleteIndexRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETEINDEXREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1.FirestoreAdmin.DeleteIndex]. - - - Attributes: - name: - Required. A name of the form ``projects/{project_id}/databases - /{database_id}/collectionGroups/{collection_id}/indexes/{index - _id}`` - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.DeleteIndexRequest) - ), -) -_sym_db.RegisterMessage(DeleteIndexRequest) - -UpdateFieldRequest = _reflection.GeneratedProtocolMessageType( - "UpdateFieldRequest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEFIELDREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]. - - - Attributes: - field: - Required. The field to be updated. - update_mask: - A mask, relative to the field. If specified, only - configuration specified by this field\_mask will be updated in - the field. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.UpdateFieldRequest) - ), -) -_sym_db.RegisterMessage(UpdateFieldRequest) - -GetFieldRequest = _reflection.GeneratedProtocolMessageType( - "GetFieldRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETFIELDREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.GetField][google.firestore.admin.v1.FirestoreAdmin.GetField]. - - - Attributes: - name: - Required. A name of the form ``projects/{project_id}/databases - /{database_id}/collectionGroups/{collection_id}/fields/{field_ - id}`` - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.GetFieldRequest) - ), -) -_sym_db.RegisterMessage(GetFieldRequest) - -ListFieldsRequest = _reflection.GeneratedProtocolMessageType( - "ListFieldsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTFIELDSREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]. - - - Attributes: - parent: - Required. A parent name of the form ``projects/{project_id}/da - tabases/{database_id}/collectionGroups/{collection_id}`` - filter: - The filter to apply to list results. 
Currently, [FirestoreAdmi - n.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFie - lds] only supports listing fields that have been explicitly - overridden. To issue this query, call [FirestoreAdmin.ListFiel - ds][google.firestore.admin.v1.FirestoreAdmin.ListFields] with - the filter set to ``indexConfig.usesAncestorConfig:false``. - page_size: - The number of results to return. - page_token: - A page token, returned from a previous call to [FirestoreAdmin - .ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFiel - ds], that may be used to get the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ListFieldsRequest) - ), -) -_sym_db.RegisterMessage(ListFieldsRequest) - -ListFieldsResponse = _reflection.GeneratedProtocolMessageType( - "ListFieldsResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTFIELDSRESPONSE, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The response for - [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields]. - - - Attributes: - fields: - The requested fields. - next_page_token: - A page token that may be used to request another page of - results. If blank, this is the last page. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ListFieldsResponse) - ), -) -_sym_db.RegisterMessage(ListFieldsResponse) - -ExportDocumentsRequest = _reflection.GeneratedProtocolMessageType( - "ExportDocumentsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDOCUMENTSREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments]. - - - Attributes: - name: - Required. Database to export. Should be of the form: - ``projects/{project_id}/databases/{database_id}``. - collection_ids: - Which collection ids to export. Unspecified means all - collections. - output_uri_prefix: - The output URI. Currently only supports Google Cloud Storage - URIs of the form: ``gs://BUCKET_NAME[/NAMESPACE_PATH]``, where - ``BUCKET_NAME`` is the name of the Google Cloud Storage bucket - and ``NAMESPACE_PATH`` is an optional Google Cloud Storage - namespace path. When choosing a name, be sure to consider - Google Cloud Storage naming guidelines: - https://cloud.google.com/storage/docs/naming. If the URI is a - bucket (without a namespace path), a prefix will be generated - based on the start time. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsRequest) - ), -) -_sym_db.RegisterMessage(ExportDocumentsRequest) - -ImportDocumentsRequest = _reflection.GeneratedProtocolMessageType( - "ImportDocumentsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_IMPORTDOCUMENTSREQUEST, - __module__="google.cloud.firestore.admin_v1.proto.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments]. - - - Attributes: - name: - Required. Database to import into. Should be of the form: - ``projects/{project_id}/databases/{database_id}``. - collection_ids: - Which collection ids to import. Unspecified means all - collections included in the import. - input_uri_prefix: - Location of the exported files. This must match the - output\_uri\_prefix of an ExportDocumentsResponse from an - export that has completed successfully. See: [google.firestore - .admin.v1.ExportDocumentsResponse.output\_uri\_prefix][google. 
- firestore.admin.v1.ExportDocumentsResponse.output\_uri\_prefix - ]. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ImportDocumentsRequest) - ), -) -_sym_db.RegisterMessage(ImportDocumentsRequest) - - -DESCRIPTOR._options = None -_CREATEINDEXREQUEST.fields_by_name["parent"]._options = None -_CREATEINDEXREQUEST.fields_by_name["index"]._options = None -_LISTINDEXESREQUEST.fields_by_name["parent"]._options = None -_GETINDEXREQUEST.fields_by_name["name"]._options = None -_DELETEINDEXREQUEST.fields_by_name["name"]._options = None -_UPDATEFIELDREQUEST.fields_by_name["field"]._options = None -_GETFIELDREQUEST.fields_by_name["name"]._options = None -_LISTFIELDSREQUEST.fields_by_name["parent"]._options = None -_EXPORTDOCUMENTSREQUEST.fields_by_name["name"]._options = None -_IMPORTDOCUMENTSREQUEST.fields_by_name["name"]._options = None - -_FIRESTOREADMIN = _descriptor.ServiceDescriptor( - name="FirestoreAdmin", - full_name="google.firestore.admin.v1.FirestoreAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=_b( - "\312A\030firestore.googleapis.com\322AXhttps://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastore" - ), - serialized_start=1650, - serialized_end=3559, - methods=[ - _descriptor.MethodDescriptor( - name="CreateIndex", - full_name="google.firestore.admin.v1.FirestoreAdmin.CreateIndex", - index=0, - containing_service=None, - input_type=_CREATEINDEXREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002G">/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes:\005index\332A\014parent,index\312A\037\n\005Index\022\026IndexOperationMetadata' - ), - ), - _descriptor.MethodDescriptor( - name="ListIndexes", - full_name="google.firestore.admin.v1.FirestoreAdmin.ListIndexes", - index=1, - containing_service=None, - input_type=_LISTINDEXESREQUEST, - output_type=_LISTINDEXESRESPONSE, - serialized_options=_b( - "\202\323\344\223\002@\022>/v1/{parent=projects/*/databases/*/collectionGroups/*}/indexes\332A\006parent" - ), - ), - _descriptor.MethodDescriptor( - name="GetIndex", - full_name="google.firestore.admin.v1.FirestoreAdmin.GetIndex", - index=2, - containing_service=None, - input_type=_GETINDEXREQUEST, - output_type=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2._INDEX, - serialized_options=_b( - "\202\323\344\223\002@\022>/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteIndex", - full_name="google.firestore.admin.v1.FirestoreAdmin.DeleteIndex", - index=3, - containing_service=None, - input_type=_DELETEINDEXREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002@*>/v1/{name=projects/*/databases/*/collectionGroups/*/indexes/*}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="GetField", - full_name="google.firestore.admin.v1.FirestoreAdmin.GetField", - index=4, - containing_service=None, - input_type=_GETFIELDREQUEST, - output_type=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2._FIELD, - serialized_options=_b( - "\202\323\344\223\002?\022=/v1/{name=projects/*/databases/*/collectionGroups/*/fields/*}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="UpdateField", - full_name="google.firestore.admin.v1.FirestoreAdmin.UpdateField", - index=5, - containing_service=None, - input_type=_UPDATEFIELDREQUEST, - 
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002L2C/v1/{field.name=projects/*/databases/*/collectionGroups/*/fields/*}:\005field\332A\005field\312A\037\n\005Field\022\026FieldOperationMetadata" - ), - ), - _descriptor.MethodDescriptor( - name="ListFields", - full_name="google.firestore.admin.v1.FirestoreAdmin.ListFields", - index=6, - containing_service=None, - input_type=_LISTFIELDSREQUEST, - output_type=_LISTFIELDSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002?\022=/v1/{parent=projects/*/databases/*/collectionGroups/*}/fields\332A\006parent" - ), - ), - _descriptor.MethodDescriptor( - name="ExportDocuments", - full_name="google.firestore.admin.v1.FirestoreAdmin.ExportDocuments", - index=7, - containing_service=None, - input_type=_EXPORTDOCUMENTSREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0026"1/v1/{name=projects/*/databases/*}:exportDocuments:\001*\332A\004name\312A2\n\027ExportDocumentsResponse\022\027ExportDocumentsMetadata' - ), - ), - _descriptor.MethodDescriptor( - name="ImportDocuments", - full_name="google.firestore.admin.v1.FirestoreAdmin.ImportDocuments", - index=8, - containing_service=None, - input_type=_IMPORTDOCUMENTSREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0026"1/v1/{name=projects/*/databases/*}:importDocuments:\001*\332A\004name\312A0\n\025google.protobuf.Empty\022\027ImportDocumentsMetadata' - ), - ), - ], -) -_sym_db.RegisterServiceDescriptor(_FIRESTOREADMIN) - -DESCRIPTOR.services_by_name["FirestoreAdmin"] = _FIRESTOREADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin_pb2_grpc.py b/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin_pb2_grpc.py deleted file mode 100644 index 269e920b3ac2..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/firestore_admin_pb2_grpc.py +++ /dev/null @@ -1,227 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.firestore_admin_v1.proto import ( - field_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2, -) -from google.cloud.firestore_admin_v1.proto import ( - firestore_admin_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2, -) -from google.cloud.firestore_admin_v1.proto import ( - index_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class FirestoreAdminStub(object): - """Operations are created by service `FirestoreAdmin`, but are accessed via - service `google.longrunning.Operations`. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateIndex = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/CreateIndex", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.CreateIndexRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ListIndexes = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/ListIndexes", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListIndexesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListIndexesResponse.FromString, - ) - self.GetIndex = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/GetIndex", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.GetIndexRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2.Index.FromString, - ) - self.DeleteIndex = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/DeleteIndex", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.DeleteIndexRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GetField = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/GetField", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.GetFieldRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2.Field.FromString, - ) - self.UpdateField = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/UpdateField", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.UpdateFieldRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ListFields = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/ListFields", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListFieldsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListFieldsResponse.FromString, - ) - self.ExportDocuments = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/ExportDocuments", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ExportDocumentsRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ImportDocuments = channel.unary_unary( - "/google.firestore.admin.v1.FirestoreAdmin/ImportDocuments", - request_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ImportDocumentsRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - - -class FirestoreAdminServicer(object): - """Operations are created by service `FirestoreAdmin`, but are accessed via - service `google.longrunning.Operations`. - """ - - def CreateIndex(self, request, context): - """Creates a composite index. This returns a [google.longrunning.Operation][google.longrunning.Operation] - which may be used to track the status of the creation. 
The metadata for - the operation will be the type [IndexOperationMetadata][google.firestore.admin.v1.IndexOperationMetadata]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListIndexes(self, request, context): - """Lists composite indexes. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIndex(self, request, context): - """Gets a composite index. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteIndex(self, request, context): - """Deletes a composite index. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetField(self, request, context): - """Gets the metadata and configuration for a Field. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateField(self, request, context): - """Updates a field configuration. Currently, field updates apply only to - single field index configuration. However, calls to - [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField] should provide a field mask to avoid - changing any configuration that the caller isn't aware of. The field mask - should be specified as: `{ paths: "index_config" }`. - - This call returns a [google.longrunning.Operation][google.longrunning.Operation] which may be used to - track the status of the field update. The metadata for - the operation will be the type [FieldOperationMetadata][google.firestore.admin.v1.FieldOperationMetadata]. - - To configure the default field settings for the database, use - the special `Field` with resource name: - `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListFields(self, request, context): - """Lists the field configuration and metadata for this database. - - Currently, [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] only supports listing fields - that have been explicitly overridden. To issue this query, call - [FirestoreAdmin.ListFields][google.firestore.admin.v1.FirestoreAdmin.ListFields] with the filter set to - `indexConfig.usesAncestorConfig:false`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExportDocuments(self, request, context): - """Exports a copy of all or a subset of documents from Google Cloud Firestore - to another storage system, such as Google Cloud Storage. Recent updates to - documents may not be reflected in the export. The export occurs in the - background and its progress can be monitored and managed via the - Operation resource that is created. The output of an export may only be - used once the associated operation is done. If an export operation is - cancelled before completion it may leave partial data behind in Google - Cloud Storage. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ImportDocuments(self, request, context): - """Imports documents into Google Cloud Firestore. Existing documents with the - same name are overwritten. The import occurs in the background and its - progress can be monitored and managed via the Operation resource that is - created. If an ImportDocuments operation is cancelled, it is possible - that a subset of the data has already been imported to Cloud Firestore. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_FirestoreAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateIndex": grpc.unary_unary_rpc_method_handler( - servicer.CreateIndex, - request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.CreateIndexRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListIndexes": grpc.unary_unary_rpc_method_handler( - servicer.ListIndexes, - request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListIndexesRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListIndexesResponse.SerializeToString, - ), - "GetIndex": grpc.unary_unary_rpc_method_handler( - servicer.GetIndex, - request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.GetIndexRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2.Index.SerializeToString, - ), - "DeleteIndex": grpc.unary_unary_rpc_method_handler( - servicer.DeleteIndex, - request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.DeleteIndexRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetField": grpc.unary_unary_rpc_method_handler( - servicer.GetField, - request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.GetFieldRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_field__pb2.Field.SerializeToString, - ), - "UpdateField": grpc.unary_unary_rpc_method_handler( - servicer.UpdateField, - request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.UpdateFieldRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListFields": grpc.unary_unary_rpc_method_handler( - servicer.ListFields, - request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListFieldsRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ListFieldsResponse.SerializeToString, - ), - "ExportDocuments": grpc.unary_unary_rpc_method_handler( - servicer.ExportDocuments, - request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ExportDocumentsRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ImportDocuments": grpc.unary_unary_rpc_method_handler( - servicer.ImportDocuments, - 
request_deserializer=google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_firestore__admin__pb2.ImportDocumentsRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.firestore.admin.v1.FirestoreAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/firestore/google/cloud/firestore_admin_v1/proto/index.proto b/firestore/google/cloud/firestore_admin_v1/proto/index.proto deleted file mode 100644 index 4b9c6e35b112..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/index.proto +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/api/resource.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "IndexProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; - -// Cloud Firestore indexes enable simple and complex queries against -// documents in a database. -message Index { - option (google.api.resource) = { - type: "firestore.googleapis.com/Index" - pattern: "projects/{project}/databases/{database}/collectionGroups/{collection}/indexes/{index}" - }; - - // A field in an index. - // The field_path describes which field is indexed, the value_mode describes - // how the field value is indexed. - message IndexField { - // The supported orderings. - enum Order { - // The ordering is unspecified. Not a valid option. - ORDER_UNSPECIFIED = 0; - - // The field is ordered by ascending field value. - ASCENDING = 1; - - // The field is ordered by descending field value. - DESCENDING = 2; - } - - // The supported array value configurations. - enum ArrayConfig { - // The index does not support additional array queries. - ARRAY_CONFIG_UNSPECIFIED = 0; - - // The index supports array containment queries. - CONTAINS = 1; - } - - // Can be __name__. - // For single field indexes, this must match the name of the field or may - // be omitted. - string field_path = 1; - - // How the field value is indexed. - oneof value_mode { - // Indicates that this field supports ordering by the specified order or - // comparing using =, <, <=, >, >=. - Order order = 2; - - // Indicates that this field supports operations on `array_value`s. - ArrayConfig array_config = 3; - } - } - - // Query Scope defines the scope at which a query is run. This is specified on - // a StructuredQuery's `from` field. - enum QueryScope { - // The query scope is unspecified. Not a valid option. 
- QUERY_SCOPE_UNSPECIFIED = 0; - - // Indexes with a collection query scope specified allow queries - // against a collection that is the child of a specific document, specified - // at query time, and that has the collection id specified by the index. - COLLECTION = 1; - - // Indexes with a collection group query scope specified allow queries - // against all collections that has the collection id specified by the - // index. - COLLECTION_GROUP = 2; - } - - // The state of an index. During index creation, an index will be in the - // `CREATING` state. If the index is created successfully, it will transition - // to the `READY` state. If the index creation encounters a problem, the index - // will transition to the `NEEDS_REPAIR` state. - enum State { - // The state is unspecified. - STATE_UNSPECIFIED = 0; - - // The index is being created. - // There is an active long-running operation for the index. - // The index is updated when writing a document. - // Some index data may exist. - CREATING = 1; - - // The index is ready to be used. - // The index is updated when writing a document. - // The index is fully populated from all stored documents it applies to. - READY = 2; - - // The index was being created, but something went wrong. - // There is no active long-running operation for the index, - // and the most recently finished long-running operation failed. - // The index is not updated when writing a document. - // Some index data may exist. - // Use the google.longrunning.Operations API to determine why the operation - // that last attempted to create this index failed, then re-create the - // index. - NEEDS_REPAIR = 3; - } - - // Output only. A server defined name for this index. - // The form of this name for composite indexes will be: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{composite_index_id}` - // For single field indexes, this field will be empty. - string name = 1; - - // Indexes with a collection query scope specified allow queries - // against a collection that is the child of a specific document, specified at - // query time, and that has the same collection id. - // - // Indexes with a collection group query scope specified allow queries against - // all collections descended from a specific document, specified at query - // time, and that have the same collection id as this index. - QueryScope query_scope = 2; - - // The fields supported by this index. - // - // For composite indexes, this is always 2 or more fields. - // The last field entry is always for the field path `__name__`. If, on - // creation, `__name__` was not specified as the last field, it will be added - // automatically with the same direction as that of the last field defined. If - // the final field in a composite index is not directional, the `__name__` - // will be ordered ASCENDING (unless explicitly specified). - // - // For single field indexes, this will always be exactly one entry with a - // field path equal to the field path of the associated field. - repeated IndexField fields = 3; - - // Output only. The serving state of the index. - State state = 4; -} diff --git a/firestore/google/cloud/firestore_admin_v1/proto/index_pb2.py b/firestore/google/cloud/firestore_admin_v1/proto/index_pb2.py deleted file mode 100644 index 85356236dd95..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/index_pb2.py +++ /dev/null @@ -1,429 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/firestore/admin_v1/proto/index.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore/admin_v1/proto/index.proto", - package="google.firestore.admin.v1", - syntax="proto3", - serialized_options=_b( - "\n\035com.google.firestore.admin.v1B\nIndexProtoP\001Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\242\002\004GCFS\252\002\037Google.Cloud.Firestore.Admin.V1\312\002\037Google\\Cloud\\Firestore\\Admin\\V1" - ), - serialized_pb=_b( - '\n1google/cloud/firestore/admin_v1/proto/index.proto\x12\x19google.firestore.admin.v1\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto"\xa3\x06\n\x05Index\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bquery_scope\x18\x02 \x01(\x0e\x32+.google.firestore.admin.v1.Index.QueryScope\x12;\n\x06\x66ields\x18\x03 \x03(\x0b\x32+.google.firestore.admin.v1.Index.IndexField\x12\x35\n\x05state\x18\x04 \x01(\x0e\x32&.google.firestore.admin.v1.Index.State\x1a\xbd\x02\n\nIndexField\x12\x12\n\nfield_path\x18\x01 \x01(\t\x12\x42\n\x05order\x18\x02 \x01(\x0e\x32\x31.google.firestore.admin.v1.Index.IndexField.OrderH\x00\x12O\n\x0c\x61rray_config\x18\x03 \x01(\x0e\x32\x37.google.firestore.admin.v1.Index.IndexField.ArrayConfigH\x00"=\n\x05Order\x12\x15\n\x11ORDER_UNSPECIFIED\x10\x00\x12\r\n\tASCENDING\x10\x01\x12\x0e\n\nDESCENDING\x10\x02"9\n\x0b\x41rrayConfig\x12\x1c\n\x18\x41RRAY_CONFIG_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43ONTAINS\x10\x01\x42\x0c\n\nvalue_mode"O\n\nQueryScope\x12\x1b\n\x17QUERY_SCOPE_UNSPECIFIED\x10\x00\x12\x0e\n\nCOLLECTION\x10\x01\x12\x14\n\x10\x43OLLECTION_GROUP\x10\x02"I\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02\x12\x10\n\x0cNEEDS_REPAIR\x10\x03:z\xea\x41w\n\x1e\x66irestore.googleapis.com/Index\x12Uprojects/{project}/databases/{database}/collectionGroups/{collection}/indexes/{index}B\xb8\x01\n\x1d\x63om.google.firestore.admin.v1B\nIndexProtoP\x01Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\xa2\x02\x04GCFS\xaa\x02\x1fGoogle.Cloud.Firestore.Admin.V1\xca\x02\x1fGoogle\\Cloud\\Firestore\\Admin\\V1b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_INDEX_INDEXFIELD_ORDER = _descriptor.EnumDescriptor( - name="Order", - full_name="google.firestore.admin.v1.Index.IndexField.Order", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="ORDER_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ASCENDING", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="DESCENDING", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=527, - serialized_end=588, -) -_sym_db.RegisterEnumDescriptor(_INDEX_INDEXFIELD_ORDER) - 
-_INDEX_INDEXFIELD_ARRAYCONFIG = _descriptor.EnumDescriptor( - name="ArrayConfig", - full_name="google.firestore.admin.v1.Index.IndexField.ArrayConfig", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="ARRAY_CONFIG_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="CONTAINS", index=1, number=1, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=590, - serialized_end=647, -) -_sym_db.RegisterEnumDescriptor(_INDEX_INDEXFIELD_ARRAYCONFIG) - -_INDEX_QUERYSCOPE = _descriptor.EnumDescriptor( - name="QueryScope", - full_name="google.firestore.admin.v1.Index.QueryScope", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="QUERY_SCOPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="COLLECTION", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="COLLECTION_GROUP", - index=2, - number=2, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=663, - serialized_end=742, -) -_sym_db.RegisterEnumDescriptor(_INDEX_QUERYSCOPE) - -_INDEX_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.firestore.admin.v1.Index.State", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="READY", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="NEEDS_REPAIR", index=3, number=3, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=744, - serialized_end=817, -) -_sym_db.RegisterEnumDescriptor(_INDEX_STATE) - - -_INDEX_INDEXFIELD = _descriptor.Descriptor( - name="IndexField", - full_name="google.firestore.admin.v1.Index.IndexField", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field_path", - full_name="google.firestore.admin.v1.Index.IndexField.field_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order", - full_name="google.firestore.admin.v1.Index.IndexField.order", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="array_config", - full_name="google.firestore.admin.v1.Index.IndexField.array_config", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - 
enum_types=[_INDEX_INDEXFIELD_ORDER, _INDEX_INDEXFIELD_ARRAYCONFIG], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="value_mode", - full_name="google.firestore.admin.v1.Index.IndexField.value_mode", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=344, - serialized_end=661, -) - -_INDEX = _descriptor.Descriptor( - name="Index", - full_name="google.firestore.admin.v1.Index", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1.Index.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query_scope", - full_name="google.firestore.admin.v1.Index.query_scope", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="fields", - full_name="google.firestore.admin.v1.Index.fields", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.firestore.admin.v1.Index.state", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_INDEX_INDEXFIELD], - enum_types=[_INDEX_QUERYSCOPE, _INDEX_STATE], - serialized_options=_b( - "\352Aw\n\036firestore.googleapis.com/Index\022Uprojects/{project}/databases/{database}/collectionGroups/{collection}/indexes/{index}" - ), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=138, - serialized_end=941, -) - -_INDEX_INDEXFIELD.fields_by_name["order"].enum_type = _INDEX_INDEXFIELD_ORDER -_INDEX_INDEXFIELD.fields_by_name[ - "array_config" -].enum_type = _INDEX_INDEXFIELD_ARRAYCONFIG -_INDEX_INDEXFIELD.containing_type = _INDEX -_INDEX_INDEXFIELD_ORDER.containing_type = _INDEX_INDEXFIELD -_INDEX_INDEXFIELD_ARRAYCONFIG.containing_type = _INDEX_INDEXFIELD -_INDEX_INDEXFIELD.oneofs_by_name["value_mode"].fields.append( - _INDEX_INDEXFIELD.fields_by_name["order"] -) -_INDEX_INDEXFIELD.fields_by_name[ - "order" -].containing_oneof = _INDEX_INDEXFIELD.oneofs_by_name["value_mode"] -_INDEX_INDEXFIELD.oneofs_by_name["value_mode"].fields.append( - _INDEX_INDEXFIELD.fields_by_name["array_config"] -) -_INDEX_INDEXFIELD.fields_by_name[ - "array_config" -].containing_oneof = _INDEX_INDEXFIELD.oneofs_by_name["value_mode"] -_INDEX.fields_by_name["query_scope"].enum_type = _INDEX_QUERYSCOPE -_INDEX.fields_by_name["fields"].message_type = _INDEX_INDEXFIELD -_INDEX.fields_by_name["state"].enum_type = _INDEX_STATE -_INDEX_QUERYSCOPE.containing_type = _INDEX -_INDEX_STATE.containing_type = _INDEX 
-DESCRIPTOR.message_types_by_name["Index"] = _INDEX -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Index = _reflection.GeneratedProtocolMessageType( - "Index", - (_message.Message,), - dict( - IndexField=_reflection.GeneratedProtocolMessageType( - "IndexField", - (_message.Message,), - dict( - DESCRIPTOR=_INDEX_INDEXFIELD, - __module__="google.cloud.firestore.admin_v1.proto.index_pb2", - __doc__="""A field in an index. The field\_path describes which field - is indexed, the value\_mode describes how the field value is indexed. - - - Attributes: - field_path: - Can be **name**. For single field indexes, this must match the - name of the field or may be omitted. - value_mode: - How the field value is indexed. - order: - Indicates that this field supports ordering by the specified - order or comparing using =, <, <=, >, >=. - array_config: - Indicates that this field supports operations on - ``array_value``\ s. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.Index.IndexField) - ), - ), - DESCRIPTOR=_INDEX, - __module__="google.cloud.firestore.admin_v1.proto.index_pb2", - __doc__="""Cloud Firestore indexes enable simple and complex queries - against documents in a database. - - - Attributes: - name: - Output only. A server defined name for this index. The form of - this name for composite indexes will be: ``projects/{project_i - d}/databases/{database_id}/collectionGroups/{collection_id}/in - dexes/{composite_index_id}`` For single field indexes, this - field will be empty. - query_scope: - Indexes with a collection query scope specified allow queries - against a collection that is the child of a specific document, - specified at query time, and that has the same collection id. - Indexes with a collection group query scope specified allow - queries against all collections descended from a specific - document, specified at query time, and that have the same - collection id as this index. - fields: - The fields supported by this index. For composite indexes, - this is always 2 or more fields. The last field entry is - always for the field path ``__name__``. If, on creation, - ``__name__`` was not specified as the last field, it will be - added automatically with the same direction as that of the - last field defined. If the final field in a composite index is - not directional, the ``__name__`` will be ordered ASCENDING - (unless explicitly specified). For single field indexes, this - will always be exactly one entry with a field path equal to - the field path of the associated field. - state: - Output only. The serving state of the index. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.Index) - ), -) -_sym_db.RegisterMessage(Index) -_sym_db.RegisterMessage(Index.IndexField) - - -DESCRIPTOR._options = None -_INDEX._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_admin_v1/proto/index_pb2_grpc.py b/firestore/google/cloud/firestore_admin_v1/proto/index_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/index_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
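Reviewer note, not part of the deleted file: a minimal sketch of how the generated Index classes removed above were typically constructed. The "tags" field path is an invented placeholder; the enum values and the value_mode oneof are taken from the descriptors in this hunk.

    from google.cloud.firestore_admin_v1.proto import index_pb2

    # Nested enum values are exposed as attributes of the containing message class.
    index = index_pb2.Index(
        query_scope=index_pb2.Index.COLLECTION,
        fields=[
            index_pb2.Index.IndexField(
                field_path="tags",
                array_config=index_pb2.Index.IndexField.CONTAINS,
            )
        ],
    )

    # "order" and "array_config" share the value_mode oneof, so only one can be set.
    assert index.fields[0].WhichOneof("value_mode") == "array_config"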
-import grpc diff --git a/firestore/google/cloud/firestore_admin_v1/proto/location.proto b/firestore/google/cloud/firestore_admin_v1/proto/location.proto deleted file mode 100644 index d9dc6f9b9820..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/location.proto +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/type/latlng.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "LocationProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; - -// The metadata message for [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata]. -message LocationMetadata { - -} diff --git a/firestore/google/cloud/firestore_admin_v1/proto/location_pb2.py b/firestore/google/cloud/firestore_admin_v1/proto/location_pb2.py deleted file mode 100644 index 78258954112a..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/location_pb2.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/firestore/admin_v1/proto/location.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore/admin_v1/proto/location.proto", - package="google.firestore.admin.v1", - syntax="proto3", - serialized_options=_b( - "\n\035com.google.firestore.admin.v1B\rLocationProtoP\001Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\242\002\004GCFS\252\002\037Google.Cloud.Firestore.Admin.V1\312\002\037Google\\Cloud\\Firestore\\Admin\\V1" - ), - serialized_pb=_b( - '\n4google/cloud/firestore/admin_v1/proto/location.proto\x12\x19google.firestore.admin.v1\x1a\x18google/type/latlng.proto\x1a\x1cgoogle/api/annotations.proto"\x12\n\x10LocationMetadataB\xbb\x01\n\x1d\x63om.google.firestore.admin.v1B\rLocationProtoP\x01Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\xa2\x02\x04GCFS\xaa\x02\x1fGoogle.Cloud.Firestore.Admin.V1\xca\x02\x1fGoogle\\Cloud\\Firestore\\Admin\\V1b\x06proto3' - ), - dependencies=[ - google_dot_type_dot_latlng__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_LOCATIONMETADATA = _descriptor.Descriptor( - name="LocationMetadata", - full_name="google.firestore.admin.v1.LocationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=139, - serialized_end=157, -) - -DESCRIPTOR.message_types_by_name["LocationMetadata"] = _LOCATIONMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -LocationMetadata = _reflection.GeneratedProtocolMessageType( - "LocationMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_LOCATIONMETADATA, - __module__="google.cloud.firestore.admin_v1.proto.location_pb2", - __doc__="""The metadata message for - [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata]. - - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.LocationMetadata) - ), -) -_sym_db.RegisterMessage(LocationMetadata) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_admin_v1/proto/location_pb2_grpc.py b/firestore/google/cloud/firestore_admin_v1/proto/location_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/location_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_admin_v1/proto/operation.proto b/firestore/google/cloud/firestore_admin_v1/proto/operation.proto deleted file mode 100644 index 08194fe09341..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/operation.proto +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2019 Google LLC. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1; - -import "google/firestore/admin/v1/index.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1;admin"; -option java_multiple_files = true; -option java_outer_classname = "OperationProto"; -option java_package = "com.google.firestore.admin.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\Admin\\V1"; - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex]. -message IndexOperationMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The index resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string index = 3; - - // The state of the operation. - OperationState state = 4; - - // The progress, in documents, of this operation. - Progress progress_documents = 5; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 6; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]. -message FieldOperationMetadata { - // Information about an index configuration change. - message IndexConfigDelta { - // Specifies how the index is changing. - enum ChangeType { - // The type of change is not specified or known. - CHANGE_TYPE_UNSPECIFIED = 0; - - // The single field index is being added. - ADD = 1; - - // The single field index is being removed. - REMOVE = 2; - } - - // Specifies how the index is changing. - ChangeType change_type = 1; - - // The index being changed. - Index index = 2; - } - - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The field resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` - string field = 3; - - // A list of [IndexConfigDelta][google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta], which describe the intent of this - // operation. - repeated IndexConfigDelta index_config_deltas = 4; - - // The state of the operation. - OperationState state = 5; - - // The progress, in documents, of this operation. - Progress progress_documents = 6; - - // The progress, in bytes, of this operation. 
- Progress progress_bytes = 7; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments]. -message ExportDocumentsMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The state of the export operation. - OperationState operation_state = 3; - - // The progress, in documents, of this operation. - Progress progress_documents = 4; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 5; - - // Which collection ids are being exported. - repeated string collection_ids = 6; - - // Where the entities are being exported to. - string output_uri_prefix = 7; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The state of the import operation. - OperationState operation_state = 3; - - // The progress, in documents, of this operation. - Progress progress_documents = 4; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 5; - - // Which collection ids are being imported. - repeated string collection_ids = 6; - - // The location of the documents being imported. - string input_uri_prefix = 7; -} - -// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. -message ExportDocumentsResponse { - // Location of the output files. This can be used to begin an import - // into Cloud Firestore (this project or another project) after the operation - // completes successfully. - string output_uri_prefix = 1; -} - -// Describes the progress of the operation. -// Unit of work is generic and must be interpreted based on where [Progress][google.firestore.admin.v1.Progress] -// is used. -message Progress { - // The amount of work estimated. - int64 estimated_work = 1; - - // The amount of work completed. - int64 completed_work = 2; -} - -// Describes the state of the operation. -enum OperationState { - // Unspecified. - OPERATION_STATE_UNSPECIFIED = 0; - - // Request is being prepared for processing. - INITIALIZING = 1; - - // Request is actively being processed. - PROCESSING = 2; - - // Request is in the process of being cancelled after user called - // google.longrunning.Operations.CancelOperation on the operation. - CANCELLING = 3; - - // Request has been processed and is in its finalization stage. - FINALIZING = 4; - - // Request has completed successfully. - SUCCESSFUL = 5; - - // Request has finished being processed, but encountered an error. - FAILED = 6; - - // Request has finished being cancelled after user called - // google.longrunning.Operations.CancelOperation. 
- CANCELLED = 7; -} diff --git a/firestore/google/cloud/firestore_admin_v1/proto/operation_pb2.py b/firestore/google/cloud/firestore_admin_v1/proto/operation_pb2.py deleted file mode 100644 index d34dd007f049..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/operation_pb2.py +++ /dev/null @@ -1,1110 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/firestore/admin_v1/proto/operation.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.firestore_admin_v1.proto import ( - index_pb2 as google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore/admin_v1/proto/operation.proto", - package="google.firestore.admin.v1", - syntax="proto3", - serialized_options=_b( - "\n\035com.google.firestore.admin.v1B\016OperationProtoP\001Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\242\002\004GCFS\252\002\037Google.Cloud.Firestore.Admin.V1\312\002\037Google\\Cloud\\Firestore\\Admin\\V1" - ), - serialized_pb=_b( - '\n5google/cloud/firestore/admin_v1/proto/operation.proto\x12\x19google.firestore.admin.v1\x1a\x31google/cloud/firestore/admin_v1/proto/index.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xbd\x02\n\x16IndexOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05index\x18\x03 \x01(\t\x12\x38\n\x05state\x18\x04 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x06 \x01(\x0b\x32#.google.firestore.admin.v1.Progress"\x88\x05\n\x16\x46ieldOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05\x66ield\x18\x03 \x01(\t\x12_\n\x13index_config_deltas\x18\x04 \x03(\x0b\x32\x42.google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta\x12\x38\n\x05state\x18\x05 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x06 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x07 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x1a\xe7\x01\n\x10IndexConfigDelta\x12\x62\n\x0b\x63hange_type\x18\x01 \x01(\x0e\x32M.google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.ChangeType\x12/\n\x05index\x18\x02 \x01(\x0b\x32 .google.firestore.admin.v1.Index">\n\nChangeType\x12\x1b\n\x17\x43HANGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02"\xec\x02\n\x17\x45xportDocumentsMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0foperation_state\x18\x03 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x04 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12\x16\n\x0e\x63ollection_ids\x18\x06 \x03(\t\x12\x19\n\x11output_uri_prefix\x18\x07 \x01(\t"\xeb\x02\n\x17ImportDocumentsMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x42\n\x0foperation_state\x18\x03 \x01(\x0e\x32).google.firestore.admin.v1.OperationState\x12?\n\x12progress_documents\x18\x04 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12;\n\x0eprogress_bytes\x18\x05 \x01(\x0b\x32#.google.firestore.admin.v1.Progress\x12\x16\n\x0e\x63ollection_ids\x18\x06 \x03(\t\x12\x18\n\x10input_uri_prefix\x18\x07 \x01(\t"4\n\x17\x45xportDocumentsResponse\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t":\n\x08Progress\x12\x16\n\x0e\x65stimated_work\x18\x01 \x01(\x03\x12\x16\n\x0e\x63ompleted_work\x18\x02 \x01(\x03*\x9e\x01\n\x0eOperationState\x12\x1f\n\x1bOPERATION_STATE_UNSPECIFIED\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x0e\n\nPROCESSING\x10\x02\x12\x0e\n\nCANCELLING\x10\x03\x12\x0e\n\nFINALIZING\x10\x04\x12\x0e\n\nSUCCESSFUL\x10\x05\x12\n\n\x06\x46\x41ILED\x10\x06\x12\r\n\tCANCELLED\x10\x07\x42\xbc\x01\n\x1d\x63om.google.firestore.admin.v1B\x0eOperationProtoP\x01Z>google.golang.org/genproto/googleapis/firestore/admin/v1;admin\xa2\x02\x04GCFS\xaa\x02\x1fGoogle.Cloud.Firestore.Admin.V1\xca\x02\x1fGoogle\\Cloud\\Firestore\\Admin\\V1b\x06proto3' - ), - dependencies=[ - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - -_OPERATIONSTATE = _descriptor.EnumDescriptor( - name="OperationState", - full_name="google.firestore.admin.v1.OperationState", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="OPERATION_STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="INITIALIZING", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="PROCESSING", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="CANCELLING", index=3, number=3, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="FINALIZING", index=4, number=4, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="SUCCESSFUL", index=5, number=5, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="FAILED", index=6, number=6, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="CANCELLED", index=7, number=7, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2017, - serialized_end=2175, -) -_sym_db.RegisterEnumDescriptor(_OPERATIONSTATE) - -OperationState = enum_type_wrapper.EnumTypeWrapper(_OPERATIONSTATE) -OPERATION_STATE_UNSPECIFIED = 0 -INITIALIZING = 1 -PROCESSING = 2 -CANCELLING = 3 -FINALIZING = 4 -SUCCESSFUL = 5 -FAILED = 6 -CANCELLED = 7 - - -_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE = _descriptor.EnumDescriptor( - name="ChangeType", - 
full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.ChangeType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="CHANGE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ADD", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REMOVE", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1105, - serialized_end=1167, -) -_sym_db.RegisterEnumDescriptor(_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE) - - -_INDEXOPERATIONMETADATA = _descriptor.Descriptor( - name="IndexOperationMetadata", - full_name="google.firestore.admin.v1.IndexOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.firestore.admin.v1.IndexOperationMetadata.start_time", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.firestore.admin.v1.IndexOperationMetadata.end_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="index", - full_name="google.firestore.admin.v1.IndexOperationMetadata.index", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.firestore.admin.v1.IndexOperationMetadata.state", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="progress_documents", - full_name="google.firestore.admin.v1.IndexOperationMetadata.progress_documents", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="progress_bytes", - full_name="google.firestore.admin.v1.IndexOperationMetadata.progress_bytes", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=199, - serialized_end=516, -) - - -_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA 
= _descriptor.Descriptor( - name="IndexConfigDelta", - full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="change_type", - full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.change_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="index", - full_name="google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta.index", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=936, - serialized_end=1167, -) - -_FIELDOPERATIONMETADATA = _descriptor.Descriptor( - name="FieldOperationMetadata", - full_name="google.firestore.admin.v1.FieldOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.firestore.admin.v1.FieldOperationMetadata.start_time", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.firestore.admin.v1.FieldOperationMetadata.end_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="field", - full_name="google.firestore.admin.v1.FieldOperationMetadata.field", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="index_config_deltas", - full_name="google.firestore.admin.v1.FieldOperationMetadata.index_config_deltas", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.firestore.admin.v1.FieldOperationMetadata.state", - index=4, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="progress_documents", - 
full_name="google.firestore.admin.v1.FieldOperationMetadata.progress_documents", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="progress_bytes", - full_name="google.firestore.admin.v1.FieldOperationMetadata.progress_bytes", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=519, - serialized_end=1167, -) - - -_EXPORTDOCUMENTSMETADATA = _descriptor.Descriptor( - name="ExportDocumentsMetadata", - full_name="google.firestore.admin.v1.ExportDocumentsMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.firestore.admin.v1.ExportDocumentsMetadata.start_time", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.firestore.admin.v1.ExportDocumentsMetadata.end_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="operation_state", - full_name="google.firestore.admin.v1.ExportDocumentsMetadata.operation_state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="progress_documents", - full_name="google.firestore.admin.v1.ExportDocumentsMetadata.progress_documents", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="progress_bytes", - full_name="google.firestore.admin.v1.ExportDocumentsMetadata.progress_bytes", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="collection_ids", - full_name="google.firestore.admin.v1.ExportDocumentsMetadata.collection_ids", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="output_uri_prefix", - full_name="google.firestore.admin.v1.ExportDocumentsMetadata.output_uri_prefix", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1170, - serialized_end=1534, -) - - -_IMPORTDOCUMENTSMETADATA = _descriptor.Descriptor( - name="ImportDocumentsMetadata", - full_name="google.firestore.admin.v1.ImportDocumentsMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.firestore.admin.v1.ImportDocumentsMetadata.start_time", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.firestore.admin.v1.ImportDocumentsMetadata.end_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="operation_state", - full_name="google.firestore.admin.v1.ImportDocumentsMetadata.operation_state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="progress_documents", - full_name="google.firestore.admin.v1.ImportDocumentsMetadata.progress_documents", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="progress_bytes", - full_name="google.firestore.admin.v1.ImportDocumentsMetadata.progress_bytes", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="collection_ids", - full_name="google.firestore.admin.v1.ImportDocumentsMetadata.collection_ids", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="input_uri_prefix", - full_name="google.firestore.admin.v1.ImportDocumentsMetadata.input_uri_prefix", - index=6, - number=7, - type=9, - 
cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1537, - serialized_end=1900, -) - - -_EXPORTDOCUMENTSRESPONSE = _descriptor.Descriptor( - name="ExportDocumentsResponse", - full_name="google.firestore.admin.v1.ExportDocumentsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="output_uri_prefix", - full_name="google.firestore.admin.v1.ExportDocumentsResponse.output_uri_prefix", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1902, - serialized_end=1954, -) - - -_PROGRESS = _descriptor.Descriptor( - name="Progress", - full_name="google.firestore.admin.v1.Progress", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="estimated_work", - full_name="google.firestore.admin.v1.Progress.estimated_work", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="completed_work", - full_name="google.firestore.admin.v1.Progress.completed_work", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1956, - serialized_end=2014, -) - -_INDEXOPERATIONMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_INDEXOPERATIONMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_INDEXOPERATIONMETADATA.fields_by_name["state"].enum_type = _OPERATIONSTATE -_INDEXOPERATIONMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS -_INDEXOPERATIONMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS -_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.fields_by_name[ - "change_type" -].enum_type = _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE -_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.fields_by_name[ - "index" -].message_type = ( - google_dot_cloud_dot_firestore_dot_admin__v1_dot_proto_dot_index__pb2._INDEX -) -_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA.containing_type = _FIELDOPERATIONMETADATA -_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA_CHANGETYPE.containing_type = ( - _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA -) -_FIELDOPERATIONMETADATA.fields_by_name[ - 
"start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_FIELDOPERATIONMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_FIELDOPERATIONMETADATA.fields_by_name[ - "index_config_deltas" -].message_type = _FIELDOPERATIONMETADATA_INDEXCONFIGDELTA -_FIELDOPERATIONMETADATA.fields_by_name["state"].enum_type = _OPERATIONSTATE -_FIELDOPERATIONMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS -_FIELDOPERATIONMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS -_EXPORTDOCUMENTSMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_EXPORTDOCUMENTSMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_EXPORTDOCUMENTSMETADATA.fields_by_name["operation_state"].enum_type = _OPERATIONSTATE -_EXPORTDOCUMENTSMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS -_EXPORTDOCUMENTSMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS -_IMPORTDOCUMENTSMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_IMPORTDOCUMENTSMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_IMPORTDOCUMENTSMETADATA.fields_by_name["operation_state"].enum_type = _OPERATIONSTATE -_IMPORTDOCUMENTSMETADATA.fields_by_name["progress_documents"].message_type = _PROGRESS -_IMPORTDOCUMENTSMETADATA.fields_by_name["progress_bytes"].message_type = _PROGRESS -DESCRIPTOR.message_types_by_name["IndexOperationMetadata"] = _INDEXOPERATIONMETADATA -DESCRIPTOR.message_types_by_name["FieldOperationMetadata"] = _FIELDOPERATIONMETADATA -DESCRIPTOR.message_types_by_name["ExportDocumentsMetadata"] = _EXPORTDOCUMENTSMETADATA -DESCRIPTOR.message_types_by_name["ImportDocumentsMetadata"] = _IMPORTDOCUMENTSMETADATA -DESCRIPTOR.message_types_by_name["ExportDocumentsResponse"] = _EXPORTDOCUMENTSRESPONSE -DESCRIPTOR.message_types_by_name["Progress"] = _PROGRESS -DESCRIPTOR.enum_types_by_name["OperationState"] = _OPERATIONSTATE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -IndexOperationMetadata = _reflection.GeneratedProtocolMessageType( - "IndexOperationMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_INDEXOPERATIONMETADATA, - __module__="google.cloud.firestore.admin_v1.proto.operation_pb2", - __doc__="""Metadata for - [google.longrunning.Operation][google.longrunning.Operation] results - from - [FirestoreAdmin.CreateIndex][google.firestore.admin.v1.FirestoreAdmin.CreateIndex]. - - - Attributes: - start_time: - The time this operation started. - end_time: - The time this operation completed. Will be unset if operation - still in progress. - index: - The index resource that this operation is acting on. For - example: ``projects/{project_id}/databases/{database_id}/colle - ctionGroups/{collection_id}/indexes/{index_id}`` - state: - The state of the operation. - progress_documents: - The progress, in documents, of this operation. - progress_bytes: - The progress, in bytes, of this operation. 
- """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.IndexOperationMetadata) - ), -) -_sym_db.RegisterMessage(IndexOperationMetadata) - -FieldOperationMetadata = _reflection.GeneratedProtocolMessageType( - "FieldOperationMetadata", - (_message.Message,), - dict( - IndexConfigDelta=_reflection.GeneratedProtocolMessageType( - "IndexConfigDelta", - (_message.Message,), - dict( - DESCRIPTOR=_FIELDOPERATIONMETADATA_INDEXCONFIGDELTA, - __module__="google.cloud.firestore.admin_v1.proto.operation_pb2", - __doc__="""Information about an index configuration change. - - - Attributes: - change_type: - Specifies how the index is changing. - index: - The index being changed. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.FieldOperationMetadata.IndexConfigDelta) - ), - ), - DESCRIPTOR=_FIELDOPERATIONMETADATA, - __module__="google.cloud.firestore.admin_v1.proto.operation_pb2", - __doc__="""Metadata for - [google.longrunning.Operation][google.longrunning.Operation] results - from - [FirestoreAdmin.UpdateField][google.firestore.admin.v1.FirestoreAdmin.UpdateField]. - - - Attributes: - start_time: - The time this operation started. - end_time: - The time this operation completed. Will be unset if operation - still in progress. - field: - The field resource that this operation is acting on. For - example: ``projects/{project_id}/databases/{database_id}/colle - ctionGroups/{collection_id}/fields/{field_path}`` - index_config_deltas: - A list of [IndexConfigDelta][google.firestore.admin.v1.FieldOp - erationMetadata.IndexConfigDelta], which describe the intent - of this operation. - state: - The state of the operation. - progress_documents: - The progress, in documents, of this operation. - progress_bytes: - The progress, in bytes, of this operation. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.FieldOperationMetadata) - ), -) -_sym_db.RegisterMessage(FieldOperationMetadata) -_sym_db.RegisterMessage(FieldOperationMetadata.IndexConfigDelta) - -ExportDocumentsMetadata = _reflection.GeneratedProtocolMessageType( - "ExportDocumentsMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDOCUMENTSMETADATA, - __module__="google.cloud.firestore.admin_v1.proto.operation_pb2", - __doc__="""Metadata for - [google.longrunning.Operation][google.longrunning.Operation] results - from - [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1.FirestoreAdmin.ExportDocuments]. - - - Attributes: - start_time: - The time this operation started. - end_time: - The time this operation completed. Will be unset if operation - still in progress. - operation_state: - The state of the export operation. - progress_documents: - The progress, in documents, of this operation. - progress_bytes: - The progress, in bytes, of this operation. - collection_ids: - Which collection ids are being exported. - output_uri_prefix: - Where the entities are being exported to. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsMetadata) - ), -) -_sym_db.RegisterMessage(ExportDocumentsMetadata) - -ImportDocumentsMetadata = _reflection.GeneratedProtocolMessageType( - "ImportDocumentsMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_IMPORTDOCUMENTSMETADATA, - __module__="google.cloud.firestore.admin_v1.proto.operation_pb2", - __doc__="""Metadata for - [google.longrunning.Operation][google.longrunning.Operation] results - from - [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1.FirestoreAdmin.ImportDocuments]. 
- - - Attributes: - start_time: - The time this operation started. - end_time: - The time this operation completed. Will be unset if operation - still in progress. - operation_state: - The state of the import operation. - progress_documents: - The progress, in documents, of this operation. - progress_bytes: - The progress, in bytes, of this operation. - collection_ids: - Which collection ids are being imported. - input_uri_prefix: - The location of the documents being imported. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ImportDocumentsMetadata) - ), -) -_sym_db.RegisterMessage(ImportDocumentsMetadata) - -ExportDocumentsResponse = _reflection.GeneratedProtocolMessageType( - "ExportDocumentsResponse", - (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDOCUMENTSRESPONSE, - __module__="google.cloud.firestore.admin_v1.proto.operation_pb2", - __doc__="""Returned in the - [google.longrunning.Operation][google.longrunning.Operation] response - field. - - - Attributes: - output_uri_prefix: - Location of the output files. This can be used to begin an - import into Cloud Firestore (this project or another project) - after the operation completes successfully. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.ExportDocumentsResponse) - ), -) -_sym_db.RegisterMessage(ExportDocumentsResponse) - -Progress = _reflection.GeneratedProtocolMessageType( - "Progress", - (_message.Message,), - dict( - DESCRIPTOR=_PROGRESS, - __module__="google.cloud.firestore.admin_v1.proto.operation_pb2", - __doc__="""Describes the progress of the operation. Unit of work is - generic and must be interpreted based on where - [Progress][google.firestore.admin.v1.Progress] is used. - - - Attributes: - estimated_work: - The amount of work estimated. - completed_work: - The amount of work completed. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1.Progress) - ), -) -_sym_db.RegisterMessage(Progress) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_admin_v1/proto/operation_pb2_grpc.py b/firestore/google/cloud/firestore_admin_v1/proto/operation_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/proto/operation_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_admin_v1/types.py b/firestore/google/cloud/firestore_admin_v1/types.py deleted file mode 100644 index ca5f241644f6..000000000000 --- a/firestore/google/cloud/firestore_admin_v1/types.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
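Reviewer note, not part of the deleted module: a hedged sketch of how the operation metadata defined above could be read off a google.longrunning.Operation. The describe_index_operation helper is hypothetical; Any.Unpack and EnumTypeWrapper.Name are standard protobuf APIs.

    from google.cloud.firestore_admin_v1.proto import operation_pb2

    def describe_index_operation(operation):
        # `operation` is assumed to be a google.longrunning.Operation message
        # returned by FirestoreAdmin.CreateIndex.
        metadata = operation_pb2.IndexOperationMetadata()
        operation.metadata.Unpack(metadata)  # the metadata field is a protobuf Any

        state = operation_pb2.OperationState.Name(metadata.state)
        done = metadata.progress_documents.completed_work
        total = metadata.progress_documents.estimated_work
        return "%s %s: %d/%d documents" % (state, metadata.index, done, total)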
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.firestore_admin_v1.proto import field_pb2 -from google.cloud.firestore_admin_v1.proto import firestore_admin_pb2 -from google.cloud.firestore_admin_v1.proto import index_pb2 -from google.cloud.firestore_admin_v1.proto import location_pb2 -from google.cloud.firestore_admin_v1.proto import operation_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - operations_pb2, - any_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [ - field_pb2, - firestore_admin_pb2, - index_pb2, - location_pb2, - operation_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.firestore_admin_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/firestore/google/cloud/firestore_v1/__init__.py b/firestore/google/cloud/firestore_v1/__init__.py deleted file mode 100644 index e4af45218ecc..000000000000 --- a/firestore/google/cloud/firestore_v1/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2019 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
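Reviewer note: the types module deleted above re-exported every admin message through one import point and rewrote __module__ for the local protos. A short illustrative snippet, assuming the package's standard GAPIC layout; the resource names are placeholders.

    from google.cloud.firestore_admin_v1 import types

    index = types.Index(
        name="projects/my-project/databases/(default)/"
             "collectionGroups/users/indexes/my-index"
    )
    progress = types.Progress(estimated_work=100, completed_work=25)

    # Messages pulled in from the local modules report this package as home:
    print(type(index).__module__)  # google.cloud.firestore_admin_v1.types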
- -"""Python idiomatic client for Google Cloud Firestore.""" - -from pkg_resources import get_distribution - -__version__ = get_distribution("google-cloud-firestore").version - -from google.cloud.firestore_v1 import types -from google.cloud.firestore_v1._helpers import GeoPoint -from google.cloud.firestore_v1._helpers import ExistsOption -from google.cloud.firestore_v1._helpers import LastUpdateOption -from google.cloud.firestore_v1._helpers import ReadAfterWriteError -from google.cloud.firestore_v1._helpers import WriteOption -from google.cloud.firestore_v1.batch import WriteBatch -from google.cloud.firestore_v1.client import Client -from google.cloud.firestore_v1.collection import CollectionReference -from google.cloud.firestore_v1.transforms import ArrayRemove -from google.cloud.firestore_v1.transforms import ArrayUnion -from google.cloud.firestore_v1.transforms import DELETE_FIELD -from google.cloud.firestore_v1.transforms import Increment -from google.cloud.firestore_v1.transforms import Maximum -from google.cloud.firestore_v1.transforms import Minimum -from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP -from google.cloud.firestore_v1.document import DocumentReference -from google.cloud.firestore_v1.document import DocumentSnapshot -from google.cloud.firestore_v1.gapic import enums -from google.cloud.firestore_v1.query import Query -from google.cloud.firestore_v1.transaction import Transaction -from google.cloud.firestore_v1.transaction import transactional -from google.cloud.firestore_v1.watch import Watch - - -__all__ = [ - "__version__", - "ArrayRemove", - "ArrayUnion", - "Client", - "CollectionReference", - "DELETE_FIELD", - "DocumentReference", - "DocumentSnapshot", - "enums", - "ExistsOption", - "GeoPoint", - "Increment", - "LastUpdateOption", - "Maximum", - "Minimum", - "Query", - "ReadAfterWriteError", - "SERVER_TIMESTAMP", - "Transaction", - "transactional", - "types", - "Watch", - "WriteBatch", - "WriteOption", -] diff --git a/firestore/google/cloud/firestore_v1/_helpers.py b/firestore/google/cloud/firestore_v1/_helpers.py deleted file mode 100644 index 09f5d7f41c0e..000000000000 --- a/firestore/google/cloud/firestore_v1/_helpers.py +++ /dev/null @@ -1,1049 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Common helpers shared across Google Cloud Firestore modules.""" - -import datetime - -from google.protobuf import struct_pb2 -from google.type import latlng_pb2 -import grpc -import six - -from google.cloud import exceptions -from google.cloud._helpers import _datetime_to_pb_timestamp -from google.api_core.datetime_helpers import DatetimeWithNanoseconds -from google.cloud.firestore_v1 import transforms -from google.cloud.firestore_v1 import types -from google.cloud.firestore_v1.field_path import FieldPath -from google.cloud.firestore_v1.field_path import parse_field_path -from google.cloud.firestore_v1.gapic import enums -from google.cloud.firestore_v1.proto import common_pb2 -from google.cloud.firestore_v1.proto import document_pb2 -from google.cloud.firestore_v1.proto import write_pb2 - - -BAD_PATH_TEMPLATE = "A path element must be a string. Received {}, which is a {}." -DOCUMENT_PATH_DELIMITER = "/" -INACTIVE_TXN = "Transaction not in progress, cannot be used in API requests." -READ_AFTER_WRITE_ERROR = "Attempted read after write in a transaction." -BAD_REFERENCE_ERROR = ( - "Reference value {!r} in unexpected format, expected to be of the form " - "``projects/{{project}}/databases/{{database}}/" - "documents/{{document_path}}``." -) -WRONG_APP_REFERENCE = ( - "Document {!r} does not correspond to the same database " "({!r}) as the client." -) -REQUEST_TIME_ENUM = enums.DocumentTransform.FieldTransform.ServerValue.REQUEST_TIME -_GRPC_ERROR_MAPPING = { - grpc.StatusCode.ALREADY_EXISTS: exceptions.Conflict, - grpc.StatusCode.NOT_FOUND: exceptions.NotFound, -} - - -class GeoPoint(object): - """Simple container for a geo point value. - - Args: - latitude (float): Latitude of a point. - longitude (float): Longitude of a point. - """ - - def __init__(self, latitude, longitude): - self.latitude = latitude - self.longitude = longitude - - def to_protobuf(self): - """Convert the current object to protobuf. - - Returns: - google.type.latlng_pb2.LatLng: The current point as a protobuf. - """ - return latlng_pb2.LatLng(latitude=self.latitude, longitude=self.longitude) - - def __eq__(self, other): - """Compare two geo points for equality. - - Returns: - Union[bool, NotImplemented]: :data:`True` if the points compare - equal, else :data:`False`. (Or :data:`NotImplemented` if - ``other`` is not a geo point.) - """ - if not isinstance(other, GeoPoint): - return NotImplemented - - return self.latitude == other.latitude and self.longitude == other.longitude - - def __ne__(self, other): - """Compare two geo points for inequality. - - Returns: - Union[bool, NotImplemented]: :data:`False` if the points compare - equal, else :data:`True`. (Or :data:`NotImplemented` if - ``other`` is not a geo point.) - """ - equality_val = self.__eq__(other) - if equality_val is NotImplemented: - return NotImplemented - else: - return not equality_val - - -def verify_path(path, is_collection): - """Verifies that a ``path`` has the correct form. - - Checks that all of the elements in ``path`` are strings. - - Args: - path (Tuple[str, ...]): The components in a collection or - document path. - is_collection (bool): Indicates if the ``path`` represents - a document or a collection. 
- - Raises: - ValueError: if - - * the ``path`` is empty - * ``is_collection=True`` and there are an even number of elements - * ``is_collection=False`` and there are an odd number of elements - * an element is not a string - """ - num_elements = len(path) - if num_elements == 0: - raise ValueError("Document or collection path cannot be empty") - - if is_collection: - if num_elements % 2 == 0: - raise ValueError("A collection must have an odd number of path elements") - else: - if num_elements % 2 == 1: - raise ValueError("A document must have an even number of path elements") - - for element in path: - if not isinstance(element, six.string_types): - msg = BAD_PATH_TEMPLATE.format(element, type(element)) - raise ValueError(msg) - - -def encode_value(value): - """Converts a native Python value into a Firestore protobuf ``Value``. - - Args: - value (Union[NoneType, bool, int, float, datetime.datetime, \ - str, bytes, dict, ~google.cloud.Firestore.GeoPoint]): A native - Python value to convert to a protobuf field. - - Returns: - ~google.cloud.firestore_v1.types.Value: A - value encoded as a Firestore protobuf. - - Raises: - TypeError: If the ``value`` is not one of the accepted types. - """ - if value is None: - return document_pb2.Value(null_value=struct_pb2.NULL_VALUE) - - # Must come before six.integer_types since ``bool`` is an integer subtype. - if isinstance(value, bool): - return document_pb2.Value(boolean_value=value) - - if isinstance(value, six.integer_types): - return document_pb2.Value(integer_value=value) - - if isinstance(value, float): - return document_pb2.Value(double_value=value) - - if isinstance(value, DatetimeWithNanoseconds): - return document_pb2.Value(timestamp_value=value.timestamp_pb()) - - if isinstance(value, datetime.datetime): - return document_pb2.Value(timestamp_value=_datetime_to_pb_timestamp(value)) - - if isinstance(value, six.text_type): - return document_pb2.Value(string_value=value) - - if isinstance(value, six.binary_type): - return document_pb2.Value(bytes_value=value) - - # NOTE: We avoid doing an isinstance() check for a Document - # here to avoid import cycles. - document_path = getattr(value, "_document_path", None) - if document_path is not None: - return document_pb2.Value(reference_value=document_path) - - if isinstance(value, GeoPoint): - return document_pb2.Value(geo_point_value=value.to_protobuf()) - - if isinstance(value, list): - value_list = [encode_value(element) for element in value] - value_pb = document_pb2.ArrayValue(values=value_list) - return document_pb2.Value(array_value=value_pb) - - if isinstance(value, dict): - value_dict = encode_dict(value) - value_pb = document_pb2.MapValue(fields=value_dict) - return document_pb2.Value(map_value=value_pb) - - raise TypeError( - "Cannot convert to a Firestore Value", value, "Invalid type", type(value) - ) - - -def encode_dict(values_dict): - """Encode a dictionary into protobuf ``Value``-s. - - Args: - values_dict (dict): The dictionary to encode as protobuf fields. - - Returns: - Dict[str, ~google.cloud.firestore_v1.types.Value]: A - dictionary of string keys and ``Value`` protobufs as dictionary - values. - """ - return {key: encode_value(value) for key, value in six.iteritems(values_dict)} - - -def reference_value_to_document(reference_value, client): - """Convert a reference value string to a document. - - Args: - reference_value (str): A document reference value. - client (:class:`~google.cloud.firestore_v1.client.Client`): - A client that has a document factory. 
- - Returns: - :class:`~google.cloud.firestore_v1.document.DocumentReference`: - The document corresponding to ``reference_value``. - - Raises: - ValueError: If the ``reference_value`` is not of the expected - format: ``projects/{project}/databases/{database}/documents/...``. - ValueError: If the ``reference_value`` does not come from the same - project / database combination as the ``client``. - """ - # The first 5 parts are - # projects, {project}, databases, {database}, documents - parts = reference_value.split(DOCUMENT_PATH_DELIMITER, 5) - if len(parts) != 6: - msg = BAD_REFERENCE_ERROR.format(reference_value) - raise ValueError(msg) - - # The sixth part is `a/b/c/d` (i.e. the document path) - document = client.document(parts[-1]) - if document._document_path != reference_value: - msg = WRONG_APP_REFERENCE.format(reference_value, client._database_string) - raise ValueError(msg) - - return document - - -def decode_value(value, client): - """Converts a Firestore protobuf ``Value`` to a native Python value. - - Args: - value (google.cloud.firestore_v1.types.Value): A - Firestore protobuf to be decoded / parsed / converted. - client (:class:`~google.cloud.firestore_v1.client.Client`): - A client that has a document factory. - - Returns: - Union[NoneType, bool, int, float, datetime.datetime, \ - str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native - Python value converted from the ``value``. - - Raises: - NotImplementedError: If the ``value_type`` is ``reference_value``. - ValueError: If the ``value_type`` is unknown. - """ - value_type = value.WhichOneof("value_type") - - if value_type == "null_value": - return None - elif value_type == "boolean_value": - return value.boolean_value - elif value_type == "integer_value": - return value.integer_value - elif value_type == "double_value": - return value.double_value - elif value_type == "timestamp_value": - return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value) - elif value_type == "string_value": - return value.string_value - elif value_type == "bytes_value": - return value.bytes_value - elif value_type == "reference_value": - return reference_value_to_document(value.reference_value, client) - elif value_type == "geo_point_value": - return GeoPoint(value.geo_point_value.latitude, value.geo_point_value.longitude) - elif value_type == "array_value": - return [decode_value(element, client) for element in value.array_value.values] - elif value_type == "map_value": - return decode_dict(value.map_value.fields, client) - else: - raise ValueError("Unknown ``value_type``", value_type) - - -def decode_dict(value_fields, client): - """Converts a protobuf map of Firestore ``Value``-s. - - Args: - value_fields (google.protobuf.pyext._message.MessageMapContainer): A - protobuf map of Firestore ``Value``-s. - client (:class:`~google.cloud.firestore_v1.client.Client`): - A client that has a document factory. - - Returns: - Dict[str, Union[NoneType, bool, int, float, datetime.datetime, \ - str, bytes, dict, ~google.cloud.Firestore.GeoPoint]]: A dictionary - of native Python values converted from the ``value_fields``. - """ - return { - key: decode_value(value, client) for key, value in six.iteritems(value_fields) - } - - -def get_doc_id(document_pb, expected_prefix): - """Parse a document ID from a document protobuf. - - Args: - document_pb (google.cloud.proto.firestore.v1.\ - document_pb2.Document): A protobuf for a document that - was created in a ``CreateDocument`` RPC. 
- expected_prefix (str): The expected collection prefix for the - fully-qualified document name. - - Returns: - str: The document ID from the protobuf. - - Raises: - ValueError: If the name does not begin with the prefix. - """ - prefix, document_id = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1) - if prefix != expected_prefix: - raise ValueError( - "Unexpected document name", - document_pb.name, - "Expected to begin with", - expected_prefix, - ) - - return document_id - - -_EmptyDict = transforms.Sentinel("Marker for an empty dict value") - - -def extract_fields(document_data, prefix_path, expand_dots=False): - """Do depth-first walk of tree, yielding field_path, value""" - if not document_data: - yield prefix_path, _EmptyDict - else: - for key, value in sorted(six.iteritems(document_data)): - - if expand_dots: - sub_key = FieldPath.from_string(key) - else: - sub_key = FieldPath(key) - - field_path = FieldPath(*(prefix_path.parts + sub_key.parts)) - - if isinstance(value, dict): - for s_path, s_value in extract_fields(value, field_path): - yield s_path, s_value - else: - yield field_path, value - - -def set_field_value(document_data, field_path, value): - """Set a value into a document for a field_path""" - current = document_data - for element in field_path.parts[:-1]: - current = current.setdefault(element, {}) - if value is _EmptyDict: - value = {} - current[field_path.parts[-1]] = value - - -def get_field_value(document_data, field_path): - if not field_path.parts: - raise ValueError("Empty path") - - current = document_data - for element in field_path.parts[:-1]: - current = current[element] - return current[field_path.parts[-1]] - - -class DocumentExtractor(object): - """ Break document data up into actual data and transforms. - - Handle special values such as ``DELETE_FIELD``, ``SERVER_TIMESTAMP``. - - Args: - document_data (dict): - Property names and values to use for sending a change to - a document. 
- """ - - def __init__(self, document_data): - self.document_data = document_data - self.field_paths = [] - self.deleted_fields = [] - self.server_timestamps = [] - self.array_removes = {} - self.array_unions = {} - self.increments = {} - self.minimums = {} - self.maximums = {} - self.set_fields = {} - self.empty_document = False - - prefix_path = FieldPath() - iterator = self._get_document_iterator(prefix_path) - - for field_path, value in iterator: - - if field_path == prefix_path and value is _EmptyDict: - self.empty_document = True - - elif value is transforms.DELETE_FIELD: - self.deleted_fields.append(field_path) - - elif value is transforms.SERVER_TIMESTAMP: - self.server_timestamps.append(field_path) - - elif isinstance(value, transforms.ArrayRemove): - self.array_removes[field_path] = value.values - - elif isinstance(value, transforms.ArrayUnion): - self.array_unions[field_path] = value.values - - elif isinstance(value, transforms.Increment): - self.increments[field_path] = value.value - - elif isinstance(value, transforms.Maximum): - self.maximums[field_path] = value.value - - elif isinstance(value, transforms.Minimum): - self.minimums[field_path] = value.value - - else: - self.field_paths.append(field_path) - set_field_value(self.set_fields, field_path, value) - - def _get_document_iterator(self, prefix_path): - return extract_fields(self.document_data, prefix_path) - - @property - def has_transforms(self): - return bool( - self.server_timestamps - or self.array_removes - or self.array_unions - or self.increments - or self.maximums - or self.minimums - ) - - @property - def transform_paths(self): - return sorted( - self.server_timestamps - + list(self.array_removes) - + list(self.array_unions) - + list(self.increments) - + list(self.maximums) - + list(self.minimums) - ) - - def _get_update_mask(self, allow_empty_mask=False): - return None - - def get_update_pb(self, document_path, exists=None, allow_empty_mask=False): - - if exists is not None: - current_document = common_pb2.Precondition(exists=exists) - else: - current_document = None - - update_pb = write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=encode_dict(self.set_fields) - ), - update_mask=self._get_update_mask(allow_empty_mask), - current_document=current_document, - ) - - return update_pb - - def get_transform_pb(self, document_path, exists=None): - def make_array_value(values): - value_list = [encode_value(element) for element in values] - return document_pb2.ArrayValue(values=value_list) - - path_field_transforms = ( - [ - ( - path, - write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), - set_to_server_value=REQUEST_TIME_ENUM, - ), - ) - for path in self.server_timestamps - ] - + [ - ( - path, - write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), - remove_all_from_array=make_array_value(values), - ), - ) - for path, values in self.array_removes.items() - ] - + [ - ( - path, - write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), - append_missing_elements=make_array_value(values), - ), - ) - for path, values in self.array_unions.items() - ] - + [ - ( - path, - write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), increment=encode_value(value) - ), - ) - for path, value in self.increments.items() - ] - + [ - ( - path, - write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), maximum=encode_value(value) - ), - ) - for path, value in self.maximums.items() - ] - + [ - ( - path, - 
write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), minimum=encode_value(value) - ), - ) - for path, value in self.minimums.items() - ] - ) - field_transforms = [ - transform for path, transform in sorted(path_field_transforms) - ] - transform_pb = write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, field_transforms=field_transforms - ) - ) - if exists is not None: - transform_pb.current_document.CopyFrom( - common_pb2.Precondition(exists=exists) - ) - - return transform_pb - - -def pbs_for_create(document_path, document_data): - """Make ``Write`` protobufs for ``create()`` methods. - - Args: - document_path (str): A fully-qualified document path. - document_data (dict): Property names and values to use for - creating a document. - - Returns: - List[google.cloud.firestore_v1.types.Write]: One or two - ``Write`` protobuf instances for ``create()``. - """ - extractor = DocumentExtractor(document_data) - - if extractor.deleted_fields: - raise ValueError("Cannot apply DELETE_FIELD in a create request.") - - write_pbs = [] - - # Conformance tests require skipping the 'update_pb' if the document - # contains only transforms. - if extractor.empty_document or extractor.set_fields: - write_pbs.append(extractor.get_update_pb(document_path, exists=False)) - - if extractor.has_transforms: - exists = None if write_pbs else False - transform_pb = extractor.get_transform_pb(document_path, exists) - write_pbs.append(transform_pb) - - return write_pbs - - -def pbs_for_set_no_merge(document_path, document_data): - """Make ``Write`` protobufs for ``set()`` methods. - - Args: - document_path (str): A fully-qualified document path. - document_data (dict): Property names and values to use for - replacing a document. - - Returns: - List[google.cloud.firestore_v1.types.Write]: One - or two ``Write`` protobuf instances for ``set()``. - """ - extractor = DocumentExtractor(document_data) - - if extractor.deleted_fields: - raise ValueError( - "Cannot apply DELETE_FIELD in a set request without " - "specifying 'merge=True' or 'merge=[field_paths]'." - ) - - # Conformance tests require sending the 'update_pb' even if the document - # contains only transforms. - write_pbs = [extractor.get_update_pb(document_path)] - - if extractor.has_transforms: - transform_pb = extractor.get_transform_pb(document_path) - write_pbs.append(transform_pb) - - return write_pbs - - -class DocumentExtractorForMerge(DocumentExtractor): - """ Break document data up into actual data and transforms. 
- """ - - def __init__(self, document_data): - super(DocumentExtractorForMerge, self).__init__(document_data) - self.data_merge = [] - self.transform_merge = [] - self.merge = [] - - @property - def has_updates(self): - # for whatever reason, the conformance tests want to see the parent - # of nested transform paths in the update mask - # (see set-st-merge-nonleaf-alone.textproto) - update_paths = set(self.data_merge) - - for transform_path in self.transform_paths: - if len(transform_path.parts) > 1: - parent_fp = FieldPath(*transform_path.parts[:-1]) - update_paths.add(parent_fp) - - return bool(update_paths) - - def _apply_merge_all(self): - self.data_merge = sorted(self.field_paths + self.deleted_fields) - # TODO: other transforms - self.transform_merge = self.transform_paths - self.merge = sorted(self.data_merge + self.transform_paths) - - def _construct_merge_paths(self, merge): - for merge_field in merge: - if isinstance(merge_field, FieldPath): - yield merge_field - else: - yield FieldPath(*parse_field_path(merge_field)) - - def _normalize_merge_paths(self, merge): - merge_paths = sorted(self._construct_merge_paths(merge)) - - # Raise if any merge path is a parent of another. Leverage sorting - # to avoid quadratic behavior. - for index in range(len(merge_paths) - 1): - lhs, rhs = merge_paths[index], merge_paths[index + 1] - if lhs.eq_or_parent(rhs): - raise ValueError("Merge paths overlap: {}, {}".format(lhs, rhs)) - - for merge_path in merge_paths: - if merge_path in self.deleted_fields: - continue - try: - get_field_value(self.document_data, merge_path) - except KeyError: - raise ValueError("Invalid merge path: {}".format(merge_path)) - - return merge_paths - - def _apply_merge_paths(self, merge): - - if self.empty_document: - raise ValueError("Cannot merge specific fields with empty document.") - - merge_paths = self._normalize_merge_paths(merge) - - del self.data_merge[:] - del self.transform_merge[:] - self.merge = merge_paths - - for merge_path in merge_paths: - - if merge_path in self.transform_paths: - self.transform_merge.append(merge_path) - - for field_path in self.field_paths: - if merge_path.eq_or_parent(field_path): - self.data_merge.append(field_path) - - # Clear out data for fields not merged. - merged_set_fields = {} - for field_path in self.data_merge: - value = get_field_value(self.document_data, field_path) - set_field_value(merged_set_fields, field_path, value) - self.set_fields = merged_set_fields - - unmerged_deleted_fields = [ - field_path - for field_path in self.deleted_fields - if field_path not in self.merge - ] - if unmerged_deleted_fields: - raise ValueError( - "Cannot delete unmerged fields: {}".format(unmerged_deleted_fields) - ) - self.data_merge = sorted(self.data_merge + self.deleted_fields) - - # Keep only transforms which are within merge. 
- merged_transform_paths = set() - for merge_path in self.merge: - transform_merge_paths = [ - transform_path - for transform_path in self.transform_paths - if merge_path.eq_or_parent(transform_path) - ] - merged_transform_paths.update(transform_merge_paths) - - self.server_timestamps = [ - path for path in self.server_timestamps if path in merged_transform_paths - ] - - self.array_removes = { - path: values - for path, values in self.array_removes.items() - if path in merged_transform_paths - } - - self.array_unions = { - path: values - for path, values in self.array_unions.items() - if path in merged_transform_paths - } - - def apply_merge(self, merge): - if merge is True: # merge all fields - self._apply_merge_all() - else: - self._apply_merge_paths(merge) - - def _get_update_mask(self, allow_empty_mask=False): - # Mask uses dotted / quoted paths. - mask_paths = [ - field_path.to_api_repr() - for field_path in self.merge - if field_path not in self.transform_merge - ] - - if mask_paths or allow_empty_mask: - return common_pb2.DocumentMask(field_paths=mask_paths) - - -def pbs_for_set_with_merge(document_path, document_data, merge): - """Make ``Write`` protobufs for ``set()`` methods. - - Args: - document_path (str): A fully-qualified document path. - document_data (dict): Property names and values to use for - replacing a document. - merge (Optional[bool] or Optional[List]): - If True, merge all fields; else, merge only the named fields. - - Returns: - List[google.cloud.firestore_v1.types.Write]: One - or two ``Write`` protobuf instances for ``set()``. - """ - extractor = DocumentExtractorForMerge(document_data) - extractor.apply_merge(merge) - - merge_empty = not document_data - - write_pbs = [] - - if extractor.has_updates or merge_empty: - write_pbs.append( - extractor.get_update_pb(document_path, allow_empty_mask=merge_empty) - ) - - if extractor.transform_paths: - transform_pb = extractor.get_transform_pb(document_path) - write_pbs.append(transform_pb) - - return write_pbs - - -class DocumentExtractorForUpdate(DocumentExtractor): - """ Break document data up into actual data and transforms. - """ - - def __init__(self, document_data): - super(DocumentExtractorForUpdate, self).__init__(document_data) - self.top_level_paths = sorted( - [FieldPath.from_string(key) for key in document_data] - ) - tops = set(self.top_level_paths) - for top_level_path in self.top_level_paths: - for ancestor in top_level_path.lineage(): - if ancestor in tops: - raise ValueError( - "Conflicting field path: {}, {}".format( - top_level_path, ancestor - ) - ) - - for field_path in self.deleted_fields: - if field_path not in tops: - raise ValueError( - "Cannot update with nested delete: {}".format(field_path) - ) - - def _get_document_iterator(self, prefix_path): - return extract_fields(self.document_data, prefix_path, expand_dots=True) - - def _get_update_mask(self, allow_empty_mask=False): - mask_paths = [] - for field_path in self.top_level_paths: - if field_path not in self.transform_paths: - mask_paths.append(field_path.to_api_repr()) - - return common_pb2.DocumentMask(field_paths=mask_paths) - - -def pbs_for_update(document_path, field_updates, option): - """Make ``Write`` protobufs for ``update()`` methods. - - Args: - document_path (str): A fully-qualified document path. - field_updates (dict): Field names or paths to update and values - to update with. 
- option (optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]): - A write option to make assertions / preconditions on the server - state of the document before applying changes. - - Returns: - List[google.cloud.firestore_v1.types.Write]: One - or two ``Write`` protobuf instances for ``update()``. - """ - extractor = DocumentExtractorForUpdate(field_updates) - - if extractor.empty_document: - raise ValueError("Cannot update with an empty document.") - - if option is None: # Default is to use ``exists=True``. - option = ExistsOption(exists=True) - - write_pbs = [] - - if extractor.field_paths or extractor.deleted_fields: - update_pb = extractor.get_update_pb(document_path) - option.modify_write(update_pb) - write_pbs.append(update_pb) - - if extractor.has_transforms: - transform_pb = extractor.get_transform_pb(document_path) - if not write_pbs: - # NOTE: set the write option on the ``transform_pb`` only if there - # is no ``update_pb`` - option.modify_write(transform_pb) - write_pbs.append(transform_pb) - - return write_pbs - - -def pb_for_delete(document_path, option): - """Make a ``Write`` protobuf for ``delete()`` methods. - - Args: - document_path (str): A fully-qualified document path. - option (optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]): - A write option to make assertions / preconditions on the server - state of the document before applying changes. - - Returns: - google.cloud.firestore_v1.types.Write: A - ``Write`` protobuf instance for the ``delete()``. - """ - write_pb = write_pb2.Write(delete=document_path) - if option is not None: - option.modify_write(write_pb) - - return write_pb - - -class ReadAfterWriteError(Exception): - """Raised when a read is attempted after a write. - - Raised by "read" methods that use transactions. - """ - - -def get_transaction_id(transaction, read_operation=True): - """Get the transaction ID from a ``Transaction`` object. - - Args: - transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.\ - Transaction`]): - An existing transaction that this query will run in. - read_operation (Optional[bool]): Indicates if the transaction ID - will be used in a read operation. Defaults to :data:`True`. - - Returns: - Optional[bytes]: The ID of the transaction, or :data:`None` if the - ``transaction`` is :data:`None`. - - Raises: - ValueError: If the ``transaction`` is not in progress (only if - ``transaction`` is not :data:`None`). - ReadAfterWriteError: If the ``transaction`` has writes stored on - it and ``read_operation`` is :data:`True`. - """ - if transaction is None: - return None - else: - if not transaction.in_progress: - raise ValueError(INACTIVE_TXN) - if read_operation and len(transaction._write_pbs) > 0: - raise ReadAfterWriteError(READ_AFTER_WRITE_ERROR) - return transaction.id - - -def metadata_with_prefix(prefix, **kw): - """Create RPC metadata containing a prefix. - - Args: - prefix (str): appropriate resource path. - - Returns: - List[Tuple[str, str]]: RPC metadata with supplied prefix - """ - return [("google-cloud-resource-prefix", prefix)] - - -class WriteOption(object): - """Option used to assert a condition on a write operation.""" - - def modify_write(self, write_pb, no_create_msg=None): - """Modify a ``Write`` protobuf based on the state of this write option. - - This is a virtual method intended to be implemented by subclasses. 
- - Args: - write_pb (google.cloud.firestore_v1.types.Write): A - ``Write`` protobuf instance to be modified with a precondition - determined by the state of this option. - no_create_msg (Optional[str]): A message to use to indicate that - a create operation is not allowed. - - Raises: - NotImplementedError: Always, this method is virtual. - """ - raise NotImplementedError - - -class LastUpdateOption(WriteOption): - """Option used to assert a "last update" condition on a write operation. - - This will typically be created by - :meth:`~google.cloud.firestore_v1.client.Client.write_option`. - - Args: - last_update_time (google.protobuf.timestamp_pb2.Timestamp): A - timestamp. When set, the target document must exist and have - been last updated at that time. Protobuf ``update_time`` timestamps - are typically returned from methods that perform write operations - as part of a "write result" protobuf or directly. - """ - - def __init__(self, last_update_time): - self._last_update_time = last_update_time - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._last_update_time == other._last_update_time - - def modify_write(self, write_pb, **unused_kwargs): - """Modify a ``Write`` protobuf based on the state of this write option. - - The ``last_update_time`` is added to ``write_pb`` as an "update time" - precondition. When set, the target document must exist and have been - last updated at that time. - - Args: - write_pb (google.cloud.firestore_v1.types.Write): A - ``Write`` protobuf instance to be modified with a precondition - determined by the state of this option. - unused_kwargs (Dict[str, Any]): Keyword arguments accepted by - other subclasses that are unused here. - """ - current_doc = types.Precondition(update_time=self._last_update_time) - write_pb.current_document.CopyFrom(current_doc) - - -class ExistsOption(WriteOption): - """Option used to assert existence on a write operation. - - This will typically be created by - :meth:`~google.cloud.firestore_v1.client.Client.write_option`. - - Args: - exists (bool): Indicates if the document being modified - should already exist. - """ - - def __init__(self, exists): - self._exists = exists - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._exists == other._exists - - def modify_write(self, write_pb, **unused_kwargs): - """Modify a ``Write`` protobuf based on the state of this write option. - - If: - - * ``exists=True``, adds a precondition that requires existence - * ``exists=False``, adds a precondition that requires non-existence - - Args: - write_pb (google.cloud.firestore_v1.types.Write): A - ``Write`` protobuf instance to be modified with a precondition - determined by the state of this option. - unused_kwargs (Dict[str, Any]): Keyword arguments accepted by - other subclasses that are unused here. - """ - current_doc = types.Precondition(exists=self._exists) - write_pb.current_document.CopyFrom(current_doc) diff --git a/firestore/google/cloud/firestore_v1/batch.py b/firestore/google/cloud/firestore_v1/batch.py deleted file mode 100644 index 56483af10c72..000000000000 --- a/firestore/google/cloud/firestore_v1/batch.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helpers for batch requests to the Google Cloud Firestore API.""" - - -from google.cloud.firestore_v1 import _helpers - - -class WriteBatch(object): - """Accumulate write operations to be sent in a batch. - - This has the same set of methods for write operations that - :class:`~google.cloud.firestore_v1.document.DocumentReference` does, - e.g. :meth:`~google.cloud.firestore_v1.document.DocumentReference.create`. - - Args: - client (:class:`~google.cloud.firestore_v1.client.Client`): - The client that created this batch. - """ - - def __init__(self, client): - self._client = client - self._write_pbs = [] - self.write_results = None - self.commit_time = None - - def _add_write_pbs(self, write_pbs): - """Add ``Write`` protobufs to this batch. - - This method is intended to be overridden by subclasses. - - Args: - write_pbs (List[google.cloud.proto.firestore.v1.\ - write_pb2.Write]): A list of write protobufs to be added. - """ - self._write_pbs.extend(write_pbs) - - def create(self, reference, document_data): - """Add a "change" to this batch to create a document. - - If the document given by ``reference`` already exists, then this - batch will fail when :meth:`commit`-ed. - - Args: - reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`): - A document reference to be created in this batch. - document_data (dict): Property names and values to use for - creating a document. - """ - write_pbs = _helpers.pbs_for_create(reference._document_path, document_data) - self._add_write_pbs(write_pbs) - - def set(self, reference, document_data, merge=False): - """Add a "change" to replace a document. - - See - :meth:`google.cloud.firestore_v1.document.DocumentReference.set` for - more information on how ``option`` determines how the change is - applied. - - Args: - reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`): - A document reference that will have values set in this batch. - document_data (dict): - Property names and values to use for replacing a document. - merge (Optional[bool] or Optional[List]): - If True, apply merging instead of overwriting the state - of the document. - """ - if merge is not False: - write_pbs = _helpers.pbs_for_set_with_merge( - reference._document_path, document_data, merge - ) - else: - write_pbs = _helpers.pbs_for_set_no_merge( - reference._document_path, document_data - ) - - self._add_write_pbs(write_pbs) - - def update(self, reference, field_updates, option=None): - """Add a "change" to update a document. - - See - :meth:`google.cloud.firestore_v1.document.DocumentReference.update` - for more information on ``field_updates`` and ``option``. - - Args: - reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`): - A document reference that will be updated in this batch. - field_updates (dict): - Field names or paths to update and values to update with. - option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]): - A write option to make assertions / preconditions on the server - state of the document before applying changes. 
- """ - if option.__class__.__name__ == "ExistsOption": - raise ValueError("you must not pass an explicit write option to " "update.") - write_pbs = _helpers.pbs_for_update( - reference._document_path, field_updates, option - ) - self._add_write_pbs(write_pbs) - - def delete(self, reference, option=None): - """Add a "change" to delete a document. - - See - :meth:`google.cloud.firestore_v1.document.DocumentReference.delete` - for more information on how ``option`` determines how the change is - applied. - - Args: - reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`): - A document reference that will be deleted in this batch. - option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]): - A write option to make assertions / preconditions on the server - state of the document before applying changes. - """ - write_pb = _helpers.pb_for_delete(reference._document_path, option) - self._add_write_pbs([write_pb]) - - def commit(self): - """Commit the changes accumulated in this batch. - - Returns: - List[:class:`google.cloud.proto.firestore.v1.write_pb2.WriteResult`, ...]: - The write results corresponding to the changes committed, returned - in the same order as the changes were applied to this batch. A - write result contains an ``update_time`` field. - """ - commit_response = self._client._firestore_api.commit( - self._client._database_string, - self._write_pbs, - transaction=None, - metadata=self._client._rpc_metadata, - ) - - self._write_pbs = [] - self.write_results = results = list(commit_response.write_results) - self.commit_time = commit_response.commit_time - return results - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - if exc_type is None: - self.commit() diff --git a/firestore/google/cloud/firestore_v1/client.py b/firestore/google/cloud/firestore_v1/client.py deleted file mode 100644 index da09b9ff4415..000000000000 --- a/firestore/google/cloud/firestore_v1/client.py +++ /dev/null @@ -1,619 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Client for interacting with the Google Cloud Firestore API. - -This is the base from which all interactions with the API occur. 
- -In the hierarchy of API concepts - -* a :class:`~google.cloud.firestore_v1.client.Client` owns a - :class:`~google.cloud.firestore_v1.collection.CollectionReference` -* a :class:`~google.cloud.firestore_v1.client.Client` owns a - :class:`~google.cloud.firestore_v1.document.DocumentReference` -""" -import os - -import google.api_core.client_options -from google.api_core.gapic_v1 import client_info -from google.cloud.client import ClientWithProject - -from google.cloud.firestore_v1 import _helpers -from google.cloud.firestore_v1 import __version__ -from google.cloud.firestore_v1 import query -from google.cloud.firestore_v1 import types -from google.cloud.firestore_v1.batch import WriteBatch -from google.cloud.firestore_v1.collection import CollectionReference -from google.cloud.firestore_v1.document import DocumentReference -from google.cloud.firestore_v1.document import DocumentSnapshot -from google.cloud.firestore_v1.field_path import render_field_path -from google.cloud.firestore_v1.gapic import firestore_client -from google.cloud.firestore_v1.gapic.transports import firestore_grpc_transport -from google.cloud.firestore_v1.transaction import Transaction - - -DEFAULT_DATABASE = "(default)" -"""str: The default database used in a :class:`~google.cloud.firestore_v1.client.Client`.""" -_BAD_OPTION_ERR = ( - "Exactly one of ``last_update_time`` or ``exists`` " "must be provided." -) -_BAD_DOC_TEMPLATE = ( - "Document {!r} appeared in response but was not present among references" -) -_ACTIVE_TXN = "There is already an active transaction." -_INACTIVE_TXN = "There is no active transaction." -_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) -_FIRESTORE_EMULATOR_HOST = "FIRESTORE_EMULATOR_HOST" - - -class Client(ClientWithProject): - """Client for interacting with Google Cloud Firestore API. - - .. note:: - - Since the Cloud Firestore API requires the gRPC transport, no - ``_http`` argument is accepted by this class. - - Args: - project (Optional[str]): The project which the client acts on behalf - of. If not passed, falls back to the default inferred - from the environment. - credentials (Optional[~google.auth.credentials.Credentials]): The - OAuth2 Credentials to use for this client. If not passed, falls - back to the default inferred from the environment. - database (Optional[str]): The database name that the client targets. - For now, :attr:`DEFAULT_DATABASE` (the default value) is the - only valid database. - client_info (Optional[google.api_core.gapic_v1.client_info.ClientInfo]): - The client info used to send a user-agent string along with API - requests. If ``None``, then default info will be used. Generally, - you only need to set this if you're developing your own library - or partner tool. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. 
- """ - - SCOPE = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/datastore", - ) - """The scopes required for authenticating with the Firestore service.""" - - _firestore_api_internal = None - _database_string_internal = None - _rpc_metadata_internal = None - - def __init__( - self, - project=None, - credentials=None, - database=DEFAULT_DATABASE, - client_info=_CLIENT_INFO, - client_options=None, - ): - # NOTE: This API has no use for the _http argument, but sending it - # will have no impact since the _http() @property only lazily - # creates a working HTTP object. - super(Client, self).__init__( - project=project, credentials=credentials, _http=None - ) - self._client_info = client_info - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - self._client_options = client_options - - self._database = database - self._emulator_host = os.getenv(_FIRESTORE_EMULATOR_HOST) - - @property - def _firestore_api(self): - """Lazy-loading getter GAPIC Firestore API. - - Returns: - :class:`~google.cloud.gapic.firestore.v1`.firestore_client.FirestoreClient: - >> client.collection('top') - - For a sub-collection: - - .. code-block:: python - - >>> client.collection('mydocs/doc/subcol') - >>> # is the same as - >>> client.collection('mydocs', 'doc', 'subcol') - - Sub-collections can be nested deeper in a similar fashion. - - Args: - collection_path (Tuple[str, ...]): Can either be - - * A single ``/``-delimited path to a collection - * A tuple of collection path segments - - Returns: - :class:`~google.cloud.firestore_v1.collection.CollectionReference`: - A reference to a collection in the Firestore database. - """ - if len(collection_path) == 1: - path = collection_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER) - else: - path = collection_path - - return CollectionReference(*path, client=self) - - def collection_group(self, collection_id): - """ - Creates and returns a new Query that includes all documents in the - database that are contained in a collection or subcollection with the - given collection_id. - - .. code-block:: python - - >>> query = client.collection_group('mygroup') - - @param {string} collectionId Identifies the collections to query over. - Every collection or subcollection with this ID as the last segment of its - path will be included. Cannot contain a slash. - @returns {Query} The created Query. - """ - if "/" in collection_id: - raise ValueError( - "Invalid collection_id " - + collection_id - + ". Collection IDs must not contain '/'." - ) - - collection = self.collection(collection_id) - return query.Query(collection, all_descendants=True) - - def document(self, *document_path): - """Get a reference to a document in a collection. - - For a top-level document: - - .. code-block:: python - - >>> client.document('collek/shun') - >>> # is the same as - >>> client.document('collek', 'shun') - - For a document in a sub-collection: - - .. code-block:: python - - >>> client.document('mydocs/doc/subcol/child') - >>> # is the same as - >>> client.document('mydocs', 'doc', 'subcol', 'child') - - Documents in sub-collections can be nested deeper in a similar fashion. - - Args: - document_path (Tuple[str, ...]): Can either be - - * A single ``/``-delimited path to a document - * A tuple of document path segments - - Returns: - :class:`~google.cloud.firestore_v1.document.DocumentReference`: - A reference to a document in a collection. 
- """ - if len(document_path) == 1: - path = document_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER) - else: - path = document_path - - # DocumentReference takes a relative path. Strip the database string if present. - base_path = self._database_string + "/documents/" - joined_path = _helpers.DOCUMENT_PATH_DELIMITER.join(path) - if joined_path.startswith(base_path): - joined_path = joined_path[len(base_path) :] - path = joined_path.split(_helpers.DOCUMENT_PATH_DELIMITER) - - return DocumentReference(*path, client=self) - - @staticmethod - def field_path(*field_names): - """Create a **field path** from a list of nested field names. - - A **field path** is a ``.``-delimited concatenation of the field - names. It is used to represent a nested field. For example, - in the data - - .. code-block:: python - - data = { - 'aa': { - 'bb': { - 'cc': 10, - }, - }, - } - - the field path ``'aa.bb.cc'`` represents the data stored in - ``data['aa']['bb']['cc']``. - - Args: - field_names (Tuple[str, ...]): The list of field names. - - Returns: - str: The ``.``-delimited field path. - """ - return render_field_path(field_names) - - @staticmethod - def write_option(**kwargs): - """Create a write option for write operations. - - Write operations include :meth:`~google.cloud.DocumentReference.set`, - :meth:`~google.cloud.DocumentReference.update` and - :meth:`~google.cloud.DocumentReference.delete`. - - One of the following keyword arguments must be provided: - - * ``last_update_time`` (:class:`google.protobuf.timestamp_pb2.\ - Timestamp`): A timestamp. When set, the target document must - exist and have been last updated at that time. Protobuf - ``update_time`` timestamps are typically returned from methods - that perform write operations as part of a "write result" - protobuf or directly. - * ``exists`` (:class:`bool`): Indicates if the document being modified - should already exist. - - Providing no argument would make the option have no effect (so - it is not allowed). Providing multiple would be an apparent - contradiction, since ``last_update_time`` assumes that the - document **was** updated (it can't have been updated if it - doesn't exist) and ``exists`` indicate that it is unknown if the - document exists or not. - - Args: - kwargs (Dict[str, Any]): The keyword arguments described above. - - Raises: - TypeError: If anything other than exactly one argument is - provided by the caller. - - Returns: - :class:`~google.cloud.firestore_v1.client.WriteOption`: - The option to be used to configure a write message. - """ - if len(kwargs) != 1: - raise TypeError(_BAD_OPTION_ERR) - - name, value = kwargs.popitem() - if name == "last_update_time": - return _helpers.LastUpdateOption(value) - elif name == "exists": - return _helpers.ExistsOption(value) - else: - extra = "{!r} was provided".format(name) - raise TypeError(_BAD_OPTION_ERR, extra) - - def get_all(self, references, field_paths=None, transaction=None): - """Retrieve a batch of documents. - - .. note:: - - Documents returned by this method are not guaranteed to be - returned in the same order that they are given in ``references``. - - .. note:: - - If multiple ``references`` refer to the same document, the server - will only return one result. - - See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for - more information on **field paths**. - - If a ``transaction`` is used and it already has write operations - added, this method cannot be used (i.e. read-after-write is not - allowed). 
- - Args: - references (List[.DocumentReference, ...]): Iterable of document - references to be retrieved. - field_paths (Optional[Iterable[str, ...]]): An iterable of field - paths (``.``-delimited list of field names) to use as a - projection of document fields in the returned results. If - no value is provided, all fields will be returned. - transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.Transaction`]): - An existing transaction that these ``references`` will be - retrieved in. - - Yields: - .DocumentSnapshot: The next document snapshot that fulfills the - query, or :data:`None` if the document does not exist. - """ - document_paths, reference_map = _reference_info(references) - mask = _get_doc_mask(field_paths) - response_iterator = self._firestore_api.batch_get_documents( - self._database_string, - document_paths, - mask, - transaction=_helpers.get_transaction_id(transaction), - metadata=self._rpc_metadata, - ) - - for get_doc_response in response_iterator: - yield _parse_batch_get(get_doc_response, reference_map, self) - - def collections(self): - """List top-level collections of the client's database. - - Returns: - Sequence[:class:`~google.cloud.firestore_v1.collection.CollectionReference`]: - iterator of subcollections of the current document. - """ - iterator = self._firestore_api.list_collection_ids( - "{}/documents".format(self._database_string), metadata=self._rpc_metadata - ) - iterator.client = self - iterator.item_to_value = _item_to_collection_ref - return iterator - - def batch(self): - """Get a batch instance from this client. - - Returns: - :class:`~google.cloud.firestore_v1.batch.WriteBatch`: - A "write" batch to be used for accumulating document changes and - sending the changes all at once. - """ - return WriteBatch(self) - - def transaction(self, **kwargs): - """Get a transaction that uses this client. - - See :class:`~google.cloud.firestore_v1.transaction.Transaction` for - more information on transactions and the constructor arguments. - - Args: - kwargs (Dict[str, Any]): The keyword arguments (other than - ``client``) to pass along to the - :class:`~google.cloud.firestore_v1.transaction.Transaction` - constructor. - - Returns: - :class:`~google.cloud.firestore_v1.transaction.Transaction`: - A transaction attached to this client. - """ - return Transaction(self, **kwargs) - - -def _reference_info(references): - """Get information about document references. - - Helper for :meth:`~google.cloud.firestore_v1.client.Client.get_all`. - - Args: - references (List[.DocumentReference, ...]): Iterable of document - references. - - Returns: - Tuple[List[str, ...], Dict[str, .DocumentReference]]: A two-tuple of - - * fully-qualified documents paths for each reference in ``references`` - * a mapping from the paths to the original reference. (If multiple - ``references`` contains multiple references to the same document, - that key will be overwritten in the result.) - """ - document_paths = [] - reference_map = {} - for reference in references: - doc_path = reference._document_path - document_paths.append(doc_path) - reference_map[doc_path] = reference - - return document_paths, reference_map - - -def _get_reference(document_path, reference_map): - """Get a document reference from a dictionary. - - This just wraps a simple dictionary look-up with a helpful error that is - specific to :meth:`~google.cloud.firestore.client.Client.get_all`, the - **public** caller of this function. - - Args: - document_path (str): A fully-qualified document path. 
- reference_map (Dict[str, .DocumentReference]): A mapping (produced - by :func:`_reference_info`) of fully-qualified document paths to - document references. - - Returns: - .DocumentReference: The matching reference. - - Raises: - ValueError: If ``document_path`` has not been encountered. - """ - try: - return reference_map[document_path] - except KeyError: - msg = _BAD_DOC_TEMPLATE.format(document_path) - raise ValueError(msg) - - -def _parse_batch_get(get_doc_response, reference_map, client): - """Parse a `BatchGetDocumentsResponse` protobuf. - - Args: - get_doc_response (~google.cloud.proto.firestore.v1.\ - firestore_pb2.BatchGetDocumentsResponse): A single response (from - a stream) containing the "get" response for a document. - reference_map (Dict[str, .DocumentReference]): A mapping (produced - by :func:`_reference_info`) of fully-qualified document paths to - document references. - client (:class:`~google.cloud.firestore_v1.client.Client`): - A client that has a document factory. - - Returns: - [.DocumentSnapshot]: The retrieved snapshot. - - Raises: - ValueError: If the response has a ``result`` field (a oneof) other - than ``found`` or ``missing``. - """ - result_type = get_doc_response.WhichOneof("result") - if result_type == "found": - reference = _get_reference(get_doc_response.found.name, reference_map) - data = _helpers.decode_dict(get_doc_response.found.fields, client) - snapshot = DocumentSnapshot( - reference, - data, - exists=True, - read_time=get_doc_response.read_time, - create_time=get_doc_response.found.create_time, - update_time=get_doc_response.found.update_time, - ) - elif result_type == "missing": - reference = _get_reference(get_doc_response.missing, reference_map) - snapshot = DocumentSnapshot( - reference, - None, - exists=False, - read_time=get_doc_response.read_time, - create_time=None, - update_time=None, - ) - else: - raise ValueError( - "`BatchGetDocumentsResponse.result` (a oneof) had a field other " - "than `found` or `missing` set, or was unset" - ) - return snapshot - - -def _get_doc_mask(field_paths): - """Get a document mask if field paths are provided. - - Args: - field_paths (Optional[Iterable[str, ...]]): An iterable of field - paths (``.``-delimited list of field names) to use as a - projection of document fields in the returned results. - - Returns: - Optional[google.cloud.firestore_v1.types.DocumentMask]: A mask - to project documents to a restricted set of field paths. - """ - if field_paths is None: - return None - else: - return types.DocumentMask(field_paths=field_paths) - - -def _item_to_collection_ref(iterator, item): - """Convert collection ID to collection ref. - - Args: - iterator (google.api_core.page_iterator.GRPCIterator): - iterator response - item (str): ID of the collection - """ - return iterator.client.collection(item) diff --git a/firestore/google/cloud/firestore_v1/collection.py b/firestore/google/cloud/firestore_v1/collection.py deleted file mode 100644 index 27c3eeaa3155..000000000000 --- a/firestore/google/cloud/firestore_v1/collection.py +++ /dev/null @@ -1,469 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Classes for representing collections for the Google Cloud Firestore API.""" -import random -import warnings - -import six - -from google.cloud.firestore_v1 import _helpers -from google.cloud.firestore_v1 import query as query_mod -from google.cloud.firestore_v1.watch import Watch -from google.cloud.firestore_v1 import document - -_AUTO_ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - - -class CollectionReference(object): - """A reference to a collection in a Firestore database. - - The collection may already exist or this class can facilitate creation - of documents within the collection. - - Args: - path (Tuple[str, ...]): The components in the collection path. - This is a series of strings representing each collection and - sub-collection ID, as well as the document IDs for any documents - that contain a sub-collection. - kwargs (dict): The keyword arguments for the constructor. The only - supported keyword is ``client`` and it must be a - :class:`~google.cloud.firestore_v1.client.Client` if provided. It - represents the client that created this collection reference. - - Raises: - ValueError: if - - * the ``path`` is empty - * there are an even number of elements - * a collection ID in ``path`` is not a string - * a document ID in ``path`` is not a string - TypeError: If a keyword other than ``client`` is used. - """ - - def __init__(self, *path, **kwargs): - _helpers.verify_path(path, is_collection=True) - self._path = path - self._client = kwargs.pop("client", None) - if kwargs: - raise TypeError( - "Received unexpected arguments", kwargs, "Only `client` is supported" - ) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._path == other._path and self._client == other._client - - @property - def id(self): - """The collection identifier. - - Returns: - str: The last component of the path. - """ - return self._path[-1] - - @property - def parent(self): - """Document that owns the current collection. - - Returns: - Optional[:class:`~google.cloud.firestore_v1.document.DocumentReference`]: - The parent document, if the current collection is not a - top-level collection. - """ - if len(self._path) == 1: - return None - else: - parent_path = self._path[:-1] - return self._client.document(*parent_path) - - def document(self, document_id=None): - """Create a sub-document underneath the current collection. - - Args: - document_id (Optional[str]): The document identifier - within the current collection. If not provided, will default - to a random 20 character string composed of digits, - uppercase and lowercase letters. - - Returns: - :class:`~google.cloud.firestore_v1.document.DocumentReference`: - The child document. - """ - if document_id is None: - document_id = _auto_id() - - child_path = self._path + (document_id,) - return self._client.document(*child_path) - - def _parent_info(self): - """Get fully-qualified parent path and prefix for this collection. 
- - Returns: - Tuple[str, str]: Pair of - - * the fully-qualified (with database and project) path to the - parent of this collection (will either be the database path - or a document path). - * the prefix to a document in this collection. - """ - parent_doc = self.parent - if parent_doc is None: - parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join( - (self._client._database_string, "documents") - ) - else: - parent_path = parent_doc._document_path - - expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id)) - return parent_path, expected_prefix - - def add(self, document_data, document_id=None): - """Create a document in the Firestore database with the provided data. - - Args: - document_data (dict): Property names and values to use for - creating the document. - document_id (Optional[str]): The document identifier within the - current collection. If not provided, an ID will be - automatically assigned by the server (the assigned ID will be - a random 20 character string composed of digits, - uppercase and lowercase letters). - - Returns: - Tuple[:class:`google.protobuf.timestamp_pb2.Timestamp`, \ - :class:`~google.cloud.firestore_v1.document.DocumentReference`]: - Pair of - - * The ``update_time`` when the document was created/overwritten. - * A document reference for the created document. - - Raises: - ~google.cloud.exceptions.Conflict: If ``document_id`` is provided - and the document already exists. - """ - if document_id is None: - document_id = _auto_id() - - document_ref = self.document(document_id) - write_result = document_ref.create(document_data) - return write_result.update_time, document_ref - - def list_documents(self, page_size=None): - """List all subdocuments of the current collection. - - Args: - page_size (Optional[int]]): The maximum number of documents - in each page of results from this request. Non-positive values - are ignored. Defaults to a sensible value set by the API. - - Returns: - Sequence[:class:`~google.cloud.firestore_v1.collection.DocumentReference`]: - iterator of subdocuments of the current collection. If the - collection does not exist at the time of `snapshot`, the - iterator will be empty - """ - parent, _ = self._parent_info() - - iterator = self._client._firestore_api.list_documents( - parent, - self.id, - page_size=page_size, - show_missing=True, - metadata=self._client._rpc_metadata, - ) - iterator.collection = self - iterator.item_to_value = _item_to_document_ref - return iterator - - def select(self, field_paths): - """Create a "select" query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.select` for - more information on this method. - - Args: - field_paths (Iterable[str, ...]): An iterable of field paths - (``.``-delimited list of field names) to use as a projection - of document fields in the query results. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A "projected" query. - """ - query = query_mod.Query(self) - return query.select(field_paths) - - def where(self, field_path, op_string, value): - """Create a "where" query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.where` for - more information on this method. - - Args: - field_path (str): A field path (``.``-delimited list of - field names) for the field to filter on. - op_string (str): A comparison operation in the form of a string. - Acceptable values are ``<``, ``<=``, ``==``, ``>=`` - and ``>``. 
- value (Any): The value to compare the field against in the filter. - If ``value`` is :data:`None` or a NaN, then ``==`` is the only - allowed operation. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A filtered query. - """ - query = query_mod.Query(self) - return query.where(field_path, op_string, value) - - def order_by(self, field_path, **kwargs): - """Create an "order by" query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.order_by` for - more information on this method. - - Args: - field_path (str): A field path (``.``-delimited list of - field names) on which to order the query results. - kwargs (Dict[str, Any]): The keyword arguments to pass along - to the query. The only supported keyword is ``direction``, - see :meth:`~google.cloud.firestore_v1.query.Query.order_by` - for more information. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - An "order by" query. - """ - query = query_mod.Query(self) - return query.order_by(field_path, **kwargs) - - def limit(self, count): - """Create a limited query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.limit` for - more information on this method. - - Args: - count (int): Maximum number of documents to return that match - the query. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A limited query. - """ - query = query_mod.Query(self) - return query.limit(count) - - def offset(self, num_to_skip): - """Skip to an offset in a query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.offset` for - more information on this method. - - Args: - num_to_skip (int): The number of results to skip at the beginning - of query results. (Must be non-negative.) - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - An offset query. - """ - query = query_mod.Query(self) - return query.offset(num_to_skip) - - def start_at(self, document_fields): - """Start query at a cursor with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.start_at` for - more information on this method. - - Args: - document_fields (Union[:class:`~google.cloud.firestore_v1.\ - document.DocumentSnapshot`, dict, list, tuple]): - A document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. - """ - query = query_mod.Query(self) - return query.start_at(document_fields) - - def start_after(self, document_fields): - """Start query after a cursor with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.start_after` for - more information on this method. - - Args: - document_fields (Union[:class:`~google.cloud.firestore_v1.\ - document.DocumentSnapshot`, dict, list, tuple]): - A document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. - """ - query = query_mod.Query(self) - return query.start_after(document_fields) - - def end_before(self, document_fields): - """End query before a cursor with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.end_before` for - more information on this method. 
- - Args: - document_fields (Union[:class:`~google.cloud.firestore_v1.\ - document.DocumentSnapshot`, dict, list, tuple]): - A document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. - """ - query = query_mod.Query(self) - return query.end_before(document_fields) - - def end_at(self, document_fields): - """End query at a cursor with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1.query.Query.end_at` for - more information on this method. - - Args: - document_fields (Union[:class:`~google.cloud.firestore_v1.\ - document.DocumentSnapshot`, dict, list, tuple]): - A document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. - """ - query = query_mod.Query(self) - return query.end_at(document_fields) - - def get(self, transaction=None): - """Deprecated alias for :meth:`stream`.""" - warnings.warn( - "'Collection.get' is deprecated: please use 'Collection.stream' instead.", - DeprecationWarning, - stacklevel=2, - ) - return self.stream(transaction=transaction) - - def stream(self, transaction=None): - """Read the documents in this collection. - - This sends a ``RunQuery`` RPC and then returns an iterator which - consumes each document returned in the stream of ``RunQueryResponse`` - messages. - - .. note:: - - The underlying stream of responses will time out after - the ``max_rpc_timeout_millis`` value set in the GAPIC - client configuration for the ``RunQuery`` API. Snapshots - not consumed from the iterator before that point will be lost. - - If a ``transaction`` is used and it already has write operations - added, this method cannot be used (i.e. read-after-write is not - allowed). - - Args: - transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.\ - Transaction`]): - An existing transaction that the query will run in. - - Yields: - :class:`~google.cloud.firestore_v1.document.DocumentSnapshot`: - The next document that fulfills the query. - """ - query = query_mod.Query(self) - return query.stream(transaction=transaction) - - def on_snapshot(self, callback): - """Monitor the documents in this collection. - - This starts a watch on this collection using a background thread. The - provided callback is run on the snapshot of the documents. - - Args: - callback (Callable[[:class:`~google.cloud.firestore.collection.CollectionSnapshot`], NoneType]): - a callback to run when a change occurs. - - Example: - from google.cloud import firestore_v1 - - db = firestore_v1.Client() - collection_ref = db.collection(u'users') - - def on_snapshot(collection_snapshot, changes, read_time): - for doc in collection_snapshot.documents: - print(u'{} => {}'.format(doc.id, doc.to_dict())) - - # Watch this collection - collection_watch = collection_ref.on_snapshot(on_snapshot) - - # Terminate this watch - collection_watch.unsubscribe() - """ - return Watch.for_query( - query_mod.Query(self), - callback, - document.DocumentSnapshot, - document.DocumentReference, - ) - - -def _auto_id(): - """Generate a "random" automatically generated ID. - - Returns: - str: A 20 character string composed of digits, uppercase and - lowercase and letters. 
- """ - return "".join(random.choice(_AUTO_ID_CHARS) for _ in six.moves.xrange(20)) - - -def _item_to_document_ref(iterator, item): - """Convert Document resource to document ref. - - Args: - iterator (google.api_core.page_iterator.GRPCIterator): - iterator response - item (dict): document resource - """ - document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[-1] - return iterator.collection.document(document_id) diff --git a/firestore/google/cloud/firestore_v1/document.py b/firestore/google/cloud/firestore_v1/document.py deleted file mode 100644 index 571315e87563..000000000000 --- a/firestore/google/cloud/firestore_v1/document.py +++ /dev/null @@ -1,786 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Classes for representing documents for the Google Cloud Firestore API.""" - -import copy - -import six - -from google.api_core import exceptions -from google.cloud.firestore_v1 import _helpers -from google.cloud.firestore_v1 import field_path as field_path_module -from google.cloud.firestore_v1.proto import common_pb2 -from google.cloud.firestore_v1.watch import Watch - - -class DocumentReference(object): - """A reference to a document in a Firestore database. - - The document may already exist or can be created by this class. - - Args: - path (Tuple[str, ...]): The components in the document path. - This is a series of strings representing each collection and - sub-collection ID, as well as the document IDs for any documents - that contain a sub-collection (as well as the base document). - kwargs (dict): The keyword arguments for the constructor. The only - supported keyword is ``client`` and it must be a - :class:`~google.cloud.firestore_v1.client.Client`. It represents - the client that created this document reference. - - Raises: - ValueError: if - - * the ``path`` is empty - * there are an even number of elements - * a collection ID in ``path`` is not a string - * a document ID in ``path`` is not a string - TypeError: If a keyword other than ``client`` is used. - """ - - _document_path_internal = None - - def __init__(self, *path, **kwargs): - _helpers.verify_path(path, is_collection=False) - self._path = path - self._client = kwargs.pop("client", None) - if kwargs: - raise TypeError( - "Received unexpected arguments", kwargs, "Only `client` is supported" - ) - - def __copy__(self): - """Shallow copy the instance. - - We leave the client "as-is" but tuple-unpack the path. - - Returns: - .DocumentReference: A copy of the current document. - """ - result = self.__class__(*self._path, client=self._client) - result._document_path_internal = self._document_path_internal - return result - - def __deepcopy__(self, unused_memo): - """Deep copy the instance. - - This isn't a true deep copy, wee leave the client "as-is" but - tuple-unpack the path. - - Returns: - .DocumentReference: A copy of the current document. - """ - return self.__copy__() - - def __eq__(self, other): - """Equality check against another instance. 
- - Args: - other (Any): A value to compare against. - - Returns: - Union[bool, NotImplementedType]: Indicating if the values are - equal. - """ - if isinstance(other, DocumentReference): - return self._client == other._client and self._path == other._path - else: - return NotImplemented - - def __hash__(self): - return hash(self._path) + hash(self._client) - - def __ne__(self, other): - """Inequality check against another instance. - - Args: - other (Any): A value to compare against. - - Returns: - Union[bool, NotImplementedType]: Indicating if the values are - not equal. - """ - if isinstance(other, DocumentReference): - return self._client != other._client or self._path != other._path - else: - return NotImplemented - - @property - def path(self): - """Database-relative for this document. - - Returns: - str: The document's relative path. - """ - return "/".join(self._path) - - @property - def _document_path(self): - """Create and cache the full path for this document. - - Of the form: - - ``projects/{project_id}/databases/{database_id}/... - documents/{document_path}`` - - Returns: - str: The full document path. - - Raises: - ValueError: If the current document reference has no ``client``. - """ - if self._document_path_internal is None: - if self._client is None: - raise ValueError("A document reference requires a `client`.") - self._document_path_internal = _get_document_path(self._client, self._path) - - return self._document_path_internal - - @property - def id(self): - """The document identifier (within its collection). - - Returns: - str: The last component of the path. - """ - return self._path[-1] - - @property - def parent(self): - """Collection that owns the current document. - - Returns: - :class:`~google.cloud.firestore_v1.collection.CollectionReference`: - The parent collection. - """ - parent_path = self._path[:-1] - return self._client.collection(*parent_path) - - def collection(self, collection_id): - """Create a sub-collection underneath the current document. - - Args: - collection_id (str): The sub-collection identifier (sometimes - referred to as the "kind"). - - Returns: - :class:`~google.cloud.firestore_v1.collection.CollectionReference`: - The child collection. - """ - child_path = self._path + (collection_id,) - return self._client.collection(*child_path) - - def create(self, document_data): - """Create the current document in the Firestore database. - - Args: - document_data (dict): Property names and values to use for - creating a document. - - Returns: - :class:`~google.cloud.firestore_v1.types.WriteResult`: - The write result corresponding to the committed document. - A write result contains an ``update_time`` field. - - Raises: - :class:`~google.cloud.exceptions.Conflict`: - If the document already exists. - """ - batch = self._client.batch() - batch.create(self, document_data) - write_results = batch.commit() - return _first_write_result(write_results) - - def set(self, document_data, merge=False): - """Replace the current document in the Firestore database. - - A write ``option`` can be specified to indicate preconditions of - the "set" operation. If no ``option`` is specified and this document - doesn't exist yet, this method will create it. - - Overwrites all content for the document with the fields in - ``document_data``. This method performs almost the same functionality - as :meth:`create`. 
The only difference is that this method doesn't - make any requirements on the existence of the document (unless - ``option`` is used), whereas as :meth:`create` will fail if the - document already exists. - - Args: - document_data (dict): Property names and values to use for - replacing a document. - merge (Optional[bool] or Optional[List]): - If True, apply merging instead of overwriting the state - of the document. - - Returns: - :class:`~google.cloud.firestore_v1.types.WriteResult`: - The write result corresponding to the committed document. A write - result contains an ``update_time`` field. - """ - batch = self._client.batch() - batch.set(self, document_data, merge=merge) - write_results = batch.commit() - return _first_write_result(write_results) - - def update(self, field_updates, option=None): - """Update an existing document in the Firestore database. - - By default, this method verifies that the document exists on the - server before making updates. A write ``option`` can be specified to - override these preconditions. - - Each key in ``field_updates`` can either be a field name or a - **field path** (For more information on **field paths**, see - :meth:`~google.cloud.firestore_v1.client.Client.field_path`.) To - illustrate this, consider a document with - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'bar': 'baz', - }, - 'other': True, - } - - stored on the server. If the field name is used in the update: - - .. code-block:: python - - >>> field_updates = { - ... 'foo': { - ... 'quux': 800, - ... }, - ... } - >>> document.update(field_updates) - - then all of ``foo`` will be overwritten on the server and the new - value will be - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'quux': 800, - }, - 'other': True, - } - - On the other hand, if a ``.``-delimited **field path** is used in the - update: - - .. code-block:: python - - >>> field_updates = { - ... 'foo.quux': 800, - ... } - >>> document.update(field_updates) - - then only ``foo.quux`` will be updated on the server and the - field ``foo.bar`` will remain intact: - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'bar': 'baz', - 'quux': 800, - }, - 'other': True, - } - - .. warning:: - - A **field path** can only be used as a top-level key in - ``field_updates``. - - To delete / remove a field from an existing document, use the - :attr:`~google.cloud.firestore_v1.transforms.DELETE_FIELD` sentinel. - So with the example above, sending - - .. code-block:: python - - >>> field_updates = { - ... 'other': firestore.DELETE_FIELD, - ... } - >>> document.update(field_updates) - - would update the value on the server to: - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'bar': 'baz', - }, - } - - To set a field to the current time on the server when the - update is received, use the - :attr:`~google.cloud.firestore_v1.transforms.SERVER_TIMESTAMP` - sentinel. - Sending - - .. code-block:: python - - >>> field_updates = { - ... 'foo.now': firestore.SERVER_TIMESTAMP, - ... } - >>> document.update(field_updates) - - would update the value on the server to: - - .. 
code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'bar': 'baz', - 'now': datetime.datetime(2012, ...), - }, - 'other': True, - } - - Args: - field_updates (dict): Field names or paths to update and values - to update with. - option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]): - A write option to make assertions / preconditions on the server - state of the document before applying changes. - - Returns: - :class:`~google.cloud.firestore_v1.types.WriteResult`: - The write result corresponding to the updated document. A write - result contains an ``update_time`` field. - - Raises: - ~google.cloud.exceptions.NotFound: If the document does not exist. - """ - batch = self._client.batch() - batch.update(self, field_updates, option=option) - write_results = batch.commit() - return _first_write_result(write_results) - - def delete(self, option=None): - """Delete the current document in the Firestore database. - - Args: - option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]): - A write option to make assertions / preconditions on the server - state of the document before applying changes. - - Returns: - :class:`google.protobuf.timestamp_pb2.Timestamp`: - The time that the delete request was received by the server. - If the document did not exist when the delete was sent (i.e. - nothing was deleted), this method will still succeed and will - still return the time that the request was received by the server. - """ - write_pb = _helpers.pb_for_delete(self._document_path, option) - commit_response = self._client._firestore_api.commit( - self._client._database_string, - [write_pb], - transaction=None, - metadata=self._client._rpc_metadata, - ) - - return commit_response.commit_time - - def get(self, field_paths=None, transaction=None): - """Retrieve a snapshot of the current document. - - See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for - more information on **field paths**. - - If a ``transaction`` is used and it already has write operations - added, this method cannot be used (i.e. read-after-write is not - allowed). - - Args: - field_paths (Optional[Iterable[str, ...]]): An iterable of field - paths (``.``-delimited list of field names) to use as a - projection of document fields in the returned results. If - no value is provided, all fields will be returned. - transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.Transaction`]): - An existing transaction that this reference - will be retrieved in. - - Returns: - :class:`~google.cloud.firestore_v1.document.DocumentSnapshot`: - A snapshot of the current document. If the document does not - exist at the time of the snapshot is taken, the snapshot's - :attr:`reference`, :attr:`data`, :attr:`update_time`, and - :attr:`create_time` attributes will all be ``None`` and - its :attr:`exists` attribute will be ``False``. 
- """ - if isinstance(field_paths, six.string_types): - raise ValueError("'field_paths' must be a sequence of paths, not a string.") - - if field_paths is not None: - mask = common_pb2.DocumentMask(field_paths=sorted(field_paths)) - else: - mask = None - - firestore_api = self._client._firestore_api - try: - document_pb = firestore_api.get_document( - self._document_path, - mask=mask, - transaction=_helpers.get_transaction_id(transaction), - metadata=self._client._rpc_metadata, - ) - except exceptions.NotFound: - data = None - exists = False - create_time = None - update_time = None - else: - data = _helpers.decode_dict(document_pb.fields, self._client) - exists = True - create_time = document_pb.create_time - update_time = document_pb.update_time - - return DocumentSnapshot( - reference=self, - data=data, - exists=exists, - read_time=None, # No server read_time available - create_time=create_time, - update_time=update_time, - ) - - def collections(self, page_size=None): - """List subcollections of the current document. - - Args: - page_size (Optional[int]]): The maximum number of collections - in each page of results from this request. Non-positive values - are ignored. Defaults to a sensible value set by the API. - - Returns: - Sequence[:class:`~google.cloud.firestore_v1.collection.CollectionReference`]: - iterator of subcollections of the current document. If the - document does not exist at the time of `snapshot`, the - iterator will be empty - """ - iterator = self._client._firestore_api.list_collection_ids( - self._document_path, - page_size=page_size, - metadata=self._client._rpc_metadata, - ) - iterator.document = self - iterator.item_to_value = _item_to_collection_ref - return iterator - - def on_snapshot(self, callback): - """Watch this document. - - This starts a watch on this document using a background thread. The - provided callback is run on the snapshot. - - Args: - callback(Callable[[:class:`~google.cloud.firestore.document.DocumentSnapshot`], NoneType]): - a callback to run when a change occurs - - Example: - - .. code-block:: python - - from google.cloud import firestore_v1 - - db = firestore_v1.Client() - collection_ref = db.collection(u'users') - - def on_snapshot(document_snapshot, changes, read_time): - doc = document_snapshot - print(u'{} => {}'.format(doc.id, doc.to_dict())) - - doc_ref = db.collection(u'users').document( - u'alovelace' + unique_resource_id()) - - # Watch this document - doc_watch = doc_ref.on_snapshot(on_snapshot) - - # Terminate this watch - doc_watch.unsubscribe() - """ - return Watch.for_document(self, callback, DocumentSnapshot, DocumentReference) - - -class DocumentSnapshot(object): - """A snapshot of document data in a Firestore database. - - This represents data retrieved at a specific time and may not contain - all fields stored for the document (i.e. a hand-picked selection of - fields may have been retrieved). - - Instances of this class are not intended to be constructed by hand, - rather they'll be returned as responses to various methods, such as - :meth:`~google.cloud.DocumentReference.get`. - - Args: - reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`): - A document reference corresponding to the document that contains - the data in this snapshot. - data (Dict[str, Any]): - The data retrieved in the snapshot. - exists (bool): - Indicates if the document existed at the time the snapshot was - retrieved. 
- read_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): - The time that this snapshot was read from the server. - create_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): - The time that this document was created. - update_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): - The time that this document was last updated. - """ - - def __init__(self, reference, data, exists, read_time, create_time, update_time): - self._reference = reference - # We want immutable data, so callers can't modify this value - # out from under us. - self._data = copy.deepcopy(data) - self._exists = exists - self.read_time = read_time - self.create_time = create_time - self.update_time = update_time - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._reference == other._reference and self._data == other._data - - def __hash__(self): - seconds = self.update_time.seconds - nanos = self.update_time.nanos - return hash(self._reference) + hash(seconds) + hash(nanos) - - @property - def _client(self): - """The client that owns the document reference for this snapshot. - - Returns: - :class:`~google.cloud.firestore_v1.client.Client`: - The client that owns this document. - """ - return self._reference._client - - @property - def exists(self): - """Existence flag. - - Indicates if the document existed at the time this snapshot - was retrieved. - - Returns: - bool: The existence flag. - """ - return self._exists - - @property - def id(self): - """The document identifier (within its collection). - - Returns: - str: The last component of the path of the document. - """ - return self._reference.id - - @property - def reference(self): - """Document reference corresponding to document that owns this data. - - Returns: - :class:`~google.cloud.firestore_v1.document.DocumentReference`: - A document reference corresponding to this document. - """ - return self._reference - - def get(self, field_path): - """Get a value from the snapshot data. - - If the data is nested, for example: - - .. code-block:: python - - >>> snapshot.to_dict() - { - 'top1': { - 'middle2': { - 'bottom3': 20, - 'bottom4': 22, - }, - 'middle5': True, - }, - 'top6': b'\x00\x01 foo', - } - - a **field path** can be used to access the nested data. For - example: - - .. code-block:: python - - >>> snapshot.get('top1') - { - 'middle2': { - 'bottom3': 20, - 'bottom4': 22, - }, - 'middle5': True, - } - >>> snapshot.get('top1.middle2') - { - 'bottom3': 20, - 'bottom4': 22, - } - >>> snapshot.get('top1.middle2.bottom3') - 20 - - See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for - more information on **field paths**. - - A copy is returned since the data may contain mutable values, - but the data stored in the snapshot must remain immutable. - - Args: - field_path (str): A field path (``.``-delimited list of - field names). - - Returns: - Any or None: - (A copy of) the value stored for the ``field_path`` or - None if snapshot document does not exist. - - Raises: - KeyError: If the ``field_path`` does not match nested data - in the snapshot. - """ - if not self._exists: - return None - nested_data = field_path_module.get_nested_value(field_path, self._data) - return copy.deepcopy(nested_data) - - def to_dict(self): - """Retrieve the data contained in this snapshot. - - A copy is returned since the data may contain mutable values, - but the data stored in the snapshot must remain immutable. - - Returns: - Dict[str, Any] or None: - The data in the snapshot. 
Returns None if reference - does not exist. - """ - if not self._exists: - return None - return copy.deepcopy(self._data) - - -def _get_document_path(client, path): - """Convert a path tuple into a full path string. - - Of the form: - - ``projects/{project_id}/databases/{database_id}/... - documents/{document_path}`` - - Args: - client (:class:`~google.cloud.firestore_v1.client.Client`): - The client that holds configuration details and a GAPIC client - object. - path (Tuple[str, ...]): The components in a document path. - - Returns: - str: The fully-qualified document path. - """ - parts = (client._database_string, "documents") + path - return _helpers.DOCUMENT_PATH_DELIMITER.join(parts) - - -def _consume_single_get(response_iterator): - """Consume a gRPC stream that should contain a single response. - - The stream will correspond to a ``BatchGetDocuments`` request made - for a single document. - - Args: - response_iterator (~google.cloud.exceptions.GrpcRendezvous): A - streaming iterator returned from a ``BatchGetDocuments`` - request. - - Returns: - ~google.cloud.proto.firestore.v1.\ - firestore_pb2.BatchGetDocumentsResponse: The single "get" - response in the batch. - - Raises: - ValueError: If anything other than exactly one response is returned. - """ - # Calling ``list()`` consumes the entire iterator. - all_responses = list(response_iterator) - if len(all_responses) != 1: - raise ValueError( - "Unexpected response from `BatchGetDocumentsResponse`", - all_responses, - "Expected only one result", - ) - - return all_responses[0] - - -def _first_write_result(write_results): - """Get first write result from list. - - For cases where ``len(write_results) > 1``, this assumes the writes - occurred at the same time (e.g. if an update and transform are sent - at the same time). - - Args: - write_results (List[google.cloud.proto.firestore.v1.\ - write_pb2.WriteResult, ...]: The write results from a - ``CommitResponse``. - - Returns: - google.cloud.firestore_v1.types.WriteResult: The - lone write result from ``write_results``. - - Raises: - ValueError: If there are zero write results. This is likely to - **never** occur, since the backend should be stable. - """ - if not write_results: - raise ValueError("Expected at least one write result") - - return write_results[0] - - -def _item_to_collection_ref(iterator, item): - """Convert collection ID to collection ref. - - Args: - iterator (google.api_core.page_iterator.GRPCIterator): - iterator response - item (str): ID of the collection - """ - return iterator.document.collection(item) diff --git a/firestore/google/cloud/firestore_v1/field_path.py b/firestore/google/cloud/firestore_v1/field_path.py deleted file mode 100644 index 58b4f3b9acd3..000000000000 --- a/firestore/google/cloud/firestore_v1/field_path.py +++ /dev/null @@ -1,395 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
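Taken together, the ``DocumentReference`` and ``DocumentSnapshot`` classes removed above support a simple write-then-read round trip. A minimal sketch, assuming a configured ``Client`` and using made-up collection, document, and field names (not part of the deleted sources):

.. code-block:: python

    from google.cloud import firestore_v1

    db = firestore_v1.Client()
    doc_ref = db.collection(u'users').document(u'alovelace')

    # Create or overwrite the document, then patch one nested field
    # using a dotted field path.
    doc_ref.set({u'first': u'Ada', u'last': u'Lovelace'})
    doc_ref.update({u'details.born': 1815})

    # Read it back; a missing document yields a snapshot whose
    # `exists` attribute is False.
    snapshot = doc_ref.get()
    if snapshot.exists:
        print(snapshot.id, snapshot.to_dict())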
- -"""Utilities for managing / converting field paths to / from strings.""" - -try: - from collections import abc as collections_abc -except ImportError: # Python 2.7 - import collections as collections_abc - -import re - -import six - - -_FIELD_PATH_MISSING_TOP = "{!r} is not contained in the data" -_FIELD_PATH_MISSING_KEY = "{!r} is not contained in the data for the key {!r}" -_FIELD_PATH_WRONG_TYPE = ( - "The data at {!r} is not a dictionary, so it cannot contain the key {!r}" -) - -_FIELD_PATH_DELIMITER = "." -_BACKSLASH = "\\" -_ESCAPED_BACKSLASH = _BACKSLASH * 2 -_BACKTICK = "`" -_ESCAPED_BACKTICK = _BACKSLASH + _BACKTICK - -_SIMPLE_FIELD_NAME = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*$") -_LEADING_ALPHA_INVALID = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*[^_a-zA-Z0-9]") -PATH_ELEMENT_TOKENS = [ - ("SIMPLE", r"[_a-zA-Z][_a-zA-Z0-9]*"), # unquoted elements - ("QUOTED", r"`(?:\\`|[^`])*?`"), # quoted elements, unquoted - ("DOT", r"\."), # separator -] -TOKENS_PATTERN = "|".join("(?P<{}>{})".format(*pair) for pair in PATH_ELEMENT_TOKENS) -TOKENS_REGEX = re.compile(TOKENS_PATTERN) - - -def _tokenize_field_path(path): - """Lex a field path into tokens (including dots). - - Args: - path (str): field path to be lexed. - Returns: - List(str): tokens - """ - pos = 0 - get_token = TOKENS_REGEX.match - match = get_token(path) - while match is not None: - type_ = match.lastgroup - value = match.group(type_) - yield value - pos = match.end() - match = get_token(path, pos) - if pos != len(path): - raise ValueError("Path {} not consumed, residue: {}".format(path, path[pos:])) - - -def split_field_path(path): - """Split a field path into valid elements (without dots). - - Args: - path (str): field path to be lexed. - Returns: - List(str): tokens - Raises: - ValueError: if the path does not match the elements-interspersed- - with-dots pattern. - """ - if not path: - return [] - - elements = [] - want_dot = False - - for element in _tokenize_field_path(path): - if want_dot: - if element != ".": - raise ValueError("Invalid path: {}".format(path)) - else: - want_dot = False - else: - if element == ".": - raise ValueError("Invalid path: {}".format(path)) - elements.append(element) - want_dot = True - - if not want_dot or not elements: - raise ValueError("Invalid path: {}".format(path)) - - return elements - - -def parse_field_path(api_repr): - """Parse a **field path** from into a list of nested field names. - - See :func:`field_path` for more on **field paths**. - - Args: - api_repr (str): - The unique Firestore api representation which consists of - either simple or UTF-8 field names. It cannot exceed - 1500 bytes, and cannot be empty. Simple field names match - ``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are - escaped by surrounding them with backticks. - - Returns: - List[str, ...]: The list of field names in the field path. - """ - # code dredged back up from - # https://github.com/googleapis/google-cloud-python/pull/5109/files - field_names = [] - for field_name in split_field_path(api_repr): - # non-simple field name - if field_name[0] == "`" and field_name[-1] == "`": - field_name = field_name[1:-1] - field_name = field_name.replace(_ESCAPED_BACKTICK, _BACKTICK) - field_name = field_name.replace(_ESCAPED_BACKSLASH, _BACKSLASH) - field_names.append(field_name) - return field_names - - -def render_field_path(field_names): - """Create a **field path** from a list of nested field names. - - A **field path** is a ``.``-delimited concatenation of the field - names. It is used to represent a nested field. 
For example, - in the data - - .. code-block: python - - data = { - 'aa': { - 'bb': { - 'cc': 10, - }, - }, - } - - the field path ``'aa.bb.cc'`` represents that data stored in - ``data['aa']['bb']['cc']``. - - Args: - field_names (Iterable[str, ...]): The list of field names. - - Returns: - str: The ``.``-delimited field path. - """ - result = [] - - for field_name in field_names: - match = _SIMPLE_FIELD_NAME.match(field_name) - if match and match.group(0) == field_name: - result.append(field_name) - else: - replaced = field_name.replace(_BACKSLASH, _ESCAPED_BACKSLASH).replace( - _BACKTICK, _ESCAPED_BACKTICK - ) - result.append(_BACKTICK + replaced + _BACKTICK) - - return _FIELD_PATH_DELIMITER.join(result) - - -get_field_path = render_field_path # backward-compatibility - - -def get_nested_value(field_path, data): - """Get a (potentially nested) value from a dictionary. - - If the data is nested, for example: - - .. code-block:: python - - >>> data - { - 'top1': { - 'middle2': { - 'bottom3': 20, - 'bottom4': 22, - }, - 'middle5': True, - }, - 'top6': b'\x00\x01 foo', - } - - a **field path** can be used to access the nested data. For - example: - - .. code-block:: python - - >>> get_nested_value('top1', data) - { - 'middle2': { - 'bottom3': 20, - 'bottom4': 22, - }, - 'middle5': True, - } - >>> get_nested_value('top1.middle2', data) - { - 'bottom3': 20, - 'bottom4': 22, - } - >>> get_nested_value('top1.middle2.bottom3', data) - 20 - - See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for - more information on **field paths**. - - Args: - field_path (str): A field path (``.``-delimited list of - field names). - data (Dict[str, Any]): The (possibly nested) data. - - Returns: - Any: (A copy of) the value stored for the ``field_path``. - - Raises: - KeyError: If the ``field_path`` does not match nested data. - """ - field_names = parse_field_path(field_path) - - nested_data = data - for index, field_name in enumerate(field_names): - if isinstance(nested_data, collections_abc.Mapping): - if field_name in nested_data: - nested_data = nested_data[field_name] - else: - if index == 0: - msg = _FIELD_PATH_MISSING_TOP.format(field_name) - raise KeyError(msg) - else: - partial = render_field_path(field_names[:index]) - msg = _FIELD_PATH_MISSING_KEY.format(field_name, partial) - raise KeyError(msg) - else: - partial = render_field_path(field_names[:index]) - msg = _FIELD_PATH_WRONG_TYPE.format(partial, field_name) - raise KeyError(msg) - - return nested_data - - -class FieldPath(object): - """Field Path object for client use. - - A field path is a sequence of element keys, separated by periods. - Each element key can be either a simple identifier, or a full unicode - string. - - In the string representation of a field path, non-identifier elements - must be quoted using backticks, with internal backticks and backslashes - escaped with a backslash. - - Args: - parts: (one or more strings) - Indicating path of the key to be used. - """ - - def __init__(self, *parts): - for part in parts: - if not isinstance(part, six.string_types) or not part: - error = "One or more components is not a string or is empty." - raise ValueError(error) - self.parts = tuple(parts) - - @classmethod - def from_api_repr(cls, api_repr): - """Factory: create a FieldPath from the string formatted per the API. - - Args: - api_repr (str): a string path, with non-identifier elements quoted - It cannot exceed 1500 characters, and cannot be empty. - Returns: - (:class:`FieldPath`) An instance parsed from ``api_repr``. 
- Raises: - ValueError if the parsing fails - """ - api_repr = api_repr.strip() - if not api_repr: - raise ValueError("Field path API representation cannot be empty.") - return cls(*parse_field_path(api_repr)) - - @classmethod - def from_string(cls, path_string): - """Factory: create a FieldPath from a unicode string representation. - - This method splits on the character `.` and disallows the - characters `~*/[]`. To create a FieldPath whose components have - those characters, call the constructor. - - Args: - path_string (str): A unicode string which cannot contain - `~*/[]` characters, cannot exceed 1500 bytes, and cannot be empty. - - Returns: - (:class:`FieldPath`) An instance parsed from ``path_string``. - """ - try: - return cls.from_api_repr(path_string) - except ValueError: - elements = path_string.split(".") - for element in elements: - if not element: - raise ValueError("Empty element") - if _LEADING_ALPHA_INVALID.match(element): - raise ValueError( - "Non-alphanum char in element with leading alpha: {}".format( - element - ) - ) - return FieldPath(*elements) - - def __repr__(self): - paths = "" - for part in self.parts: - paths += "'" + part + "'," - paths = paths[:-1] - return "FieldPath({})".format(paths) - - def __hash__(self): - return hash(self.to_api_repr()) - - def __eq__(self, other): - if isinstance(other, FieldPath): - return self.parts == other.parts - return NotImplemented - - def __lt__(self, other): - if isinstance(other, FieldPath): - return self.parts < other.parts - return NotImplemented - - def __add__(self, other): - """Adds `other` field path to end of this field path. - - Args: - other (~google.cloud.firestore_v1._helpers.FieldPath, str): - The field path to add to the end of this `FieldPath`. - """ - if isinstance(other, FieldPath): - parts = self.parts + other.parts - return FieldPath(*parts) - elif isinstance(other, six.string_types): - parts = self.parts + FieldPath.from_string(other).parts - return FieldPath(*parts) - else: - return NotImplemented - - def to_api_repr(self): - """Render a quoted string representation of the FieldPath - - Returns: - (str) Quoted string representation of the path stored - within this FieldPath. - """ - return render_field_path(self.parts) - - def eq_or_parent(self, other): - """Check whether ``other`` is an ancestor. - - Returns: - (bool) True IFF ``other`` is an ancestor or equal to ``self``, - else False. - """ - return self.parts[: len(other.parts)] == other.parts[: len(self.parts)] - - def lineage(self): - """Return field paths for all parents. - - Returns: Set[:class:`FieldPath`] - """ - indexes = six.moves.range(1, len(self.parts)) - return {FieldPath(*self.parts[:index]) for index in indexes} - - @staticmethod - def document_id(): - """A special FieldPath value to refer to the ID of a document. It can be used - in queries to sort or filter by the document ID. - - Returns: A special sentinel value to refer to the ID of a document. 
- """ - return "__name__" diff --git a/firestore/google/cloud/firestore_v1/gapic/__init__.py b/firestore/google/cloud/firestore_v1/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_v1/gapic/enums.py b/firestore/google/cloud/firestore_v1/gapic/enums.py deleted file mode 100644 index ee7a9ec6f589..000000000000 --- a/firestore/google/cloud/firestore_v1/gapic/enums.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class NullValue(enum.IntEnum): - """ - ``NullValue`` is a singleton enumeration to represent the null value for - the ``Value`` type union. - - The JSON representation for ``NullValue`` is JSON ``null``. - - Attributes: - NULL_VALUE (int): Null value. - """ - - NULL_VALUE = 0 - - -class DocumentTransform(object): - class FieldTransform(object): - class ServerValue(enum.IntEnum): - """ - A value that is calculated by the server. - - Attributes: - SERVER_VALUE_UNSPECIFIED (int): Unspecified. This value must not be used. - REQUEST_TIME (int): The time at which the server processed the request, with millisecond - precision. - """ - - SERVER_VALUE_UNSPECIFIED = 0 - REQUEST_TIME = 1 - - -class StructuredQuery(object): - class Direction(enum.IntEnum): - """ - A sort direction. - - Attributes: - DIRECTION_UNSPECIFIED (int): Unspecified. - ASCENDING (int): Ascending. - DESCENDING (int): Descending. - """ - - DIRECTION_UNSPECIFIED = 0 - ASCENDING = 1 - DESCENDING = 2 - - class CompositeFilter(object): - class Operator(enum.IntEnum): - """ - A composite filter operator. - - Attributes: - OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be used. - AND (int): The results are required to satisfy each of the combined filters. - """ - - OPERATOR_UNSPECIFIED = 0 - AND = 1 - - class FieldFilter(object): - class Operator(enum.IntEnum): - """ - A field filter operator. - - Attributes: - OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be used. - LESS_THAN (int): Less than. Requires that the field come first in ``order_by``. - LESS_THAN_OR_EQUAL (int): Less than or equal. Requires that the field come first in ``order_by``. - GREATER_THAN (int): Greater than. Requires that the field come first in ``order_by``. - GREATER_THAN_OR_EQUAL (int): Greater than or equal. Requires that the field come first in - ``order_by``. - EQUAL (int): Equal. - ARRAY_CONTAINS (int): Contains. Requires that the field is an array. - IN (int): In. Requires that ``value`` is a non-empty ArrayValue with at most 10 - values. - ARRAY_CONTAINS_ANY (int): Contains any. Requires that the field is an array and ``value`` is a - non-empty ArrayValue with at most 10 values. 
- """ - - OPERATOR_UNSPECIFIED = 0 - LESS_THAN = 1 - LESS_THAN_OR_EQUAL = 2 - GREATER_THAN = 3 - GREATER_THAN_OR_EQUAL = 4 - EQUAL = 5 - ARRAY_CONTAINS = 7 - IN = 8 - ARRAY_CONTAINS_ANY = 9 - - class UnaryFilter(object): - class Operator(enum.IntEnum): - """ - A unary operator. - - Attributes: - OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be used. - IS_NAN (int): Test if a field is equal to NaN. - IS_NULL (int): Test if an expression evaluates to Null. - """ - - OPERATOR_UNSPECIFIED = 0 - IS_NAN = 2 - IS_NULL = 3 - - -class TargetChange(object): - class TargetChangeType(enum.IntEnum): - """ - The type of change. - - Attributes: - NO_CHANGE (int): No change has occurred. Used only to send an updated ``resume_token``. - ADD (int): The targets have been added. - REMOVE (int): The targets have been removed. - CURRENT (int): The targets reflect all changes committed before the targets were added - to the stream. - - This will be sent after or with a ``read_time`` that is greater than or - equal to the time at which the targets were added. - - Listeners can wait for this change if read-after-write semantics are - desired. - RESET (int): The targets have been reset, and a new initial state for the targets - will be returned in subsequent changes. - - After the initial state is complete, ``CURRENT`` will be returned even - if the target was previously indicated to be ``CURRENT``. - """ - - NO_CHANGE = 0 - ADD = 1 - REMOVE = 2 - CURRENT = 3 - RESET = 4 diff --git a/firestore/google/cloud/firestore_v1/gapic/firestore_client.py b/firestore/google/cloud/firestore_v1/gapic/firestore_client.py deleted file mode 100644 index d6f3e3320698..000000000000 --- a/firestore/google/cloud/firestore_v1/gapic/firestore_client.py +++ /dev/null @@ -1,1452 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.firestore.v1 Firestore API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.firestore_v1.gapic import enums -from google.cloud.firestore_v1.gapic import firestore_client_config -from google.cloud.firestore_v1.gapic.transports import firestore_grpc_transport -from google.cloud.firestore_v1.proto import common_pb2 -from google.cloud.firestore_v1.proto import document_pb2 -from google.cloud.firestore_v1.proto import firestore_pb2 -from google.cloud.firestore_v1.proto import firestore_pb2_grpc -from google.cloud.firestore_v1.proto import query_pb2 -from google.cloud.firestore_v1.proto import write_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import timestamp_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-firestore" -).version - - -class FirestoreClient(object): - """ - The Cloud Firestore service. - - Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL - document database that simplifies storing, syncing, and querying data for - your mobile, web, and IoT apps at global scale. Its client libraries provide - live synchronization and offline support, while its security features and - integrations with Firebase and Google Cloud Platform (GCP) accelerate - building truly serverless apps. - """ - - SERVICE_ADDRESS = "firestore.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.firestore.v1.Firestore" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FirestoreClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def any_path_path(cls, project, database, document, any_path): - """Return a fully-qualified any_path string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/documents/{document}/{any_path=**}", - project=project, - database=database, - document=document, - any_path=any_path, - ) - - @classmethod - def database_root_path(cls, project, database): - """Return a fully-qualified database_root string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}", - project=project, - database=database, - ) - - @classmethod - def document_path_path(cls, project, database, document_path): - """Return a fully-qualified document_path string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/documents/{document_path=**}", - project=project, - database=database, - document_path=document_path, - ) - - @classmethod - def document_root_path(cls, project, database): - """Return a fully-qualified document_root string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/documents", - project=project, - database=database, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.FirestoreGrpcTransport, - Callable[[~.Credentials, type], ~.FirestoreGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = firestore_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=firestore_grpc_transport.FirestoreGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = firestore_grpc_transport.FirestoreGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def get_document( - self, - name, - mask=None, - transaction=None, - read_time=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single document. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> response = client.get_document(name) - - Args: - name (str): Required. The resource name of the Document to get. In the format: - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - mask (Union[dict, ~google.cloud.firestore_v1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If the document has a field that is not present in this mask, that field - will not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.DocumentMask` - transaction (bytes): Reads the document in a transaction. - read_time (Union[dict, ~google.cloud.firestore_v1.types.Timestamp]): Reads the version of the document at the given time. - This may not be older than 60 seconds. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Timestamp` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1.types.Document` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_document" not in self._inner_api_calls: - self._inner_api_calls[ - "get_document" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_document, - default_retry=self._method_configs["GetDocument"].retry, - default_timeout=self._method_configs["GetDocument"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - transaction=transaction, read_time=read_time - ) - - request = firestore_pb2.GetDocumentRequest( - name=name, mask=mask, transaction=transaction, read_time=read_time - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_document"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_documents( - self, - parent, - collection_id, - page_size=None, - order_by=None, - mask=None, - transaction=None, - read_time=None, - show_missing=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists documents. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> # TODO: Initialize `collection_id`: - >>> collection_id = '' - >>> - >>> # Iterate over all results - >>> for element in client.list_documents(parent, collection_id): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_documents(parent, collection_id).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The parent resource name. In the format: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - For example: ``projects/my-project/databases/my-database/documents`` or - ``projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`` - collection_id (str): Required. The collection ID, relative to ``parent``, to list. For - example: ``chatrooms`` or ``messages``. 
- page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - order_by (str): The order to sort results by. For example: ``priority desc, name``. - mask (Union[dict, ~google.cloud.firestore_v1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If a document has a field that is not present in this mask, that field - will not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.DocumentMask` - transaction (bytes): Reads documents in a transaction. - read_time (Union[dict, ~google.cloud.firestore_v1.types.Timestamp]): Reads documents as they were at the given time. - This may not be older than 60 seconds. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Timestamp` - show_missing (bool): If the list should show missing documents. A missing document is a - document that does not exist but has sub-documents. These documents will - be returned with a key but will not have fields, - ``Document.create_time``, or ``Document.update_time`` set. - - Requests with ``show_missing`` may not specify ``where`` or - ``order_by``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.firestore_v1.types.Document` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_documents" not in self._inner_api_calls: - self._inner_api_calls[ - "list_documents" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_documents, - default_retry=self._method_configs["ListDocuments"].retry, - default_timeout=self._method_configs["ListDocuments"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. 
- google.api_core.protobuf_helpers.check_oneof( - transaction=transaction, read_time=read_time - ) - - request = firestore_pb2.ListDocumentsRequest( - parent=parent, - collection_id=collection_id, - page_size=page_size, - order_by=order_by, - mask=mask, - transaction=transaction, - read_time=read_time, - show_missing=show_missing, - ) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_documents"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="documents", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def create_document( - self, - parent, - collection_id, - document_id, - document, - mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new document. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> # TODO: Initialize `collection_id`: - >>> collection_id = '' - >>> - >>> # TODO: Initialize `document_id`: - >>> document_id = '' - >>> - >>> # TODO: Initialize `document`: - >>> document = {} - >>> - >>> response = client.create_document(parent, collection_id, document_id, document) - - Args: - parent (str): Required. The parent resource. For example: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}`` - collection_id (str): Required. The collection ID, relative to ``parent``, to list. For - example: ``chatrooms``. - document_id (str): The client-assigned document ID to use for this document. - - Optional. If not specified, an ID will be assigned by the service. - document (Union[dict, ~google.cloud.firestore_v1.types.Document]): Required. The document to create. ``name`` must not be set. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Document` - mask (Union[dict, ~google.cloud.firestore_v1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If the document has a field that is not present in this mask, that field - will not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.DocumentMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1.types.Document` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_document" not in self._inner_api_calls: - self._inner_api_calls[ - "create_document" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_document, - default_retry=self._method_configs["CreateDocument"].retry, - default_timeout=self._method_configs["CreateDocument"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.CreateDocumentRequest( - parent=parent, - collection_id=collection_id, - document_id=document_id, - document=document, - mask=mask, - ) - return self._inner_api_calls["create_document"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_document( - self, - document, - update_mask, - mask=None, - current_document=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates or inserts a document. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> # TODO: Initialize `document`: - >>> document = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_document(document, update_mask) - - Args: - document (Union[dict, ~google.cloud.firestore_v1.types.Document]): Required. The updated document. - Creates the document if it does not already exist. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Document` - update_mask (Union[dict, ~google.cloud.firestore_v1.types.DocumentMask]): The fields to update. - None of the field paths in the mask may contain a reserved name. - - If the document exists on the server and has fields not referenced in the - mask, they are left unchanged. - Fields referenced in the mask, but not present in the input document, are - deleted from the document on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.DocumentMask` - mask (Union[dict, ~google.cloud.firestore_v1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If the document has a field that is not present in this mask, that field - will not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.DocumentMask` - current_document (Union[dict, ~google.cloud.firestore_v1.types.Precondition]): An optional precondition on the document. - The request will fail if this is set and not met by the target document. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Precondition` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1.types.Document` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - # Wrap the transport method to add retry and timeout logic. - if "update_document" not in self._inner_api_calls: - self._inner_api_calls[ - "update_document" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_document, - default_retry=self._method_configs["UpdateDocument"].retry, - default_timeout=self._method_configs["UpdateDocument"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.UpdateDocumentRequest( - document=document, - update_mask=update_mask, - mask=mask, - current_document=current_document, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("document.name", document.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_document"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_document( - self, - name, - current_document=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a document. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> client.delete_document(name) - - Args: - name (str): Required. The resource name of the Document to delete. In the format: - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - current_document (Union[dict, ~google.cloud.firestore_v1.types.Precondition]): An optional precondition on the document. - The request will fail if this is set and not met by the target document. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Precondition` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_document" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_document" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_document, - default_retry=self._method_configs["DeleteDocument"].retry, - default_timeout=self._method_configs["DeleteDocument"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.DeleteDocumentRequest( - name=name, current_document=current_document - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_document"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def batch_get_documents( - self, - database, - documents, - mask=None, - transaction=None, - new_transaction=None, - read_time=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets multiple documents. - - Documents returned by this method are not guaranteed to be returned in the - same order that they were requested. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> - >>> # TODO: Initialize `documents`: - >>> documents = [] - >>> - >>> for element in client.batch_get_documents(database, documents): - ... # process element - ... pass - - Args: - database (str): Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - documents (list[str]): The names of the documents to retrieve. In the format: - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - The request will fail if any of the document is not a child resource of - the given ``database``. Duplicate names will be elided. - mask (Union[dict, ~google.cloud.firestore_v1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If a document has a field that is not present in this mask, that field will - not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.DocumentMask` - transaction (bytes): Reads documents in a transaction. - new_transaction (Union[dict, ~google.cloud.firestore_v1.types.TransactionOptions]): Starts a new transaction and reads the documents. - Defaults to a read-only transaction. - The new transaction ID will be returned as the first response in the - stream. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.TransactionOptions` - read_time (Union[dict, ~google.cloud.firestore_v1.types.Timestamp]): Reads documents as they were at the given time. - This may not be older than 60 seconds. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Timestamp` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.firestore_v1.types.BatchGetDocumentsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "batch_get_documents" not in self._inner_api_calls: - self._inner_api_calls[ - "batch_get_documents" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.batch_get_documents, - default_retry=self._method_configs["BatchGetDocuments"].retry, - default_timeout=self._method_configs["BatchGetDocuments"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - transaction=transaction, - new_transaction=new_transaction, - read_time=read_time, - ) - - request = firestore_pb2.BatchGetDocumentsRequest( - database=database, - documents=documents, - mask=mask, - transaction=transaction, - new_transaction=new_transaction, - read_time=read_time, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["batch_get_documents"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def begin_transaction( - self, - database, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts a new transaction. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> - >>> response = client.begin_transaction(database) - - Args: - database (str): Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - options_ (Union[dict, ~google.cloud.firestore_v1.types.TransactionOptions]): The options for the transaction. - Defaults to a read-write transaction. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.TransactionOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1.types.BeginTransactionResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "begin_transaction" not in self._inner_api_calls: - self._inner_api_calls[ - "begin_transaction" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.begin_transaction, - default_retry=self._method_configs["BeginTransaction"].retry, - default_timeout=self._method_configs["BeginTransaction"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.BeginTransactionRequest( - database=database, options=options_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["begin_transaction"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def commit( - self, - database, - writes, - transaction=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Commits a transaction, while optionally updating documents. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> - >>> # TODO: Initialize `writes`: - >>> writes = [] - >>> - >>> response = client.commit(database, writes) - - Args: - database (str): Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - writes (list[Union[dict, ~google.cloud.firestore_v1.types.Write]]): The writes to apply. - - Always executed atomically and in order. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Write` - transaction (bytes): If set, applies all writes in this transaction, and commits it. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1.types.CommitResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "commit" not in self._inner_api_calls: - self._inner_api_calls[ - "commit" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.commit, - default_retry=self._method_configs["Commit"].retry, - default_timeout=self._method_configs["Commit"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.CommitRequest( - database=database, writes=writes, transaction=transaction - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["commit"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def rollback( - self, - database, - transaction, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Rolls back a transaction. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> - >>> # TODO: Initialize `transaction`: - >>> transaction = b'' - >>> - >>> client.rollback(database, transaction) - - Args: - database (str): Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - transaction (bytes): Required. The transaction to roll back. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "rollback" not in self._inner_api_calls: - self._inner_api_calls[ - "rollback" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.rollback, - default_retry=self._method_configs["Rollback"].retry, - default_timeout=self._method_configs["Rollback"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.RollbackRequest( - database=database, transaction=transaction - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["rollback"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def run_query( - self, - parent, - structured_query=None, - transaction=None, - new_transaction=None, - read_time=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Runs a query. 
- - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> for element in client.run_query(parent): - ... # process element - ... pass - - Args: - parent (str): Required. The parent resource name. In the format: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - For example: ``projects/my-project/databases/my-database/documents`` or - ``projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`` - structured_query (Union[dict, ~google.cloud.firestore_v1.types.StructuredQuery]): A structured query. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.StructuredQuery` - transaction (bytes): Reads documents in a transaction. - new_transaction (Union[dict, ~google.cloud.firestore_v1.types.TransactionOptions]): Starts a new transaction and reads the documents. - Defaults to a read-only transaction. - The new transaction ID will be returned as the first response in the - stream. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.TransactionOptions` - read_time (Union[dict, ~google.cloud.firestore_v1.types.Timestamp]): Reads documents as they were at the given time. - This may not be older than 60 seconds. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1.types.Timestamp` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.firestore_v1.types.RunQueryResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "run_query" not in self._inner_api_calls: - self._inner_api_calls[ - "run_query" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.run_query, - default_retry=self._method_configs["RunQuery"].retry, - default_timeout=self._method_configs["RunQuery"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof(structured_query=structured_query) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. 
- google.api_core.protobuf_helpers.check_oneof( - transaction=transaction, - new_transaction=new_transaction, - read_time=read_time, - ) - - request = firestore_pb2.RunQueryRequest( - parent=parent, - structured_query=structured_query, - transaction=transaction, - new_transaction=new_transaction, - read_time=read_time, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["run_query"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def write( - self, - requests, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Streams batches of document updates and deletes, in order. - - EXPERIMENTAL: This method interface might change in the future. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> request = {'database': database} - >>> - >>> requests = [request] - >>> for element in client.write(requests): - ... # process element - ... pass - - Args: - requests (iterator[dict|google.cloud.firestore_v1.proto.firestore_pb2.WriteRequest]): The input objects. If a dict is provided, it must be of the - same form as the protobuf message :class:`~google.cloud.firestore_v1.types.WriteRequest` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.firestore_v1.types.WriteResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "write" not in self._inner_api_calls: - self._inner_api_calls[ - "write" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.write, - default_retry=self._method_configs["Write"].retry, - default_timeout=self._method_configs["Write"].timeout, - client_info=self._client_info, - ) - - return self._inner_api_calls["write"]( - requests, retry=retry, timeout=timeout, metadata=metadata - ) - - def listen( - self, - requests, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Listens to changes. - - EXPERIMENTAL: This method interface might change in the future. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> request = {'database': database} - >>> - >>> requests = [request] - >>> for element in client.listen(requests): - ... # process element - ... 
pass - - Args: - requests (iterator[dict|google.cloud.firestore_v1.proto.firestore_pb2.ListenRequest]): The input objects. If a dict is provided, it must be of the - same form as the protobuf message :class:`~google.cloud.firestore_v1.types.ListenRequest` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.firestore_v1.types.ListenResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "listen" not in self._inner_api_calls: - self._inner_api_calls[ - "listen" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.listen, - default_retry=self._method_configs["Listen"].retry, - default_timeout=self._method_configs["Listen"].timeout, - client_info=self._client_info, - ) - - return self._inner_api_calls["listen"]( - requests, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_collection_ids( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all the collection IDs underneath a document. - - Example: - >>> from google.cloud import firestore_v1 - >>> - >>> client = firestore_v1.FirestoreClient() - >>> - >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> # Iterate over all results - >>> for element in client.list_collection_ids(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_collection_ids(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The parent document. In the format: - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - For example: - ``projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`` - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`str` instances. - You can also iterate over the pages of the response - using its `pages` property. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_collection_ids" not in self._inner_api_calls: - self._inner_api_calls[ - "list_collection_ids" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_collection_ids, - default_retry=self._method_configs["ListCollectionIds"].retry, - default_timeout=self._method_configs["ListCollectionIds"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.ListCollectionIdsRequest( - parent=parent, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_collection_ids"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="collection_ids", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/firestore/google/cloud/firestore_v1/gapic/firestore_client_config.py b/firestore/google/cloud/firestore_v1/gapic/firestore_client_config.py deleted file mode 100644 index 53f9f267dd08..000000000000 --- a/firestore/google/cloud/firestore_v1/gapic/firestore_client_config.py +++ /dev/null @@ -1,97 +0,0 @@ -config = { - "interfaces": { - "google.firestore.v1.Firestore": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - "streaming": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "GetDocument": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListDocuments": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateDocument": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateDocument": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteDocument": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "BatchGetDocuments": { - "timeout_millis": 300000, - "retry_codes_name": "idempotent", - "retry_params_name": "streaming", - }, - "BeginTransaction": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "Commit": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "Rollback": { - "timeout_millis": 
60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "RunQuery": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "streaming", - }, - "Write": { - "timeout_millis": 86400000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "streaming", - }, - "Listen": { - "timeout_millis": 86400000, - "retry_codes_name": "idempotent", - "retry_params_name": "streaming", - }, - "ListCollectionIds": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/firestore/google/cloud/firestore_v1/gapic/transports/__init__.py b/firestore/google/cloud/firestore_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_v1/gapic/transports/firestore_grpc_transport.py b/firestore/google/cloud/firestore_v1/gapic/transports/firestore_grpc_transport.py deleted file mode 100644 index ce730eaacca0..000000000000 --- a/firestore/google/cloud/firestore_v1/gapic/transports/firestore_grpc_transport.py +++ /dev/null @@ -1,281 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.firestore_v1.proto import firestore_pb2_grpc - - -class FirestoreGrpcTransport(object): - """gRPC transport class providing stubs for - google.firestore.v1 Firestore API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/datastore", - ) - - def __init__( - self, channel=None, credentials=None, address="firestore.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = {"firestore_stub": firestore_pb2_grpc.FirestoreStub(channel)} - - @classmethod - def create_channel( - cls, address="firestore.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def get_document(self): - """Return the gRPC stub for :meth:`FirestoreClient.get_document`. - - Gets a single document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].GetDocument - - @property - def list_documents(self): - """Return the gRPC stub for :meth:`FirestoreClient.list_documents`. - - Lists documents. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].ListDocuments - - @property - def create_document(self): - """Return the gRPC stub for :meth:`FirestoreClient.create_document`. - - Creates a new document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].CreateDocument - - @property - def update_document(self): - """Return the gRPC stub for :meth:`FirestoreClient.update_document`. - - Updates or inserts a document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].UpdateDocument - - @property - def delete_document(self): - """Return the gRPC stub for :meth:`FirestoreClient.delete_document`. - - Deletes a document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].DeleteDocument - - @property - def batch_get_documents(self): - """Return the gRPC stub for :meth:`FirestoreClient.batch_get_documents`. - - Gets multiple documents. - - Documents returned by this method are not guaranteed to be returned in the - same order that they were requested. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["firestore_stub"].BatchGetDocuments - - @property - def begin_transaction(self): - """Return the gRPC stub for :meth:`FirestoreClient.begin_transaction`. - - Starts a new transaction. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].BeginTransaction - - @property - def commit(self): - """Return the gRPC stub for :meth:`FirestoreClient.commit`. - - Commits a transaction, while optionally updating documents. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].Commit - - @property - def rollback(self): - """Return the gRPC stub for :meth:`FirestoreClient.rollback`. - - Rolls back a transaction. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].Rollback - - @property - def run_query(self): - """Return the gRPC stub for :meth:`FirestoreClient.run_query`. - - Runs a query. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].RunQuery - - @property - def write(self): - """Return the gRPC stub for :meth:`FirestoreClient.write`. - - Streams batches of document updates and deletes, in order. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].Write - - @property - def listen(self): - """Return the gRPC stub for :meth:`FirestoreClient.listen`. - - Listens to changes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].Listen - - @property - def list_collection_ids(self): - """Return the gRPC stub for :meth:`FirestoreClient.list_collection_ids`. - - Lists all the collection IDs underneath a document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].ListCollectionIds diff --git a/firestore/google/cloud/firestore_v1/order.py b/firestore/google/cloud/firestore_v1/order.py deleted file mode 100644 index d70293a36a5d..000000000000 --- a/firestore/google/cloud/firestore_v1/order.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from enum import Enum -from google.cloud.firestore_v1._helpers import decode_value -import math - - -class TypeOrder(Enum): - # NOTE: This order is defined by the backend and cannot be changed. 
- NULL = 0 - BOOLEAN = 1 - NUMBER = 2 - TIMESTAMP = 3 - STRING = 4 - BLOB = 5 - REF = 6 - GEO_POINT = 7 - ARRAY = 8 - OBJECT = 9 - - @staticmethod - def from_value(value): - v = value.WhichOneof("value_type") - - lut = { - "null_value": TypeOrder.NULL, - "boolean_value": TypeOrder.BOOLEAN, - "integer_value": TypeOrder.NUMBER, - "double_value": TypeOrder.NUMBER, - "timestamp_value": TypeOrder.TIMESTAMP, - "string_value": TypeOrder.STRING, - "bytes_value": TypeOrder.BLOB, - "reference_value": TypeOrder.REF, - "geo_point_value": TypeOrder.GEO_POINT, - "array_value": TypeOrder.ARRAY, - "map_value": TypeOrder.OBJECT, - } - - if v not in lut: - raise ValueError("Could not detect value type for " + v) - return lut[v] - - -class Order(object): - """ - Order implements the ordering semantics of the backend. - """ - - @classmethod - def compare(cls, left, right): - """ - Main comparison function for all Firestore types. - @return -1 is left < right, 0 if left == right, otherwise 1 - """ - # First compare the types. - leftType = TypeOrder.from_value(left).value - rightType = TypeOrder.from_value(right).value - - if leftType != rightType: - if leftType < rightType: - return -1 - return 1 - - value_type = left.WhichOneof("value_type") - - if value_type == "null_value": - return 0 # nulls are all equal - elif value_type == "boolean_value": - return cls._compare_to(left.boolean_value, right.boolean_value) - elif value_type == "integer_value": - return cls.compare_numbers(left, right) - elif value_type == "double_value": - return cls.compare_numbers(left, right) - elif value_type == "timestamp_value": - return cls.compare_timestamps(left, right) - elif value_type == "string_value": - return cls._compare_to(left.string_value, right.string_value) - elif value_type == "bytes_value": - return cls.compare_blobs(left, right) - elif value_type == "reference_value": - return cls.compare_resource_paths(left, right) - elif value_type == "geo_point_value": - return cls.compare_geo_points(left, right) - elif value_type == "array_value": - return cls.compare_arrays(left, right) - elif value_type == "map_value": - return cls.compare_objects(left, right) - else: - raise ValueError("Unknown ``value_type``", str(value_type)) - - @staticmethod - def compare_blobs(left, right): - left_bytes = left.bytes_value - right_bytes = right.bytes_value - - return Order._compare_to(left_bytes, right_bytes) - - @staticmethod - def compare_timestamps(left, right): - left = left.timestamp_value - right = right.timestamp_value - - seconds = Order._compare_to(left.seconds or 0, right.seconds or 0) - if seconds != 0: - return seconds - - return Order._compare_to(left.nanos or 0, right.nanos or 0) - - @staticmethod - def compare_geo_points(left, right): - left_value = decode_value(left, None) - right_value = decode_value(right, None) - cmp = (left_value.latitude > right_value.latitude) - ( - left_value.latitude < right_value.latitude - ) - - if cmp != 0: - return cmp - return (left_value.longitude > right_value.longitude) - ( - left_value.longitude < right_value.longitude - ) - - @staticmethod - def compare_resource_paths(left, right): - left = left.reference_value - right = right.reference_value - - left_segments = left.split("/") - right_segments = right.split("/") - shorter = min(len(left_segments), len(right_segments)) - # compare segments - for i in range(shorter): - if left_segments[i] < right_segments[i]: - return -1 - if left_segments[i] > right_segments[i]: - return 1 - - left_length = len(left) - right_length = len(right) - return 
(left_length > right_length) - (left_length < right_length) - - @staticmethod - def compare_arrays(left, right): - l_values = left.array_value.values - r_values = right.array_value.values - - length = min(len(l_values), len(r_values)) - for i in range(length): - cmp = Order.compare(l_values[i], r_values[i]) - if cmp != 0: - return cmp - - return Order._compare_to(len(l_values), len(r_values)) - - @staticmethod - def compare_objects(left, right): - left_fields = left.map_value.fields - right_fields = right.map_value.fields - - for left_key, right_key in zip(sorted(left_fields), sorted(right_fields)): - keyCompare = Order._compare_to(left_key, right_key) - if keyCompare != 0: - return keyCompare - - value_compare = Order.compare( - left_fields[left_key], right_fields[right_key] - ) - if value_compare != 0: - return value_compare - - return Order._compare_to(len(left_fields), len(right_fields)) - - @staticmethod - def compare_numbers(left, right): - left_value = decode_value(left, None) - right_value = decode_value(right, None) - return Order.compare_doubles(left_value, right_value) - - @staticmethod - def compare_doubles(left, right): - if math.isnan(left): - if math.isnan(right): - return 0 - return -1 - if math.isnan(right): - return 1 - - return Order._compare_to(left, right) - - @staticmethod - def _compare_to(left, right): - # We can't just use cmp(left, right) because cmp doesn't exist - # in Python 3, so this is an equivalent suggested by - # https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons - return (left > right) - (left < right) diff --git a/firestore/google/cloud/firestore_v1/proto/__init__.py b/firestore/google/cloud/firestore_v1/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_v1/proto/common.proto b/firestore/google/cloud/firestore_v1/proto/common.proto deleted file mode 100644 index 8e2ef27ff28a..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/common.proto +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.v1; - -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.firestore.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1"; - -// A set of field paths on a document. -// Used to restrict a get or update operation on a document to a subset of its -// fields. -// This is different from standard field masks, as this is always scoped to a -// [Document][google.firestore.v1.Document], and takes in account the dynamic nature of [Value][google.firestore.v1.Value]. -message DocumentMask { - // The list of field paths in the mask. 
See [Document.fields][google.firestore.v1.Document.fields] for a field - // path syntax reference. - repeated string field_paths = 1; -} - -// A precondition on a document, used for conditional operations. -message Precondition { - // The type of precondition. - oneof condition_type { - // When set to `true`, the target document must exist. - // When set to `false`, the target document must not exist. - bool exists = 1; - - // When set, the target document must exist and have been last updated at - // that time. - google.protobuf.Timestamp update_time = 2; - } -} - -// Options for creating a new transaction. -message TransactionOptions { - // Options for a transaction that can be used to read and write documents. - message ReadWrite { - // An optional transaction to retry. - bytes retry_transaction = 1; - } - - // Options for a transaction that can only be used to read documents. - message ReadOnly { - // The consistency mode for this transaction. If not set, defaults to strong - // consistency. - oneof consistency_selector { - // Reads documents at the given time. - // This may not be older than 60 seconds. - google.protobuf.Timestamp read_time = 2; - } - } - - // The mode of the transaction. - oneof mode { - // The transaction can only be used for read operations. - ReadOnly read_only = 2; - - // The transaction can be used for both read and write operations. - ReadWrite read_write = 3; - } -} diff --git a/firestore/google/cloud/firestore_v1/proto/common_pb2.py b/firestore/google/cloud/firestore_v1/proto/common_pb2.py deleted file mode 100644 index 3d25c5b80c75..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/common_pb2.py +++ /dev/null @@ -1,454 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/firestore_v1/proto/common.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1/proto/common.proto", - package="google.firestore.v1", - syntax="proto3", - serialized_options=_b( - "\n\027com.google.firestore.v1B\013CommonProtoP\001Z fields = 2; - - // Output only. The time at which the document was created. - // - // This value increases monotonically when a document is deleted then - // recreated. It can also be compared to values from other documents and - // the `read_time` of a query. - google.protobuf.Timestamp create_time = 3; - - // Output only. The time at which the document was last changed. - // - // This value is initially set to the `create_time` then increases - // monotonically with each change to the document. It can also be - // compared to values from other documents and the `read_time` of a query. - google.protobuf.Timestamp update_time = 4; -} - -// A message that can hold any of the supported value types. -message Value { - // Must have a value set. - oneof value_type { - // A null value. - google.protobuf.NullValue null_value = 11; - - // A boolean value. - bool boolean_value = 1; - - // An integer value. 
- int64 integer_value = 2; - - // A double value. - double double_value = 3; - - // A timestamp value. - // - // Precise only to microseconds. When stored, any additional precision is - // rounded down. - google.protobuf.Timestamp timestamp_value = 10; - - // A string value. - // - // The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. - // Only the first 1,500 bytes of the UTF-8 representation are considered by - // queries. - string string_value = 17; - - // A bytes value. - // - // Must not exceed 1 MiB - 89 bytes. - // Only the first 1,500 bytes are considered by queries. - bytes bytes_value = 18; - - // A reference to a document. For example: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string reference_value = 5; - - // A geo point value representing a point on the surface of Earth. - google.type.LatLng geo_point_value = 8; - - // An array value. - // - // Cannot directly contain another array value, though can contain an - // map which contains another array. - ArrayValue array_value = 9; - - // A map value. - MapValue map_value = 6; - } -} - -// An array value. -message ArrayValue { - // Values in the array. - repeated Value values = 1; -} - -// A map value. -message MapValue { - // The map's fields. - // - // The map keys represent field names. Field names matching the regular - // expression `__.*__` are reserved. Reserved field names are forbidden except - // in certain documented contexts. The map keys, represented as UTF-8, must - // not exceed 1,500 bytes and cannot be empty. - map fields = 1; -} diff --git a/firestore/google/cloud/firestore_v1/proto/document_pb2.py b/firestore/google/cloud/firestore_v1/proto/document_pb2.py deleted file mode 100644 index 82111a82299e..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/document_pb2.py +++ /dev/null @@ -1,798 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/firestore_v1/proto/document.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1/proto/document.proto", - package="google.firestore.v1", - syntax="proto3", - serialized_options=_b( - "\n\027com.google.firestore.v1B\rDocumentProtoP\001Z labels = 5; -} - -// The response for [Firestore.Write][google.firestore.v1.Firestore.Write]. -message WriteResponse { - // The ID of the stream. - // Only set on the first message, when a new stream was created. - string stream_id = 1; - - // A token that represents the position of this response in the stream. - // This can be used by a client to resume the stream at this point. - // - // This field is always set. - bytes stream_token = 2; - - // The result of applying the writes. - // - // This i-th write result corresponds to the i-th write in the - // request. 
- repeated WriteResult write_results = 3; - - // The time at which the commit occurred. Any read with an equal or greater - // `read_time` is guaranteed to see the effects of the write. - google.protobuf.Timestamp commit_time = 4; -} - -// A request for [Firestore.Listen][google.firestore.v1.Firestore.Listen] -message ListenRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The supported target changes. - oneof target_change { - // A target to add to this stream. - Target add_target = 2; - - // The ID of a target to remove from this stream. - int32 remove_target = 3; - } - - // Labels associated with this target change. - map labels = 4; -} - -// The response for [Firestore.Listen][google.firestore.v1.Firestore.Listen]. -message ListenResponse { - // The supported responses. - oneof response_type { - // Targets have changed. - TargetChange target_change = 2; - - // A [Document][google.firestore.v1.Document] has changed. - DocumentChange document_change = 3; - - // A [Document][google.firestore.v1.Document] has been deleted. - DocumentDelete document_delete = 4; - - // A [Document][google.firestore.v1.Document] has been removed from a target (because it is no longer - // relevant to that target). - DocumentRemove document_remove = 6; - - // A filter to apply to the set of documents previously returned for the - // given target. - // - // Returned when documents may have been removed from the given target, but - // the exact documents are unknown. - ExistenceFilter filter = 5; - } -} - -// A specification of a set of documents to listen to. -message Target { - // A target specified by a set of documents names. - message DocumentsTarget { - // The names of the documents to retrieve. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // The request will fail if any of the document is not a child resource of - // the given `database`. Duplicate names will be elided. - repeated string documents = 2; - } - - // A target specified by a query. - message QueryTarget { - // The parent resource name. In the format: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents` or - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1; - - // The query to run. - oneof query_type { - // A structured query. - StructuredQuery structured_query = 2; - } - } - - // The type of target to listen to. - oneof target_type { - // A target specified by a query. - QueryTarget query = 2; - - // A target specified by a set of document names. - DocumentsTarget documents = 3; - } - - // When to start listening. - // - // If not specified, all matching Documents are returned before any - // subsequent changes. - oneof resume_type { - // A resume token from a prior [TargetChange][google.firestore.v1.TargetChange] for an identical target. - // - // Using a resume token with a different target is unsupported and may fail. - bytes resume_token = 4; - - // Start listening after a specific `read_time`. - // - // The client must know the state of matching documents at this time. - google.protobuf.Timestamp read_time = 11; - } - - // The target ID that identifies the target on the stream. Must be a positive - // number and non-zero. 
- int32 target_id = 5; - - // If the target should be removed once it is current and consistent. - bool once = 6; -} - -// Targets being watched have changed. -message TargetChange { - // The type of change. - enum TargetChangeType { - // No change has occurred. Used only to send an updated `resume_token`. - NO_CHANGE = 0; - - // The targets have been added. - ADD = 1; - - // The targets have been removed. - REMOVE = 2; - - // The targets reflect all changes committed before the targets were added - // to the stream. - // - // This will be sent after or with a `read_time` that is greater than or - // equal to the time at which the targets were added. - // - // Listeners can wait for this change if read-after-write semantics - // are desired. - CURRENT = 3; - - // The targets have been reset, and a new initial state for the targets - // will be returned in subsequent changes. - // - // After the initial state is complete, `CURRENT` will be returned even - // if the target was previously indicated to be `CURRENT`. - RESET = 4; - } - - // The type of change that occurred. - TargetChangeType target_change_type = 1; - - // The target IDs of targets that have changed. - // - // If empty, the change applies to all targets. - // - // The order of the target IDs is not defined. - repeated int32 target_ids = 2; - - // The error that resulted in this change, if applicable. - google.rpc.Status cause = 3; - - // A token that can be used to resume the stream for the given `target_ids`, - // or all targets if `target_ids` is empty. - // - // Not set on every target change. - bytes resume_token = 4; - - // The consistent `read_time` for the given `target_ids` (omitted when the - // target_ids are not at a consistent snapshot). - // - // The stream is guaranteed to send a `read_time` with `target_ids` empty - // whenever the entire stream reaches a new consistent snapshot. ADD, - // CURRENT, and RESET messages are guaranteed to (eventually) result in a - // new consistent snapshot (while NO_CHANGE and REMOVE messages are not). - // - // For a given stream, `read_time` is guaranteed to be monotonically - // increasing. - google.protobuf.Timestamp read_time = 6; -} - -// The request for [Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds]. -message ListCollectionIdsRequest { - // Required. The parent document. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // The maximum number of results to return. - int32 page_size = 2; - - // A page token. Must be a value from - // [ListCollectionIdsResponse][google.firestore.v1.ListCollectionIdsResponse]. - string page_token = 3; -} - -// The response from [Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds]. -message ListCollectionIdsResponse { - // The collection ids. - repeated string collection_ids = 1; - - // A page token that may be used to continue the list. - string next_page_token = 2; -} diff --git a/firestore/google/cloud/firestore_v1/proto/firestore_pb2.py b/firestore/google/cloud/firestore_v1/proto/firestore_pb2.py deleted file mode 100644 index 06e39be5b10c..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/firestore_pb2.py +++ /dev/null @@ -1,3806 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
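The TargetChange semantics in the deleted firestore.proto above (NO_CHANGE, ADD, REMOVE, CURRENT, RESET, plus per-change resume tokens) amount to a small piece of per-stream state on the consumer side. A rough sketch of tracking it, using hypothetical names that are not part of the removed client library:

class TargetState:
    """Tracks whether a listen target is consistent and where to resume.

    Illustrative only: the field names echo the TargetChange proto comments,
    not any API from the deleted client.
    """

    def __init__(self):
        self.current = False      # True once a CURRENT change arrives
        self.resume_token = None  # last token usable to resume the stream

    def apply(self, change_type, resume_token=None):
        if change_type == "RESET":
            # A new initial state follows; CURRENT will be re-sent later.
            self.current = False
        elif change_type == "CURRENT":
            # All changes committed before the target was added are now seen.
            self.current = True
        # NO_CHANGE / ADD / REMOVE matter here only for the resume token.
        if resume_token is not None:
            self.resume_token = resume_token


state = TargetState()
state.apply("ADD")
state.apply("CURRENT", resume_token=b"\x01")
assert state.current and state.resume_token == b"\x01"
state.apply("RESET")
assert not state.current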
-# source: google/cloud/firestore_v1/proto/firestore.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.cloud.firestore_v1.proto import ( - common_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2, -) -from google.cloud.firestore_v1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2, -) -from google.cloud.firestore_v1.proto import ( - query_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_query__pb2, -) -from google.cloud.firestore_v1.proto import ( - write_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_write__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1/proto/firestore.proto", - package="google.firestore.v1", - syntax="proto3", - serialized_options=_b( - "\n\027com.google.firestore.v1B\016FirestoreProtoP\001Z\n\x06labels\x18\x04 \x03(\x0b\x32..google.firestore.v1.ListenRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0f\n\rtarget_change"\xd5\x02\n\x0eListenResponse\x12:\n\rtarget_change\x18\x02 \x01(\x0b\x32!.google.firestore.v1.TargetChangeH\x00\x12>\n\x0f\x64ocument_change\x18\x03 \x01(\x0b\x32#.google.firestore.v1.DocumentChangeH\x00\x12>\n\x0f\x64ocument_delete\x18\x04 \x01(\x0b\x32#.google.firestore.v1.DocumentDeleteH\x00\x12>\n\x0f\x64ocument_remove\x18\x06 \x01(\x0b\x32#.google.firestore.v1.DocumentRemoveH\x00\x12\x36\n\x06\x66ilter\x18\x05 \x01(\x0b\x32$.google.firestore.v1.ExistenceFilterH\x00\x42\x0f\n\rresponse_type"\xa1\x03\n\x06Target\x12\x38\n\x05query\x18\x02 \x01(\x0b\x32\'.google.firestore.v1.Target.QueryTargetH\x00\x12@\n\tdocuments\x18\x03 \x01(\x0b\x32+.google.firestore.v1.Target.DocumentsTargetH\x00\x12\x16\n\x0cresume_token\x18\x04 \x01(\x0cH\x01\x12/\n\tread_time\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x01\x12\x11\n\ttarget_id\x18\x05 \x01(\x05\x12\x0c\n\x04once\x18\x06 \x01(\x08\x1a$\n\x0f\x44ocumentsTarget\x12\x11\n\tdocuments\x18\x02 \x03(\t\x1am\n\x0bQueryTarget\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12@\n\x10structured_query\x18\x02 \x01(\x0b\x32$.google.firestore.v1.StructuredQueryH\x00\x42\x0c\n\nquery_typeB\r\n\x0btarget_typeB\r\n\x0bresume_type"\xaa\x02\n\x0cTargetChange\x12N\n\x12target_change_type\x18\x01 \x01(\x0e\x32\x32.google.firestore.v1.TargetChange.TargetChangeType\x12\x12\n\ntarget_ids\x18\x02 \x03(\x05\x12!\n\x05\x63\x61use\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12\x14\n\x0cresume_token\x18\x04 \x01(\x0c\x12-\n\tread_time\x18\x06 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"N\n\x10TargetChangeType\x12\r\n\tNO_CHANGE\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02\x12\x0b\n\x07\x43URRENT\x10\x03\x12\t\n\x05RESET\x10\x04"V\n\x18ListCollectionIdsRequest\x12\x13\n\x06parent\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"L\n\x19ListCollectionIdsResponse\x12\x16\n\x0e\x63ollection_ids\x18\x01 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xd7\x13\n\tFirestore\x12\x8f\x01\n\x0bGetDocument\x12\'.google.firestore.v1.GetDocumentRequest\x1a\x1d.google.firestore.v1.Document"8\x82\xd3\xe4\x93\x02\x32\x12\x30/v1/{name=projects/*/databases/*/documents/*/**}\x12\xb2\x01\n\rListDocuments\x12).google.firestore.v1.ListDocumentsRequest\x1a*.google.firestore.v1.ListDocumentsResponse"J\x82\xd3\xe4\x93\x02\x44\x12\x42/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}\x12\xaf\x01\n\x0e\x43reateDocument\x12*.google.firestore.v1.CreateDocumentRequest\x1a\x1d.google.firestore.v1.Document"R\x82\xd3\xe4\x93\x02L"@/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}:\x08\x64ocument\x12\xbf\x01\n\x0eUpdateDocument\x12*.google.firestore.v1.UpdateDocumentRequest\x1a\x1d.google.firestore.v1.Document"b\x82\xd3\xe4\x93\x02\x45\x32\x39/v1/{document.name=projects/*/databases/*/documents/*/**}:\x08\x64ocument\xda\x41\x14\x64ocument,update_mask\x12\x95\x01\n\x0e\x44\x65leteDocument\x12*.google.firestore.v1.DeleteDocumentRequest\x1a\x16.google.protobuf.Empty"?\x82\xd3\xe4\x93\x02\x32*0/v1/{name=projects/*/databases/*/documents/*/**}\xda\x41\x04name\x12\xb9\x01\n\x11\x42\x61tchGetDocuments\x12-.google.firestore.v1.BatchGetDocumentsRequest\x1a..google.firestore.v1.BatchGetDocumentsResponse"C\x82\xd3\xe4\x93\x02="8/v1/{database=projects/*/databases/*}/documents:batchGet:\x01*0\x01\x12\xc7\x01\n\x10\x42\x65ginTransaction\x12,.google.firestore.v1.BeginTransactionRequest\x1a-.google.firestore.v1.BeginTransactionResponse"V\x82\xd3\xe4\x93\x02\x45"@/v1/{database=projects/*/databases/*}/documents:beginTransaction:\x01*\xda\x41\x08\x64\x61tabase\x12\xa6\x01\n\x06\x43ommit\x12".google.firestore.v1.CommitRequest\x1a#.google.firestore.v1.CommitResponse"S\x82\xd3\xe4\x93\x02;"6/v1/{database=projects/*/databases/*}/documents:commit:\x01*\xda\x41\x0f\x64\x61tabase,writes\x12\xa4\x01\n\x08Rollback\x12$.google.firestore.v1.RollbackRequest\x1a\x16.google.protobuf.Empty"Z\x82\xd3\xe4\x93\x02="8/v1/{database=projects/*/databases/*}/documents:rollback:\x01*\xda\x41\x14\x64\x61tabase,transaction\x12\xdf\x01\n\x08RunQuery\x12$.google.firestore.v1.RunQueryRequest\x1a%.google.firestore.v1.RunQueryResponse"\x83\x01\x82\xd3\xe4\x93\x02}"6/v1/{parent=projects/*/databases/*/documents}:runQuery:\x01*Z@";/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery:\x01*0\x01\x12\x94\x01\n\x05Write\x12!.google.firestore.v1.WriteRequest\x1a".google.firestore.v1.WriteResponse"@\x82\xd3\xe4\x93\x02:"5/v1/{database=projects/*/databases/*}/documents:write:\x01*(\x01\x30\x01\x12\x98\x01\n\x06Listen\x12".google.firestore.v1.ListenRequest\x1a#.google.firestore.v1.ListenResponse"A\x82\xd3\xe4\x93\x02;"6/v1/{database=projects/*/databases/*}/documents:listen:\x01*(\x01\x30\x01\x12\x94\x02\n\x11ListCollectionIds\x12-.google.firestore.v1.ListCollectionIdsRequest\x1a..google.firestore.v1.ListCollectionIdsResponse"\x9f\x01\x82\xd3\xe4\x93\x02\x8f\x01"?/v1/{parent=projects/*/databases/*/documents}:listCollectionIds:\x01*ZI"D/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionId
s:\x01*\xda\x41\x06parent\x1av\xca\x41\x18\x66irestore.googleapis.com\xd2\x41Xhttps://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastoreB\xa8\x01\n\x17\x63om.google.firestore.v1B\x0e\x46irestoreProtoP\x01Z 1` becomes - // `SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__` - repeated Order order_by = 4; - - // A starting point for the query results. - Cursor start_at = 7; - - // A end point for the query results. - Cursor end_at = 8; - - // The number of results to skip. - // - // Applies before limit, but after all other constraints. Must be >= 0 if - // specified. - int32 offset = 6; - - // The maximum number of results to return. - // - // Applies after all other constraints. - // Must be >= 0 if specified. - google.protobuf.Int32Value limit = 5; -} - -// A position in a query result set. -message Cursor { - // The values that represent a position, in the order they appear in - // the order by clause of a query. - // - // Can contain fewer values than specified in the order by clause. - repeated Value values = 1; - - // If the position is just before or just after the given values, relative - // to the sort order defined by the query. - bool before = 2; -} diff --git a/firestore/google/cloud/firestore_v1/proto/query_pb2.py b/firestore/google/cloud/firestore_v1/proto/query_pb2.py deleted file mode 100644 index 6e1982629dc8..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/query_pb2.py +++ /dev/null @@ -1,1200 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/firestore_v1/proto/query.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.firestore_v1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2, -) -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1/proto/query.proto", - package="google.firestore.v1", - syntax="proto3", - serialized_options=_b( - "\n\027com.google.firestore.v1B\nQueryProtoP\001Z 1`` - becomes ``SELECT * FROM Foo WHERE A > 1 ORDER BY A, - __name__`` - start_at: - A starting point for the query results. - end_at: - A end point for the query results. - offset: - The number of results to skip. Applies before limit, but - after all other constraints. Must be >= 0 if specified. - limit: - The maximum number of results to return. Applies after all - other constraints. Must be >= 0 if specified. 
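The Cursor.before flag defined in the deleted query.proto above states whether the position sits just before or just after the given values relative to the query's sort order. Under the simplifying assumption of a single ascending order-by field, its inclusive/exclusive effect on a start cursor can be sketched as follows (illustrative helper, not the removed library's query implementation):

def apply_start_cursor(rows, cursor_value, before):
    """Keep rows at or after the cursor position.

    rows are (sort_value, doc_id) pairs already sorted ascending.
    before=True  -> start *at* the cursor value (inclusive, like start_at)
    before=False -> start *after* it (exclusive, like start_after)
    """
    if before:
        return [r for r in rows if r[0] >= cursor_value]
    return [r for r in rows if r[0] > cursor_value]


rows = [(1, "a"), (2, "b"), (2, "c"), (3, "d")]
assert apply_start_cursor(rows, 2, before=True) == [(2, "b"), (2, "c"), (3, "d")]
assert apply_start_cursor(rows, 2, before=False) == [(3, "d")]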
- """, - # @@protoc_insertion_point(class_scope:google.firestore.v1.StructuredQuery) - ), -) -_sym_db.RegisterMessage(StructuredQuery) -_sym_db.RegisterMessage(StructuredQuery.CollectionSelector) -_sym_db.RegisterMessage(StructuredQuery.Filter) -_sym_db.RegisterMessage(StructuredQuery.CompositeFilter) -_sym_db.RegisterMessage(StructuredQuery.FieldFilter) -_sym_db.RegisterMessage(StructuredQuery.UnaryFilter) -_sym_db.RegisterMessage(StructuredQuery.Order) -_sym_db.RegisterMessage(StructuredQuery.FieldReference) -_sym_db.RegisterMessage(StructuredQuery.Projection) - -Cursor = _reflection.GeneratedProtocolMessageType( - "Cursor", - (_message.Message,), - dict( - DESCRIPTOR=_CURSOR, - __module__="google.cloud.firestore_v1.proto.query_pb2", - __doc__="""A position in a query result set. - - - Attributes: - values: - The values that represent a position, in the order they appear - in the order by clause of a query. Can contain fewer values - than specified in the order by clause. - before: - If the position is just before or just after the given values, - relative to the sort order defined by the query. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1.Cursor) - ), -) -_sym_db.RegisterMessage(Cursor) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1/proto/query_pb2_grpc.py b/firestore/google/cloud/firestore_v1/proto/query_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/query_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_v1/proto/test_v1_pb2.py b/firestore/google/cloud/firestore_v1/proto/test_v1_pb2.py deleted file mode 100644 index 336bab948414..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/test_v1_pb2.py +++ /dev/null @@ -1,2190 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: test_v1.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.firestore_v1.proto import ( - common_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2, -) -from google.cloud.firestore_v1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2, -) -from google.cloud.firestore_v1.proto import ( - firestore_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2, -) -from google.cloud.firestore_v1.proto import ( - query_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_query__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="test_v1.proto", - package="tests.v1", - syntax="proto3", - serialized_pb=_b( - '\n\rtest_v1.proto\x12\x08tests.v1\x1a,google/cloud/firestore_v1/proto/common.proto\x1a.google/cloud/firestore_v1/proto/document.proto\x1a/google/cloud/firestore_v1/proto/firestore.proto\x1a+google/cloud/firestore_v1/proto/query.proto\x1a\x1fgoogle/protobuf/timestamp.proto"*\n\tTestSuite\x12\x1d\n\x05tests\x18\x01 \x03(\x0b\x32\x0e.tests.v1.Test"\xe0\x02\n\x04Test\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12 \n\x03get\x18\x02 \x01(\x0b\x32\x11.tests.v1.GetTestH\x00\x12&\n\x06\x63reate\x18\x03 \x01(\x0b\x32\x14.tests.v1.CreateTestH\x00\x12 \n\x03set\x18\x04 \x01(\x0b\x32\x11.tests.v1.SetTestH\x00\x12&\n\x06update\x18\x05 \x01(\x0b\x32\x14.tests.v1.UpdateTestH\x00\x12\x31\n\x0cupdate_paths\x18\x06 \x01(\x0b\x32\x19.tests.v1.UpdatePathsTestH\x00\x12&\n\x06\x64\x65lete\x18\x07 \x01(\x0b\x32\x14.tests.v1.DeleteTestH\x00\x12$\n\x05query\x18\x08 \x01(\x0b\x32\x13.tests.v1.QueryTestH\x00\x12&\n\x06listen\x18\t \x01(\x0b\x32\x14.tests.v1.ListenTestH\x00\x42\x06\n\x04test"Y\n\x07GetTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x38\n\x07request\x18\x02 \x01(\x0b\x32\'.google.firestore.v1.GetDocumentRequest"|\n\nCreateTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x11\n\tjson_data\x18\x02 \x01(\t\x12\x33\n\x07request\x18\x03 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x04 \x01(\x08"\x9e\x01\n\x07SetTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12#\n\x06option\x18\x02 \x01(\x0b\x32\x13.tests.v1.SetOption\x12\x11\n\tjson_data\x18\x03 \x01(\t\x12\x33\n\x07request\x18\x04 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x05 \x01(\x08"\xb5\x01\n\nUpdateTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x37\n\x0cprecondition\x18\x02 \x01(\x0b\x32!.google.firestore.v1.Precondition\x12\x11\n\tjson_data\x18\x03 \x01(\t\x12\x33\n\x07request\x18\x04 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x05 \x01(\x08"\xe6\x01\n\x0fUpdatePathsTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x37\n\x0cprecondition\x18\x02 \x01(\x0b\x32!.google.firestore.v1.Precondition\x12(\n\x0b\x66ield_paths\x18\x03 \x03(\x0b\x32\x13.tests.v1.FieldPath\x12\x13\n\x0bjson_values\x18\x04 \x03(\t\x12\x33\n\x07request\x18\x05 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x06 
\x01(\x08"\xa2\x01\n\nDeleteTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x37\n\x0cprecondition\x18\x02 \x01(\x0b\x32!.google.firestore.v1.Precondition\x12\x33\n\x07request\x18\x03 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x04 \x01(\x08"=\n\tSetOption\x12\x0b\n\x03\x61ll\x18\x01 \x01(\x08\x12#\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x13.tests.v1.FieldPath"\x88\x01\n\tQueryTest\x12\x11\n\tcoll_path\x18\x01 \x01(\t\x12!\n\x07\x63lauses\x18\x02 \x03(\x0b\x32\x10.tests.v1.Clause\x12\x33\n\x05query\x18\x03 \x01(\x0b\x32$.google.firestore.v1.StructuredQuery\x12\x10\n\x08is_error\x18\x04 \x01(\x08"\xbd\x02\n\x06\x43lause\x12"\n\x06select\x18\x01 \x01(\x0b\x32\x10.tests.v1.SelectH\x00\x12 \n\x05where\x18\x02 \x01(\x0b\x32\x0f.tests.v1.WhereH\x00\x12%\n\x08order_by\x18\x03 \x01(\x0b\x32\x11.tests.v1.OrderByH\x00\x12\x10\n\x06offset\x18\x04 \x01(\x05H\x00\x12\x0f\n\x05limit\x18\x05 \x01(\x05H\x00\x12$\n\x08start_at\x18\x06 \x01(\x0b\x32\x10.tests.v1.CursorH\x00\x12\'\n\x0bstart_after\x18\x07 \x01(\x0b\x32\x10.tests.v1.CursorH\x00\x12"\n\x06\x65nd_at\x18\x08 \x01(\x0b\x32\x10.tests.v1.CursorH\x00\x12&\n\nend_before\x18\t \x01(\x0b\x32\x10.tests.v1.CursorH\x00\x42\x08\n\x06\x63lause"-\n\x06Select\x12#\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x13.tests.v1.FieldPath"J\n\x05Where\x12!\n\x04path\x18\x01 \x01(\x0b\x32\x13.tests.v1.FieldPath\x12\n\n\x02op\x18\x02 \x01(\t\x12\x12\n\njson_value\x18\x03 \x01(\t"?\n\x07OrderBy\x12!\n\x04path\x18\x01 \x01(\x0b\x32\x13.tests.v1.FieldPath\x12\x11\n\tdirection\x18\x02 \x01(\t"J\n\x06\x43ursor\x12+\n\x0c\x64oc_snapshot\x18\x01 \x01(\x0b\x32\x15.tests.v1.DocSnapshot\x12\x13\n\x0bjson_values\x18\x02 \x03(\t".\n\x0b\x44ocSnapshot\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x11\n\tjson_data\x18\x02 \x01(\t"\x1a\n\tFieldPath\x12\r\n\x05\x66ield\x18\x01 \x03(\t"}\n\nListenTest\x12\x36\n\tresponses\x18\x01 \x03(\x0b\x32#.google.firestore.v1.ListenResponse\x12%\n\tsnapshots\x18\x02 \x03(\x0b\x32\x12.tests.v1.Snapshot\x12\x10\n\x08is_error\x18\x03 \x01(\x08"\x8c\x01\n\x08Snapshot\x12+\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x1d.google.firestore.v1.Document\x12$\n\x07\x63hanges\x18\x02 \x03(\x0b\x32\x13.tests.v1.DocChange\x12-\n\tread_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\tDocChange\x12&\n\x04kind\x18\x01 \x01(\x0e\x32\x18.tests.v1.DocChange.Kind\x12*\n\x03\x64oc\x18\x02 \x01(\x0b\x32\x1d.google.firestore.v1.Document\x12\x11\n\told_index\x18\x03 \x01(\x05\x12\x11\n\tnew_index\x18\x04 \x01(\x05"B\n\x04Kind\x12\x14\n\x10KIND_UNSPECIFIED\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0b\n\x07REMOVED\x10\x02\x12\x0c\n\x08MODIFIED\x10\x03\x42x\n&com.google.cloud.firestore.conformance\xaa\x02"Google.Cloud.Firestore.Tests.Proto\xca\x02(Google\\Cloud\\Firestore\\Tests\\Conformanceb\x06proto3' - ), - dependencies=[ - google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1_dot_proto_dot_query__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_DOCCHANGE_KIND = _descriptor.EnumDescriptor( - name="Kind", - full_name="tests.v1.DocChange.Kind", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="KIND_UNSPECIFIED", index=0, number=0, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ADDED", index=1, number=1, options=None, type=None - ), 
- _descriptor.EnumValueDescriptor( - name="REMOVED", index=2, number=2, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="MODIFIED", index=3, number=3, options=None, type=None - ), - ], - containing_type=None, - options=None, - serialized_start=2875, - serialized_end=2941, -) -_sym_db.RegisterEnumDescriptor(_DOCCHANGE_KIND) - - -_TESTSUITE = _descriptor.Descriptor( - name="TestSuite", - full_name="tests.v1.TestSuite", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="tests", - full_name="tests.v1.TestSuite.tests", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=248, - serialized_end=290, -) - - -_TEST = _descriptor.Descriptor( - name="Test", - full_name="tests.v1.Test", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="description", - full_name="tests.v1.Test.description", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="get", - full_name="tests.v1.Test.get", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="create", - full_name="tests.v1.Test.create", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="set", - full_name="tests.v1.Test.set", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="tests.v1.Test.update", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_paths", - full_name="tests.v1.Test.update_paths", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete", - full_name="tests.v1.Test.delete", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query", - full_name="tests.v1.Test.query", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="listen", - full_name="tests.v1.Test.listen", - index=8, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="test", - full_name="tests.v1.Test.test", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=293, - serialized_end=645, -) - - -_GETTEST = _descriptor.Descriptor( - name="GetTest", - full_name="tests.v1.GetTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1.GetTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1.GetTest.request", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=647, - serialized_end=736, -) - - -_CREATETEST = _descriptor.Descriptor( - name="CreateTest", - full_name="tests.v1.CreateTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1.CreateTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="tests.v1.CreateTest.json_data", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1.CreateTest.request", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1.CreateTest.is_error", - index=3, 
- number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=738, - serialized_end=862, -) - - -_SETTEST = _descriptor.Descriptor( - name="SetTest", - full_name="tests.v1.SetTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1.SetTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="option", - full_name="tests.v1.SetTest.option", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="tests.v1.SetTest.json_data", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1.SetTest.request", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1.SetTest.is_error", - index=4, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=865, - serialized_end=1023, -) - - -_UPDATETEST = _descriptor.Descriptor( - name="UpdateTest", - full_name="tests.v1.UpdateTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1.UpdateTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="tests.v1.UpdateTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - 
full_name="tests.v1.UpdateTest.json_data", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1.UpdateTest.request", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1.UpdateTest.is_error", - index=4, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1026, - serialized_end=1207, -) - - -_UPDATEPATHSTEST = _descriptor.Descriptor( - name="UpdatePathsTest", - full_name="tests.v1.UpdatePathsTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1.UpdatePathsTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="tests.v1.UpdatePathsTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="field_paths", - full_name="tests.v1.UpdatePathsTest.field_paths", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_values", - full_name="tests.v1.UpdatePathsTest.json_values", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1.UpdatePathsTest.request", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1.UpdatePathsTest.is_error", - index=5, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1210, - serialized_end=1440, -) - - -_DELETETEST = _descriptor.Descriptor( - name="DeleteTest", - full_name="tests.v1.DeleteTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1.DeleteTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="tests.v1.DeleteTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1.DeleteTest.request", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1.DeleteTest.is_error", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1443, - serialized_end=1605, -) - - -_SETOPTION = _descriptor.Descriptor( - name="SetOption", - full_name="tests.v1.SetOption", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="all", - full_name="tests.v1.SetOption.all", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="fields", - full_name="tests.v1.SetOption.fields", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1607, - serialized_end=1668, -) - - -_QUERYTEST = _descriptor.Descriptor( - name="QueryTest", - full_name="tests.v1.QueryTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="coll_path", - full_name="tests.v1.QueryTest.coll_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="clauses", - full_name="tests.v1.QueryTest.clauses", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query", - full_name="tests.v1.QueryTest.query", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1.QueryTest.is_error", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1671, - serialized_end=1807, -) - - -_CLAUSE = _descriptor.Descriptor( - name="Clause", - full_name="tests.v1.Clause", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="select", - full_name="tests.v1.Clause.select", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="where", - full_name="tests.v1.Clause.where", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="tests.v1.Clause.order_by", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="offset", - full_name="tests.v1.Clause.offset", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="limit", - full_name="tests.v1.Clause.limit", - index=4, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_at", - full_name="tests.v1.Clause.start_at", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - 
file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_after", - full_name="tests.v1.Clause.start_after", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_at", - full_name="tests.v1.Clause.end_at", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_before", - full_name="tests.v1.Clause.end_before", - index=8, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="clause", - full_name="tests.v1.Clause.clause", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1810, - serialized_end=2127, -) - - -_SELECT = _descriptor.Descriptor( - name="Select", - full_name="tests.v1.Select", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="tests.v1.Select.fields", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2129, - serialized_end=2174, -) - - -_WHERE = _descriptor.Descriptor( - name="Where", - full_name="tests.v1.Where", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="tests.v1.Where.path", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="op", - full_name="tests.v1.Where.op", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_value", - full_name="tests.v1.Where.json_value", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2176, - serialized_end=2250, -) - - -_ORDERBY = 
_descriptor.Descriptor( - name="OrderBy", - full_name="tests.v1.OrderBy", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="tests.v1.OrderBy.path", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="direction", - full_name="tests.v1.OrderBy.direction", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2252, - serialized_end=2315, -) - - -_CURSOR = _descriptor.Descriptor( - name="Cursor", - full_name="tests.v1.Cursor", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_snapshot", - full_name="tests.v1.Cursor.doc_snapshot", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_values", - full_name="tests.v1.Cursor.json_values", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2317, - serialized_end=2391, -) - - -_DOCSNAPSHOT = _descriptor.Descriptor( - name="DocSnapshot", - full_name="tests.v1.DocSnapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="tests.v1.DocSnapshot.path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="tests.v1.DocSnapshot.json_data", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2393, - serialized_end=2439, -) - - -_FIELDPATH = _descriptor.Descriptor( - name="FieldPath", - full_name="tests.v1.FieldPath", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field", - full_name="tests.v1.FieldPath.field", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - 
has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2441, - serialized_end=2467, -) - - -_LISTENTEST = _descriptor.Descriptor( - name="ListenTest", - full_name="tests.v1.ListenTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="responses", - full_name="tests.v1.ListenTest.responses", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="snapshots", - full_name="tests.v1.ListenTest.snapshots", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1.ListenTest.is_error", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2469, - serialized_end=2594, -) - - -_SNAPSHOT = _descriptor.Descriptor( - name="Snapshot", - full_name="tests.v1.Snapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="docs", - full_name="tests.v1.Snapshot.docs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="changes", - full_name="tests.v1.Snapshot.changes", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="tests.v1.Snapshot.read_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2597, - serialized_end=2737, -) - - -_DOCCHANGE = _descriptor.Descriptor( - name="DocChange", - full_name="tests.v1.DocChange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="kind", - full_name="tests.v1.DocChange.kind", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - 
has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="doc", - full_name="tests.v1.DocChange.doc", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="old_index", - full_name="tests.v1.DocChange.old_index", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="new_index", - full_name="tests.v1.DocChange.new_index", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_DOCCHANGE_KIND], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2740, - serialized_end=2941, -) - -_TESTSUITE.fields_by_name["tests"].message_type = _TEST -_TEST.fields_by_name["get"].message_type = _GETTEST -_TEST.fields_by_name["create"].message_type = _CREATETEST -_TEST.fields_by_name["set"].message_type = _SETTEST -_TEST.fields_by_name["update"].message_type = _UPDATETEST -_TEST.fields_by_name["update_paths"].message_type = _UPDATEPATHSTEST -_TEST.fields_by_name["delete"].message_type = _DELETETEST -_TEST.fields_by_name["query"].message_type = _QUERYTEST -_TEST.fields_by_name["listen"].message_type = _LISTENTEST -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["get"]) -_TEST.fields_by_name["get"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["create"]) -_TEST.fields_by_name["create"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["set"]) -_TEST.fields_by_name["set"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["update"]) -_TEST.fields_by_name["update"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["update_paths"]) -_TEST.fields_by_name["update_paths"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["delete"]) -_TEST.fields_by_name["delete"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["query"]) -_TEST.fields_by_name["query"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["listen"]) -_TEST.fields_by_name["listen"].containing_oneof = _TEST.oneofs_by_name["test"] -_GETTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._GETDOCUMENTREQUEST -) -_CREATETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) 
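An aside on the numeric codes that recur in the deleted descriptor definitions above: type, cpp_type and label are the raw enum values from the protobuf runtime, so type=11, cpp_type=10 marks a message field, type=9, cpp_type=9 a string, type=8, cpp_type=7 a bool, type=5, cpp_type=1 an int32, type=14, cpp_type=8 an enum, and label=3 a repeated field. A minimal sketch that checks this mapping against the symbolic constants (nothing below is part of the deleted file):

from google.protobuf.descriptor import FieldDescriptor

# Raw codes used throughout the generated descriptors in this diff.
assert FieldDescriptor.TYPE_MESSAGE == 11 and FieldDescriptor.CPPTYPE_MESSAGE == 10
assert FieldDescriptor.TYPE_STRING == 9 and FieldDescriptor.CPPTYPE_STRING == 9
assert FieldDescriptor.TYPE_BOOL == 8 and FieldDescriptor.CPPTYPE_BOOL == 7
assert FieldDescriptor.TYPE_INT32 == 5 and FieldDescriptor.CPPTYPE_INT32 == 1
assert FieldDescriptor.TYPE_ENUM == 14 and FieldDescriptor.CPPTYPE_ENUM == 8
assert FieldDescriptor.LABEL_OPTIONAL == 1 and FieldDescriptor.LABEL_REPEATED == 3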
-_SETTEST.fields_by_name["option"].message_type = _SETOPTION -_SETTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_UPDATETEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2._PRECONDITION -) -_UPDATETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_UPDATEPATHSTEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2._PRECONDITION -) -_UPDATEPATHSTEST.fields_by_name["field_paths"].message_type = _FIELDPATH -_UPDATEPATHSTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_DELETETEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2._PRECONDITION -) -_DELETETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_SETOPTION.fields_by_name["fields"].message_type = _FIELDPATH -_QUERYTEST.fields_by_name["clauses"].message_type = _CLAUSE -_QUERYTEST.fields_by_name[ - "query" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_query__pb2._STRUCTUREDQUERY -) -_CLAUSE.fields_by_name["select"].message_type = _SELECT -_CLAUSE.fields_by_name["where"].message_type = _WHERE -_CLAUSE.fields_by_name["order_by"].message_type = _ORDERBY -_CLAUSE.fields_by_name["start_at"].message_type = _CURSOR -_CLAUSE.fields_by_name["start_after"].message_type = _CURSOR -_CLAUSE.fields_by_name["end_at"].message_type = _CURSOR -_CLAUSE.fields_by_name["end_before"].message_type = _CURSOR -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["select"]) -_CLAUSE.fields_by_name["select"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["where"]) -_CLAUSE.fields_by_name["where"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["order_by"]) -_CLAUSE.fields_by_name["order_by"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["offset"]) -_CLAUSE.fields_by_name["offset"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["limit"]) -_CLAUSE.fields_by_name["limit"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["start_at"]) -_CLAUSE.fields_by_name["start_at"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["start_after"]) -_CLAUSE.fields_by_name["start_after"].containing_oneof = _CLAUSE.oneofs_by_name[ - "clause" -] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["end_at"]) -_CLAUSE.fields_by_name["end_at"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["end_before"]) -_CLAUSE.fields_by_name["end_before"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_SELECT.fields_by_name["fields"].message_type = _FIELDPATH -_WHERE.fields_by_name["path"].message_type = _FIELDPATH -_ORDERBY.fields_by_name["path"].message_type = _FIELDPATH 
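To make the oneof wiring above concrete, here is a small illustrative sketch of how the generated Clause message behaves once these containing_oneof links are in place. The import name test_v1_pb2 is taken from the __module__ hint visible in the generated classes below and is an assumption, as is the idea that the deleted module is still importable:

import test_v1_pb2

clause = test_v1_pb2.Clause(limit=10)            # "limit" becomes the active member of the "clause" oneof
clause.offset = 5                                # assigning another member clears "limit"
assert clause.WhichOneof("clause") == "offset"
assert clause.limit == 0                         # reset to the proto3 default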
-_CURSOR.fields_by_name["doc_snapshot"].message_type = _DOCSNAPSHOT -_LISTENTEST.fields_by_name[ - "responses" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._LISTENRESPONSE -) -_LISTENTEST.fields_by_name["snapshots"].message_type = _SNAPSHOT -_SNAPSHOT.fields_by_name[ - "docs" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2._DOCUMENT -) -_SNAPSHOT.fields_by_name["changes"].message_type = _DOCCHANGE -_SNAPSHOT.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_DOCCHANGE.fields_by_name["kind"].enum_type = _DOCCHANGE_KIND -_DOCCHANGE.fields_by_name[ - "doc" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2._DOCUMENT -) -_DOCCHANGE_KIND.containing_type = _DOCCHANGE -DESCRIPTOR.message_types_by_name["TestSuite"] = _TESTSUITE -DESCRIPTOR.message_types_by_name["Test"] = _TEST -DESCRIPTOR.message_types_by_name["GetTest"] = _GETTEST -DESCRIPTOR.message_types_by_name["CreateTest"] = _CREATETEST -DESCRIPTOR.message_types_by_name["SetTest"] = _SETTEST -DESCRIPTOR.message_types_by_name["UpdateTest"] = _UPDATETEST -DESCRIPTOR.message_types_by_name["UpdatePathsTest"] = _UPDATEPATHSTEST -DESCRIPTOR.message_types_by_name["DeleteTest"] = _DELETETEST -DESCRIPTOR.message_types_by_name["SetOption"] = _SETOPTION -DESCRIPTOR.message_types_by_name["QueryTest"] = _QUERYTEST -DESCRIPTOR.message_types_by_name["Clause"] = _CLAUSE -DESCRIPTOR.message_types_by_name["Select"] = _SELECT -DESCRIPTOR.message_types_by_name["Where"] = _WHERE -DESCRIPTOR.message_types_by_name["OrderBy"] = _ORDERBY -DESCRIPTOR.message_types_by_name["Cursor"] = _CURSOR -DESCRIPTOR.message_types_by_name["DocSnapshot"] = _DOCSNAPSHOT -DESCRIPTOR.message_types_by_name["FieldPath"] = _FIELDPATH -DESCRIPTOR.message_types_by_name["ListenTest"] = _LISTENTEST -DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT -DESCRIPTOR.message_types_by_name["DocChange"] = _DOCCHANGE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TestSuite = _reflection.GeneratedProtocolMessageType( - "TestSuite", - (_message.Message,), - dict( - DESCRIPTOR=_TESTSUITE, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.TestSuite) - ), -) -_sym_db.RegisterMessage(TestSuite) - -Test = _reflection.GeneratedProtocolMessageType( - "Test", - (_message.Message,), - dict( - DESCRIPTOR=_TEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.Test) - ), -) -_sym_db.RegisterMessage(Test) - -GetTest = _reflection.GeneratedProtocolMessageType( - "GetTest", - (_message.Message,), - dict( - DESCRIPTOR=_GETTEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.GetTest) - ), -) -_sym_db.RegisterMessage(GetTest) - -CreateTest = _reflection.GeneratedProtocolMessageType( - "CreateTest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATETEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.CreateTest) - ), -) -_sym_db.RegisterMessage(CreateTest) - -SetTest = _reflection.GeneratedProtocolMessageType( - "SetTest", - (_message.Message,), - dict( - DESCRIPTOR=_SETTEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.SetTest) - ), -) -_sym_db.RegisterMessage(SetTest) - -UpdateTest = _reflection.GeneratedProtocolMessageType( - "UpdateTest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATETEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.UpdateTest) - ), -) 
-_sym_db.RegisterMessage(UpdateTest) - -UpdatePathsTest = _reflection.GeneratedProtocolMessageType( - "UpdatePathsTest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEPATHSTEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.UpdatePathsTest) - ), -) -_sym_db.RegisterMessage(UpdatePathsTest) - -DeleteTest = _reflection.GeneratedProtocolMessageType( - "DeleteTest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETETEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.DeleteTest) - ), -) -_sym_db.RegisterMessage(DeleteTest) - -SetOption = _reflection.GeneratedProtocolMessageType( - "SetOption", - (_message.Message,), - dict( - DESCRIPTOR=_SETOPTION, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.SetOption) - ), -) -_sym_db.RegisterMessage(SetOption) - -QueryTest = _reflection.GeneratedProtocolMessageType( - "QueryTest", - (_message.Message,), - dict( - DESCRIPTOR=_QUERYTEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.QueryTest) - ), -) -_sym_db.RegisterMessage(QueryTest) - -Clause = _reflection.GeneratedProtocolMessageType( - "Clause", - (_message.Message,), - dict( - DESCRIPTOR=_CLAUSE, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.Clause) - ), -) -_sym_db.RegisterMessage(Clause) - -Select = _reflection.GeneratedProtocolMessageType( - "Select", - (_message.Message,), - dict( - DESCRIPTOR=_SELECT, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.Select) - ), -) -_sym_db.RegisterMessage(Select) - -Where = _reflection.GeneratedProtocolMessageType( - "Where", - (_message.Message,), - dict( - DESCRIPTOR=_WHERE, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.Where) - ), -) -_sym_db.RegisterMessage(Where) - -OrderBy = _reflection.GeneratedProtocolMessageType( - "OrderBy", - (_message.Message,), - dict( - DESCRIPTOR=_ORDERBY, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.OrderBy) - ), -) -_sym_db.RegisterMessage(OrderBy) - -Cursor = _reflection.GeneratedProtocolMessageType( - "Cursor", - (_message.Message,), - dict( - DESCRIPTOR=_CURSOR, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.Cursor) - ), -) -_sym_db.RegisterMessage(Cursor) - -DocSnapshot = _reflection.GeneratedProtocolMessageType( - "DocSnapshot", - (_message.Message,), - dict( - DESCRIPTOR=_DOCSNAPSHOT, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.DocSnapshot) - ), -) -_sym_db.RegisterMessage(DocSnapshot) - -FieldPath = _reflection.GeneratedProtocolMessageType( - "FieldPath", - (_message.Message,), - dict( - DESCRIPTOR=_FIELDPATH, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.FieldPath) - ), -) -_sym_db.RegisterMessage(FieldPath) - -ListenTest = _reflection.GeneratedProtocolMessageType( - "ListenTest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTENTEST, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.ListenTest) - ), -) -_sym_db.RegisterMessage(ListenTest) - -Snapshot = _reflection.GeneratedProtocolMessageType( - "Snapshot", - (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOT, - __module__="test_v1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1.Snapshot) - ), -) -_sym_db.RegisterMessage(Snapshot) - -DocChange = _reflection.GeneratedProtocolMessageType( - "DocChange", - (_message.Message,), - dict( - DESCRIPTOR=_DOCCHANGE, - __module__="test_v1_pb2" - # 
@@protoc_insertion_point(class_scope:tests.v1.DocChange) - ), -) -_sym_db.RegisterMessage(DocChange) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - '\n&com.google.cloud.firestore.conformance\252\002"Google.Cloud.Firestore.Tests.Proto\312\002(Google\\Cloud\\Firestore\\Tests\\Conformance' - ), -) -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1/proto/tests_pb2.py b/firestore/google/cloud/firestore_v1/proto/tests_pb2.py deleted file mode 100644 index 126887881e53..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/tests_pb2.py +++ /dev/null @@ -1,2208 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/firestore_v1/proto/tests.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.firestore_v1.proto import ( - common_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2, -) -from google.cloud.firestore_v1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2, -) -from google.cloud.firestore_v1.proto import ( - firestore_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2, -) -from google.cloud.firestore_v1.proto import ( - query_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_query__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1/proto/tests.proto", - package="google.cloud.firestore_v1.proto", - syntax="proto3", - serialized_pb=_b( - '\n+google/cloud/firestore_v1/proto/tests.proto\x12\x1fgoogle.cloud.firestore_v1.proto\x1a,google/cloud/firestore_v1/proto/common.proto\x1a.google/cloud/firestore_v1/proto/document.proto\x1a/google/cloud/firestore_v1/proto/firestore.proto\x1a+google/cloud/firestore_v1/proto/query.proto\x1a\x1fgoogle/protobuf/timestamp.proto"@\n\x08TestFile\x12\x34\n\x05tests\x18\x01 \x03(\x0b\x32%.google.cloud.firestore_v1.proto.Test"\xa9\x04\n\x04Test\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12\x0f\n\x07\x63omment\x18\n \x01(\t\x12\x37\n\x03get\x18\x02 \x01(\x0b\x32(.google.cloud.firestore_v1.proto.GetTestH\x00\x12=\n\x06\x63reate\x18\x03 \x01(\x0b\x32+.google.cloud.firestore_v1.proto.CreateTestH\x00\x12\x37\n\x03set\x18\x04 \x01(\x0b\x32(.google.cloud.firestore_v1.proto.SetTestH\x00\x12=\n\x06update\x18\x05 \x01(\x0b\x32+.google.cloud.firestore_v1.proto.UpdateTestH\x00\x12H\n\x0cupdate_paths\x18\x06 \x01(\x0b\x32\x30.google.cloud.firestore_v1.proto.UpdatePathsTestH\x00\x12=\n\x06\x64\x65lete\x18\x07 \x01(\x0b\x32+.google.cloud.firestore_v1.proto.DeleteTestH\x00\x12;\n\x05query\x18\x08 \x01(\x0b\x32*.google.cloud.firestore_v1.proto.QueryTestH\x00\x12=\n\x06listen\x18\t \x01(\x0b\x32+.google.cloud.firestore_v1.proto.ListenTestH\x00\x42\x06\n\x04test"Y\n\x07GetTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x38\n\x07request\x18\x02 \x01(\x0b\x32\'.google.firestore.v1.GetDocumentRequest"|\n\nCreateTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x11\n\tjson_data\x18\x02 
\x01(\t\x12\x33\n\x07request\x18\x03 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x04 \x01(\x08"\xb5\x01\n\x07SetTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12:\n\x06option\x18\x02 \x01(\x0b\x32*.google.cloud.firestore_v1.proto.SetOption\x12\x11\n\tjson_data\x18\x03 \x01(\t\x12\x33\n\x07request\x18\x04 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x05 \x01(\x08"\xb5\x01\n\nUpdateTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x37\n\x0cprecondition\x18\x02 \x01(\x0b\x32!.google.firestore.v1.Precondition\x12\x11\n\tjson_data\x18\x03 \x01(\t\x12\x33\n\x07request\x18\x04 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x05 \x01(\x08"\xfd\x01\n\x0fUpdatePathsTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x37\n\x0cprecondition\x18\x02 \x01(\x0b\x32!.google.firestore.v1.Precondition\x12?\n\x0b\x66ield_paths\x18\x03 \x03(\x0b\x32*.google.cloud.firestore_v1.proto.FieldPath\x12\x13\n\x0bjson_values\x18\x04 \x03(\t\x12\x33\n\x07request\x18\x05 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x06 \x01(\x08"\xa2\x01\n\nDeleteTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x37\n\x0cprecondition\x18\x02 \x01(\x0b\x32!.google.firestore.v1.Precondition\x12\x33\n\x07request\x18\x03 \x01(\x0b\x32".google.firestore.v1.CommitRequest\x12\x10\n\x08is_error\x18\x04 \x01(\x08"T\n\tSetOption\x12\x0b\n\x03\x61ll\x18\x01 \x01(\x08\x12:\n\x06\x66ields\x18\x02 \x03(\x0b\x32*.google.cloud.firestore_v1.proto.FieldPath"\x9f\x01\n\tQueryTest\x12\x11\n\tcoll_path\x18\x01 \x01(\t\x12\x38\n\x07\x63lauses\x18\x02 \x03(\x0b\x32\'.google.cloud.firestore_v1.proto.Clause\x12\x33\n\x05query\x18\x03 \x01(\x0b\x32$.google.firestore.v1.StructuredQuery\x12\x10\n\x08is_error\x18\x04 \x01(\x08"\xde\x03\n\x06\x43lause\x12\x39\n\x06select\x18\x01 \x01(\x0b\x32\'.google.cloud.firestore_v1.proto.SelectH\x00\x12\x37\n\x05where\x18\x02 \x01(\x0b\x32&.google.cloud.firestore_v1.proto.WhereH\x00\x12<\n\x08order_by\x18\x03 \x01(\x0b\x32(.google.cloud.firestore_v1.proto.OrderByH\x00\x12\x10\n\x06offset\x18\x04 \x01(\x05H\x00\x12\x0f\n\x05limit\x18\x05 \x01(\x05H\x00\x12;\n\x08start_at\x18\x06 \x01(\x0b\x32\'.google.cloud.firestore_v1.proto.CursorH\x00\x12>\n\x0bstart_after\x18\x07 \x01(\x0b\x32\'.google.cloud.firestore_v1.proto.CursorH\x00\x12\x39\n\x06\x65nd_at\x18\x08 \x01(\x0b\x32\'.google.cloud.firestore_v1.proto.CursorH\x00\x12=\n\nend_before\x18\t \x01(\x0b\x32\'.google.cloud.firestore_v1.proto.CursorH\x00\x42\x08\n\x06\x63lause"D\n\x06Select\x12:\n\x06\x66ields\x18\x01 \x03(\x0b\x32*.google.cloud.firestore_v1.proto.FieldPath"a\n\x05Where\x12\x38\n\x04path\x18\x01 \x01(\x0b\x32*.google.cloud.firestore_v1.proto.FieldPath\x12\n\n\x02op\x18\x02 \x01(\t\x12\x12\n\njson_value\x18\x03 \x01(\t"V\n\x07OrderBy\x12\x38\n\x04path\x18\x01 \x01(\x0b\x32*.google.cloud.firestore_v1.proto.FieldPath\x12\x11\n\tdirection\x18\x02 \x01(\t"a\n\x06\x43ursor\x12\x42\n\x0c\x64oc_snapshot\x18\x01 \x01(\x0b\x32,.google.cloud.firestore_v1.proto.DocSnapshot\x12\x13\n\x0bjson_values\x18\x02 \x03(\t".\n\x0b\x44ocSnapshot\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x11\n\tjson_data\x18\x02 \x01(\t"\x1a\n\tFieldPath\x12\r\n\x05\x66ield\x18\x01 \x03(\t"\x94\x01\n\nListenTest\x12\x36\n\tresponses\x18\x01 \x03(\x0b\x32#.google.firestore.v1.ListenResponse\x12<\n\tsnapshots\x18\x02 \x03(\x0b\x32).google.cloud.firestore_v1.proto.Snapshot\x12\x10\n\x08is_error\x18\x03 \x01(\x08"\xa3\x01\n\x08Snapshot\x12+\n\x04\x64ocs\x18\x01 
\x03(\x0b\x32\x1d.google.firestore.v1.Document\x12;\n\x07\x63hanges\x18\x02 \x03(\x0b\x32*.google.cloud.firestore_v1.proto.DocChange\x12-\n\tread_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xe0\x01\n\tDocChange\x12=\n\x04kind\x18\x01 \x01(\x0e\x32/.google.cloud.firestore_v1.proto.DocChange.Kind\x12*\n\x03\x64oc\x18\x02 \x01(\x0b\x32\x1d.google.firestore.v1.Document\x12\x11\n\told_index\x18\x03 \x01(\x05\x12\x11\n\tnew_index\x18\x04 \x01(\x05"B\n\x04Kind\x12\x14\n\x10KIND_UNSPECIFIED\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0b\n\x07REMOVED\x10\x02\x12\x0c\n\x08MODIFIED\x10\x03\x42\x8b\x01\n)com.google.cloud.conformance.firestore.v1B\x0eTestDefinition\xaa\x02"Google.Cloud.Firestore.Tests.Proto\xca\x02(Google\\Cloud\\Firestore\\Tests\\Conformanceb\x06proto3' - ), - dependencies=[ - google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1_dot_proto_dot_query__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_DOCCHANGE_KIND = _descriptor.EnumDescriptor( - name="Kind", - full_name="google.cloud.firestore_v1.proto.DocChange.Kind", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="KIND_UNSPECIFIED", index=0, number=0, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ADDED", index=1, number=1, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REMOVED", index=2, number=2, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="MODIFIED", index=3, number=3, options=None, type=None - ), - ], - containing_type=None, - options=None, - serialized_start=3566, - serialized_end=3632, -) -_sym_db.RegisterEnumDescriptor(_DOCCHANGE_KIND) - - -_TESTFILE = _descriptor.Descriptor( - name="TestFile", - full_name="google.cloud.firestore_v1.proto.TestFile", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="tests", - full_name="google.cloud.firestore_v1.proto.TestFile.tests", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=301, - serialized_end=365, -) - - -_TEST = _descriptor.Descriptor( - name="Test", - full_name="google.cloud.firestore_v1.proto.Test", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="description", - full_name="google.cloud.firestore_v1.proto.Test.description", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="comment", - full_name="google.cloud.firestore_v1.proto.Test.comment", - index=1, - number=10, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="get", - full_name="google.cloud.firestore_v1.proto.Test.get", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="create", - full_name="google.cloud.firestore_v1.proto.Test.create", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="set", - full_name="google.cloud.firestore_v1.proto.Test.set", - index=4, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="google.cloud.firestore_v1.proto.Test.update", - index=5, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_paths", - full_name="google.cloud.firestore_v1.proto.Test.update_paths", - index=6, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete", - full_name="google.cloud.firestore_v1.proto.Test.delete", - index=7, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query", - full_name="google.cloud.firestore_v1.proto.Test.query", - index=8, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="listen", - full_name="google.cloud.firestore_v1.proto.Test.listen", - index=9, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="test", - full_name="google.cloud.firestore_v1.proto.Test.test", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=368, - serialized_end=921, -) - - -_GETTEST = _descriptor.Descriptor( - name="GetTest", - full_name="google.cloud.firestore_v1.proto.GetTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( 
- name="doc_ref_path", - full_name="google.cloud.firestore_v1.proto.GetTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="google.cloud.firestore_v1.proto.GetTest.request", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=923, - serialized_end=1012, -) - - -_CREATETEST = _descriptor.Descriptor( - name="CreateTest", - full_name="google.cloud.firestore_v1.proto.CreateTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="google.cloud.firestore_v1.proto.CreateTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="google.cloud.firestore_v1.proto.CreateTest.json_data", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="google.cloud.firestore_v1.proto.CreateTest.request", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="google.cloud.firestore_v1.proto.CreateTest.is_error", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1014, - serialized_end=1138, -) - - -_SETTEST = _descriptor.Descriptor( - name="SetTest", - full_name="google.cloud.firestore_v1.proto.SetTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="google.cloud.firestore_v1.proto.SetTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="option", - 
full_name="google.cloud.firestore_v1.proto.SetTest.option", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="google.cloud.firestore_v1.proto.SetTest.json_data", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="google.cloud.firestore_v1.proto.SetTest.request", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="google.cloud.firestore_v1.proto.SetTest.is_error", - index=4, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1141, - serialized_end=1322, -) - - -_UPDATETEST = _descriptor.Descriptor( - name="UpdateTest", - full_name="google.cloud.firestore_v1.proto.UpdateTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="google.cloud.firestore_v1.proto.UpdateTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="google.cloud.firestore_v1.proto.UpdateTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="google.cloud.firestore_v1.proto.UpdateTest.json_data", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="google.cloud.firestore_v1.proto.UpdateTest.request", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="google.cloud.firestore_v1.proto.UpdateTest.is_error", - index=4, - number=5, - type=8, - 
cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1325, - serialized_end=1506, -) - - -_UPDATEPATHSTEST = _descriptor.Descriptor( - name="UpdatePathsTest", - full_name="google.cloud.firestore_v1.proto.UpdatePathsTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="google.cloud.firestore_v1.proto.UpdatePathsTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="google.cloud.firestore_v1.proto.UpdatePathsTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="field_paths", - full_name="google.cloud.firestore_v1.proto.UpdatePathsTest.field_paths", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_values", - full_name="google.cloud.firestore_v1.proto.UpdatePathsTest.json_values", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="google.cloud.firestore_v1.proto.UpdatePathsTest.request", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="google.cloud.firestore_v1.proto.UpdatePathsTest.is_error", - index=5, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1509, - serialized_end=1762, -) - - -_DELETETEST = _descriptor.Descriptor( - name="DeleteTest", - full_name="google.cloud.firestore_v1.proto.DeleteTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="google.cloud.firestore_v1.proto.DeleteTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, 
- default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="google.cloud.firestore_v1.proto.DeleteTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="google.cloud.firestore_v1.proto.DeleteTest.request", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="google.cloud.firestore_v1.proto.DeleteTest.is_error", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1765, - serialized_end=1927, -) - - -_SETOPTION = _descriptor.Descriptor( - name="SetOption", - full_name="google.cloud.firestore_v1.proto.SetOption", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="all", - full_name="google.cloud.firestore_v1.proto.SetOption.all", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="fields", - full_name="google.cloud.firestore_v1.proto.SetOption.fields", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1929, - serialized_end=2013, -) - - -_QUERYTEST = _descriptor.Descriptor( - name="QueryTest", - full_name="google.cloud.firestore_v1.proto.QueryTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="coll_path", - full_name="google.cloud.firestore_v1.proto.QueryTest.coll_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="clauses", - full_name="google.cloud.firestore_v1.proto.QueryTest.clauses", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query", - full_name="google.cloud.firestore_v1.proto.QueryTest.query", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="google.cloud.firestore_v1.proto.QueryTest.is_error", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2016, - serialized_end=2175, -) - - -_CLAUSE = _descriptor.Descriptor( - name="Clause", - full_name="google.cloud.firestore_v1.proto.Clause", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="select", - full_name="google.cloud.firestore_v1.proto.Clause.select", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="where", - full_name="google.cloud.firestore_v1.proto.Clause.where", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.cloud.firestore_v1.proto.Clause.order_by", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="offset", - full_name="google.cloud.firestore_v1.proto.Clause.offset", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="limit", - full_name="google.cloud.firestore_v1.proto.Clause.limit", - index=4, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_at", - full_name="google.cloud.firestore_v1.proto.Clause.start_at", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_after", - full_name="google.cloud.firestore_v1.proto.Clause.start_after", - index=6, - number=7, - type=11, - cpp_type=10, - 
label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_at", - full_name="google.cloud.firestore_v1.proto.Clause.end_at", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_before", - full_name="google.cloud.firestore_v1.proto.Clause.end_before", - index=8, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="clause", - full_name="google.cloud.firestore_v1.proto.Clause.clause", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=2178, - serialized_end=2656, -) - - -_SELECT = _descriptor.Descriptor( - name="Select", - full_name="google.cloud.firestore_v1.proto.Select", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="google.cloud.firestore_v1.proto.Select.fields", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2658, - serialized_end=2726, -) - - -_WHERE = _descriptor.Descriptor( - name="Where", - full_name="google.cloud.firestore_v1.proto.Where", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="google.cloud.firestore_v1.proto.Where.path", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="op", - full_name="google.cloud.firestore_v1.proto.Where.op", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_value", - full_name="google.cloud.firestore_v1.proto.Where.json_value", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2728, - 
serialized_end=2825, -) - - -_ORDERBY = _descriptor.Descriptor( - name="OrderBy", - full_name="google.cloud.firestore_v1.proto.OrderBy", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="google.cloud.firestore_v1.proto.OrderBy.path", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="direction", - full_name="google.cloud.firestore_v1.proto.OrderBy.direction", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2827, - serialized_end=2913, -) - - -_CURSOR = _descriptor.Descriptor( - name="Cursor", - full_name="google.cloud.firestore_v1.proto.Cursor", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_snapshot", - full_name="google.cloud.firestore_v1.proto.Cursor.doc_snapshot", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_values", - full_name="google.cloud.firestore_v1.proto.Cursor.json_values", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2915, - serialized_end=3012, -) - - -_DOCSNAPSHOT = _descriptor.Descriptor( - name="DocSnapshot", - full_name="google.cloud.firestore_v1.proto.DocSnapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="google.cloud.firestore_v1.proto.DocSnapshot.path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="google.cloud.firestore_v1.proto.DocSnapshot.json_data", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3014, - serialized_end=3060, -) - - -_FIELDPATH = _descriptor.Descriptor( - name="FieldPath", - 
full_name="google.cloud.firestore_v1.proto.FieldPath", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field", - full_name="google.cloud.firestore_v1.proto.FieldPath.field", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3062, - serialized_end=3088, -) - - -_LISTENTEST = _descriptor.Descriptor( - name="ListenTest", - full_name="google.cloud.firestore_v1.proto.ListenTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="responses", - full_name="google.cloud.firestore_v1.proto.ListenTest.responses", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="snapshots", - full_name="google.cloud.firestore_v1.proto.ListenTest.snapshots", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="google.cloud.firestore_v1.proto.ListenTest.is_error", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3091, - serialized_end=3239, -) - - -_SNAPSHOT = _descriptor.Descriptor( - name="Snapshot", - full_name="google.cloud.firestore_v1.proto.Snapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="docs", - full_name="google.cloud.firestore_v1.proto.Snapshot.docs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="changes", - full_name="google.cloud.firestore_v1.proto.Snapshot.changes", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.cloud.firestore_v1.proto.Snapshot.read_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - 
enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3242, - serialized_end=3405, -) - - -_DOCCHANGE = _descriptor.Descriptor( - name="DocChange", - full_name="google.cloud.firestore_v1.proto.DocChange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="kind", - full_name="google.cloud.firestore_v1.proto.DocChange.kind", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="doc", - full_name="google.cloud.firestore_v1.proto.DocChange.doc", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="old_index", - full_name="google.cloud.firestore_v1.proto.DocChange.old_index", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="new_index", - full_name="google.cloud.firestore_v1.proto.DocChange.new_index", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_DOCCHANGE_KIND], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3408, - serialized_end=3632, -) - -_TESTFILE.fields_by_name["tests"].message_type = _TEST -_TEST.fields_by_name["get"].message_type = _GETTEST -_TEST.fields_by_name["create"].message_type = _CREATETEST -_TEST.fields_by_name["set"].message_type = _SETTEST -_TEST.fields_by_name["update"].message_type = _UPDATETEST -_TEST.fields_by_name["update_paths"].message_type = _UPDATEPATHSTEST -_TEST.fields_by_name["delete"].message_type = _DELETETEST -_TEST.fields_by_name["query"].message_type = _QUERYTEST -_TEST.fields_by_name["listen"].message_type = _LISTENTEST -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["get"]) -_TEST.fields_by_name["get"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["create"]) -_TEST.fields_by_name["create"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["set"]) -_TEST.fields_by_name["set"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["update"]) -_TEST.fields_by_name["update"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["update_paths"]) -_TEST.fields_by_name["update_paths"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["delete"]) -_TEST.fields_by_name["delete"].containing_oneof = _TEST.oneofs_by_name["test"] 
-_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["query"]) -_TEST.fields_by_name["query"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["listen"]) -_TEST.fields_by_name["listen"].containing_oneof = _TEST.oneofs_by_name["test"] -_GETTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._GETDOCUMENTREQUEST -) -_CREATETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_SETTEST.fields_by_name["option"].message_type = _SETOPTION -_SETTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_UPDATETEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2._PRECONDITION -) -_UPDATETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_UPDATEPATHSTEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2._PRECONDITION -) -_UPDATEPATHSTEST.fields_by_name["field_paths"].message_type = _FIELDPATH -_UPDATEPATHSTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_DELETETEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2._PRECONDITION -) -_DELETETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_SETOPTION.fields_by_name["fields"].message_type = _FIELDPATH -_QUERYTEST.fields_by_name["clauses"].message_type = _CLAUSE -_QUERYTEST.fields_by_name[ - "query" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_query__pb2._STRUCTUREDQUERY -) -_CLAUSE.fields_by_name["select"].message_type = _SELECT -_CLAUSE.fields_by_name["where"].message_type = _WHERE -_CLAUSE.fields_by_name["order_by"].message_type = _ORDERBY -_CLAUSE.fields_by_name["start_at"].message_type = _CURSOR -_CLAUSE.fields_by_name["start_after"].message_type = _CURSOR -_CLAUSE.fields_by_name["end_at"].message_type = _CURSOR -_CLAUSE.fields_by_name["end_before"].message_type = _CURSOR -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["select"]) -_CLAUSE.fields_by_name["select"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["where"]) -_CLAUSE.fields_by_name["where"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["order_by"]) -_CLAUSE.fields_by_name["order_by"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["offset"]) -_CLAUSE.fields_by_name["offset"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["limit"]) -_CLAUSE.fields_by_name["limit"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["start_at"]) -_CLAUSE.fields_by_name["start_at"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["start_after"]) 
-_CLAUSE.fields_by_name["start_after"].containing_oneof = _CLAUSE.oneofs_by_name[ - "clause" -] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["end_at"]) -_CLAUSE.fields_by_name["end_at"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["end_before"]) -_CLAUSE.fields_by_name["end_before"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_SELECT.fields_by_name["fields"].message_type = _FIELDPATH -_WHERE.fields_by_name["path"].message_type = _FIELDPATH -_ORDERBY.fields_by_name["path"].message_type = _FIELDPATH -_CURSOR.fields_by_name["doc_snapshot"].message_type = _DOCSNAPSHOT -_LISTENTEST.fields_by_name[ - "responses" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2._LISTENRESPONSE -) -_LISTENTEST.fields_by_name["snapshots"].message_type = _SNAPSHOT -_SNAPSHOT.fields_by_name[ - "docs" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2._DOCUMENT -) -_SNAPSHOT.fields_by_name["changes"].message_type = _DOCCHANGE -_SNAPSHOT.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_DOCCHANGE.fields_by_name["kind"].enum_type = _DOCCHANGE_KIND -_DOCCHANGE.fields_by_name[ - "doc" -].message_type = ( - google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2._DOCUMENT -) -_DOCCHANGE_KIND.containing_type = _DOCCHANGE -DESCRIPTOR.message_types_by_name["TestFile"] = _TESTFILE -DESCRIPTOR.message_types_by_name["Test"] = _TEST -DESCRIPTOR.message_types_by_name["GetTest"] = _GETTEST -DESCRIPTOR.message_types_by_name["CreateTest"] = _CREATETEST -DESCRIPTOR.message_types_by_name["SetTest"] = _SETTEST -DESCRIPTOR.message_types_by_name["UpdateTest"] = _UPDATETEST -DESCRIPTOR.message_types_by_name["UpdatePathsTest"] = _UPDATEPATHSTEST -DESCRIPTOR.message_types_by_name["DeleteTest"] = _DELETETEST -DESCRIPTOR.message_types_by_name["SetOption"] = _SETOPTION -DESCRIPTOR.message_types_by_name["QueryTest"] = _QUERYTEST -DESCRIPTOR.message_types_by_name["Clause"] = _CLAUSE -DESCRIPTOR.message_types_by_name["Select"] = _SELECT -DESCRIPTOR.message_types_by_name["Where"] = _WHERE -DESCRIPTOR.message_types_by_name["OrderBy"] = _ORDERBY -DESCRIPTOR.message_types_by_name["Cursor"] = _CURSOR -DESCRIPTOR.message_types_by_name["DocSnapshot"] = _DOCSNAPSHOT -DESCRIPTOR.message_types_by_name["FieldPath"] = _FIELDPATH -DESCRIPTOR.message_types_by_name["ListenTest"] = _LISTENTEST -DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT -DESCRIPTOR.message_types_by_name["DocChange"] = _DOCCHANGE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TestFile = _reflection.GeneratedProtocolMessageType( - "TestFile", - (_message.Message,), - dict( - DESCRIPTOR=_TESTFILE, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.TestFile) - ), -) -_sym_db.RegisterMessage(TestFile) - -Test = _reflection.GeneratedProtocolMessageType( - "Test", - (_message.Message,), - dict( - DESCRIPTOR=_TEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.Test) - ), -) -_sym_db.RegisterMessage(Test) - -GetTest = _reflection.GeneratedProtocolMessageType( - "GetTest", - (_message.Message,), - dict( - DESCRIPTOR=_GETTEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.GetTest) - ), -) 
-_sym_db.RegisterMessage(GetTest) - -CreateTest = _reflection.GeneratedProtocolMessageType( - "CreateTest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATETEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.CreateTest) - ), -) -_sym_db.RegisterMessage(CreateTest) - -SetTest = _reflection.GeneratedProtocolMessageType( - "SetTest", - (_message.Message,), - dict( - DESCRIPTOR=_SETTEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.SetTest) - ), -) -_sym_db.RegisterMessage(SetTest) - -UpdateTest = _reflection.GeneratedProtocolMessageType( - "UpdateTest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATETEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.UpdateTest) - ), -) -_sym_db.RegisterMessage(UpdateTest) - -UpdatePathsTest = _reflection.GeneratedProtocolMessageType( - "UpdatePathsTest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEPATHSTEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.UpdatePathsTest) - ), -) -_sym_db.RegisterMessage(UpdatePathsTest) - -DeleteTest = _reflection.GeneratedProtocolMessageType( - "DeleteTest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETETEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.DeleteTest) - ), -) -_sym_db.RegisterMessage(DeleteTest) - -SetOption = _reflection.GeneratedProtocolMessageType( - "SetOption", - (_message.Message,), - dict( - DESCRIPTOR=_SETOPTION, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.SetOption) - ), -) -_sym_db.RegisterMessage(SetOption) - -QueryTest = _reflection.GeneratedProtocolMessageType( - "QueryTest", - (_message.Message,), - dict( - DESCRIPTOR=_QUERYTEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.QueryTest) - ), -) -_sym_db.RegisterMessage(QueryTest) - -Clause = _reflection.GeneratedProtocolMessageType( - "Clause", - (_message.Message,), - dict( - DESCRIPTOR=_CLAUSE, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.Clause) - ), -) -_sym_db.RegisterMessage(Clause) - -Select = _reflection.GeneratedProtocolMessageType( - "Select", - (_message.Message,), - dict( - DESCRIPTOR=_SELECT, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.Select) - ), -) -_sym_db.RegisterMessage(Select) - -Where = _reflection.GeneratedProtocolMessageType( - "Where", - (_message.Message,), - dict( - DESCRIPTOR=_WHERE, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.Where) - ), -) -_sym_db.RegisterMessage(Where) - -OrderBy = _reflection.GeneratedProtocolMessageType( - "OrderBy", - (_message.Message,), - dict( - DESCRIPTOR=_ORDERBY, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.OrderBy) - ), -) -_sym_db.RegisterMessage(OrderBy) - -Cursor = _reflection.GeneratedProtocolMessageType( - "Cursor", - (_message.Message,), - dict( - 
DESCRIPTOR=_CURSOR, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.Cursor) - ), -) -_sym_db.RegisterMessage(Cursor) - -DocSnapshot = _reflection.GeneratedProtocolMessageType( - "DocSnapshot", - (_message.Message,), - dict( - DESCRIPTOR=_DOCSNAPSHOT, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.DocSnapshot) - ), -) -_sym_db.RegisterMessage(DocSnapshot) - -FieldPath = _reflection.GeneratedProtocolMessageType( - "FieldPath", - (_message.Message,), - dict( - DESCRIPTOR=_FIELDPATH, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.FieldPath) - ), -) -_sym_db.RegisterMessage(FieldPath) - -ListenTest = _reflection.GeneratedProtocolMessageType( - "ListenTest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTENTEST, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.ListenTest) - ), -) -_sym_db.RegisterMessage(ListenTest) - -Snapshot = _reflection.GeneratedProtocolMessageType( - "Snapshot", - (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOT, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.Snapshot) - ), -) -_sym_db.RegisterMessage(Snapshot) - -DocChange = _reflection.GeneratedProtocolMessageType( - "DocChange", - (_message.Message,), - dict( - DESCRIPTOR=_DOCCHANGE, - __module__="google.cloud.firestore_v1.proto.tests_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.firestore_v1.proto.DocChange) - ), -) -_sym_db.RegisterMessage(DocChange) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - '\n)com.google.cloud.conformance.firestore.v1B\016TestDefinition\252\002"Google.Cloud.Firestore.Tests.Proto\312\002(Google\\Cloud\\Firestore\\Tests\\Conformance' - ), -) -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1/proto/write.proto b/firestore/google/cloud/firestore_v1/proto/write.proto deleted file mode 100644 index 51d923918014..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/write.proto +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.firestore.v1; - -import "google/firestore/v1/common.proto"; -import "google/firestore/v1/document.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "WriteProto"; -option java_package = "com.google.firestore.v1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1"; - -// A write on a document. -message Write { - // The operation to execute. - oneof operation { - // A document to write. - Document update = 1; - - // A document name to delete. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string delete = 2; - - // Applies a transformation to a document. - // At most one `transform` per document is allowed in a given request. - // An `update` cannot follow a `transform` on the same document in a given - // request. - DocumentTransform transform = 6; - } - - // The fields to update in this write. - // - // This field can be set only when the operation is `update`. - // If the mask is not set for an `update` and the document exists, any - // existing data will be overwritten. - // If the mask is set and the document on the server has fields not covered by - // the mask, they are left unchanged. - // Fields referenced in the mask, but not present in the input document, are - // deleted from the document on the server. - // The field paths in this mask must not contain a reserved field name. - DocumentMask update_mask = 3; - - // An optional precondition on the document. - // - // The write will fail if this is set and not met by the target document. - Precondition current_document = 4; -} - -// A transformation of a document. -message DocumentTransform { - // A transformation of a field of the document. - message FieldTransform { - // A value that is calculated by the server. - enum ServerValue { - // Unspecified. This value must not be used. - SERVER_VALUE_UNSPECIFIED = 0; - - // The time at which the server processed the request, with millisecond - // precision. - REQUEST_TIME = 1; - } - - // The path of the field. See [Document.fields][google.firestore.v1.Document.fields] for the field path syntax - // reference. - string field_path = 1; - - // The transformation to apply on the field. - oneof transform_type { - // Sets the field to the given server value. - ServerValue set_to_server_value = 2; - - // Adds the given value to the field's current value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the given value. - // If either of the given value or the current field value are doubles, - // both values will be interpreted as doubles. Double arithmetic and - // representation of double values follow IEEE 754 semantics. - // If there is positive/negative integer overflow, the field is resolved - // to the largest magnitude positive/negative integer. - Value increment = 3; - - // Sets the field to the maximum of its current value and the given value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the given value. 
- // If a maximum operation is applied where the field and the input value - // are of mixed types (that is - one is an integer and one is a double) - // the field takes on the type of the larger operand. If the operands are - // equivalent (e.g. 3 and 3.0), the field does not change. - // 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and - // zero input value is always the stored value. - // The maximum of any numeric value x and NaN is NaN. - Value maximum = 4; - - // Sets the field to the minimum of its current value and the given value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the input value. - // If a minimum operation is applied where the field and the input value - // are of mixed types (that is - one is an integer and one is a double) - // the field takes on the type of the smaller operand. If the operands are - // equivalent (e.g. 3 and 3.0), the field does not change. - // 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and - // zero input value is always the stored value. - // The minimum of any numeric value x and NaN is NaN. - Value minimum = 5; - - // Append the given elements in order if they are not already present in - // the current field value. - // If the field is not an array, or if the field does not yet exist, it is - // first set to the empty array. - // - // Equivalent numbers of different types (e.g. 3L and 3.0) are - // considered equal when checking if a value is missing. - // NaN is equal to NaN, and Null is equal to Null. - // If the input contains multiple equivalent values, only the first will - // be considered. - // - // The corresponding transform_result will be the null value. - ArrayValue append_missing_elements = 6; - - // Remove all of the given elements from the array in the field. - // If the field is not an array, or if the field does not yet exist, it is - // set to the empty array. - // - // Equivalent numbers of the different types (e.g. 3L and 3.0) are - // considered equal when deciding whether an element should be removed. - // NaN is equal to NaN, and Null is equal to Null. - // This will remove all equivalent values if there are duplicates. - // - // The corresponding transform_result will be the null value. - ArrayValue remove_all_from_array = 7; - } - } - - // The name of the document to transform. - string document = 1; - - // The list of transformations to apply to the fields of the document, in - // order. - // This must not be empty. - repeated FieldTransform field_transforms = 2; -} - -// The result of applying a write. -message WriteResult { - // The last update time of the document after applying the write. Not set - // after a `delete`. - // - // If the write did not actually change the document, this will be the - // previous update_time. - google.protobuf.Timestamp update_time = 1; - - // The results of applying each [DocumentTransform.FieldTransform][google.firestore.v1.DocumentTransform.FieldTransform], in the - // same order. - repeated Value transform_results = 2; -} - -// A [Document][google.firestore.v1.Document] has changed. -// -// May be the result of multiple [writes][google.firestore.v1.Write], including deletes, that -// ultimately resulted in a new value for the [Document][google.firestore.v1.Document]. 
-// -// Multiple [DocumentChange][google.firestore.v1.DocumentChange] messages may be returned for the same logical -// change, if multiple targets are affected. -message DocumentChange { - // The new state of the [Document][google.firestore.v1.Document]. - // - // If `mask` is set, contains only fields that were updated or added. - Document document = 1; - - // A set of target IDs of targets that match this document. - repeated int32 target_ids = 5; - - // A set of target IDs for targets that no longer match this document. - repeated int32 removed_target_ids = 6; -} - -// A [Document][google.firestore.v1.Document] has been deleted. -// -// May be the result of multiple [writes][google.firestore.v1.Write], including updates, the -// last of which deleted the [Document][google.firestore.v1.Document]. -// -// Multiple [DocumentDelete][google.firestore.v1.DocumentDelete] messages may be returned for the same logical -// delete, if multiple targets are affected. -message DocumentDelete { - // The resource name of the [Document][google.firestore.v1.Document] that was deleted. - string document = 1; - - // A set of target IDs for targets that previously matched this entity. - repeated int32 removed_target_ids = 6; - - // The read timestamp at which the delete was observed. - // - // Greater or equal to the `commit_time` of the delete. - google.protobuf.Timestamp read_time = 4; -} - -// A [Document][google.firestore.v1.Document] has been removed from the view of the targets. -// -// Sent if the document is no longer relevant to a target and is out of view. -// Can be sent instead of a DocumentDelete or a DocumentChange if the server -// can not send the new value of the document. -// -// Multiple [DocumentRemove][google.firestore.v1.DocumentRemove] messages may be returned for the same logical -// write or delete, if multiple targets are affected. -message DocumentRemove { - // The resource name of the [Document][google.firestore.v1.Document] that has gone out of view. - string document = 1; - - // A set of target IDs for targets that previously matched this document. - repeated int32 removed_target_ids = 2; - - // The read timestamp at which the remove was observed. - // - // Greater or equal to the `commit_time` of the change/delete/remove. - google.protobuf.Timestamp read_time = 4; -} - -// A digest of all the documents that match a given target. -message ExistenceFilter { - // The target ID to which this filter applies. - int32 target_id = 1; - - // The total count of documents that match [target_id][google.firestore.v1.ExistenceFilter.target_id]. - // - // If different from the count of documents in the client that match, the - // client must manually determine which documents no longer match the target. - int32 count = 2; -} diff --git a/firestore/google/cloud/firestore_v1/proto/write_pb2.py b/firestore/google/cloud/firestore_v1/proto/write_pb2.py deleted file mode 100644 index 1ed1c44246e2..000000000000 --- a/firestore/google/cloud/firestore_v1/proto/write_pb2.py +++ /dev/null @@ -1,1146 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/firestore_v1/proto/write.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.firestore_v1.proto import ( - common_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_common__pb2, -) -from google.cloud.firestore_v1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1/proto/write.proto", - package="google.firestore.v1", - syntax="proto3", - serialized_options=_b( - "\n\027com.google.firestore.v1B\nWriteProtoP\001Z=": _operator_enum.GREATER_THAN_OR_EQUAL, - ">": _operator_enum.GREATER_THAN, - "array_contains": _operator_enum.ARRAY_CONTAINS, - "in": _operator_enum.IN, - "array_contains_any": _operator_enum.ARRAY_CONTAINS_ANY, -} -_BAD_OP_STRING = "Operator string {!r} is invalid. Valid choices are: {}." -_BAD_OP_NAN_NULL = 'Only an equality filter ("==") can be used with None or NaN values' -_INVALID_WHERE_TRANSFORM = "Transforms cannot be used as where values." -_BAD_DIR_STRING = "Invalid direction {!r}. Must be one of {!r} or {!r}." -_INVALID_CURSOR_TRANSFORM = "Transforms cannot be used as cursor values." -_MISSING_ORDER_BY = ( - 'The "order by" field path {!r} is not present in the cursor data {!r}. ' - "All fields sent to ``order_by()`` must be present in the fields " - "if passed to one of ``start_at()`` / ``start_after()`` / " - "``end_before()`` / ``end_at()`` to define a cursor." -) -_NO_ORDERS_FOR_CURSOR = ( - "Attempting to create a cursor with no fields to order on. " - "When defining a cursor with one of ``start_at()`` / ``start_after()`` / " - "``end_before()`` / ``end_at()``, all fields in the cursor must " - "come from fields set in ``order_by()``." -) -_MISMATCH_CURSOR_W_ORDER_BY = "The cursor {!r} does not match the order fields {!r}." - - -class Query(object): - """Represents a query to the Firestore API. - - Instances of this class are considered immutable: all methods that - would modify an instance instead return a new instance. - - Args: - parent (:class:`~google.cloud.firestore_v1.collection.CollectionReference`): - The collection that this query applies to. - projection (Optional[:class:`google.cloud.proto.firestore.v1.\ - query_pb2.StructuredQuery.Projection`]): - A projection of document fields to limit the query results to. - field_filters (Optional[Tuple[:class:`google.cloud.proto.firestore.v1.\ - query_pb2.StructuredQuery.FieldFilter`, ...]]): - The filters to be applied in the query. - orders (Optional[Tuple[:class:`google.cloud.proto.firestore.v1.\ - query_pb2.StructuredQuery.Order`, ...]]): - The "order by" entries to use in the query. - limit (Optional[int]): - The maximum number of documents the query is allowed to return. - offset (Optional[int]): - The number of results to skip. - start_at (Optional[Tuple[dict, bool]]): - Two-tuple of : - - * a mapping of fields. 
Any field that is present in this mapping - must also be present in ``orders`` - * an ``after`` flag - - The fields and the flag combine to form a cursor used as - a starting point in a query result set. If the ``after`` - flag is :data:`True`, the results will start just after any - documents which have fields matching the cursor, otherwise - any matching documents will be included in the result set. - When the query is formed, the document values - will be used in the order given by ``orders``. - end_at (Optional[Tuple[dict, bool]]): - Two-tuple of: - - * a mapping of fields. Any field that is present in this mapping - must also be present in ``orders`` - * a ``before`` flag - - The fields and the flag combine to form a cursor used as - an ending point in a query result set. If the ``before`` - flag is :data:`True`, the results will end just before any - documents which have fields matching the cursor, otherwise - any matching documents will be included in the result set. - When the query is formed, the document values - will be used in the order given by ``orders``. - all_descendants (Optional[bool]): - When false, selects only collections that are immediate children - of the `parent` specified in the containing `RunQueryRequest`. - When true, selects all descendant collections. - """ - - ASCENDING = "ASCENDING" - """str: Sort query results in ascending order on a field.""" - DESCENDING = "DESCENDING" - """str: Sort query results in descending order on a field.""" - - def __init__( - self, - parent, - projection=None, - field_filters=(), - orders=(), - limit=None, - offset=None, - start_at=None, - end_at=None, - all_descendants=False, - ): - self._parent = parent - self._projection = projection - self._field_filters = field_filters - self._orders = orders - self._limit = limit - self._offset = offset - self._start_at = start_at - self._end_at = end_at - self._all_descendants = all_descendants - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - self._parent == other._parent - and self._projection == other._projection - and self._field_filters == other._field_filters - and self._orders == other._orders - and self._limit == other._limit - and self._offset == other._offset - and self._start_at == other._start_at - and self._end_at == other._end_at - and self._all_descendants == other._all_descendants - ) - - @property - def _client(self): - """The client of the parent collection. - - Returns: - :class:`~google.cloud.firestore_v1.client.Client`: - The client that owns this query. - """ - return self._parent._client - - def select(self, field_paths): - """Project documents matching query to a limited set of fields. - - See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for - more information on **field paths**. - - If the current query already has a projection set (i.e. has already - called :meth:`~google.cloud.firestore_v1.query.Query.select`), this - will overwrite it. - - Args: - field_paths (Iterable[str, ...]): An iterable of field paths - (``.``-delimited list of field names) to use as a projection - of document fields in the query results. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A "projected" query. Acts as a copy of the current query, - modified with the newly added projection. - Raises: - ValueError: If any ``field_path`` is invalid. 
- """ - field_paths = list(field_paths) - for field_path in field_paths: - field_path_module.split_field_path(field_path) # raises - - new_projection = query_pb2.StructuredQuery.Projection( - fields=[ - query_pb2.StructuredQuery.FieldReference(field_path=field_path) - for field_path in field_paths - ] - ) - return self.__class__( - self._parent, - projection=new_projection, - field_filters=self._field_filters, - orders=self._orders, - limit=self._limit, - offset=self._offset, - start_at=self._start_at, - end_at=self._end_at, - all_descendants=self._all_descendants, - ) - - def where(self, field_path, op_string, value): - """Filter the query on a field. - - See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for - more information on **field paths**. - - Returns a new :class:`~google.cloud.firestore_v1.query.Query` that - filters on a specific field path, according to an operation (e.g. - ``==`` or "equals") and a particular value to be paired with that - operation. - - Args: - field_path (str): A field path (``.``-delimited list of - field names) for the field to filter on. - op_string (str): A comparison operation in the form of a string. - Acceptable values are ``<``, ``<=``, ``==``, ``>=``, ``>``, - ``in``, ``array_contains`` and ``array_contains_any``. - value (Any): The value to compare the field against in the filter. - If ``value`` is :data:`None` or a NaN, then ``==`` is the only - allowed operation. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A filtered query. Acts as a copy of the current query, - modified with the newly added filter. - - Raises: - ValueError: If ``field_path`` is invalid. - ValueError: If ``value`` is a NaN or :data:`None` and - ``op_string`` is not ``==``. - """ - field_path_module.split_field_path(field_path) # raises - - if value is None: - if op_string != _EQ_OP: - raise ValueError(_BAD_OP_NAN_NULL) - filter_pb = query_pb2.StructuredQuery.UnaryFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=enums.StructuredQuery.UnaryFilter.Operator.IS_NULL, - ) - elif _isnan(value): - if op_string != _EQ_OP: - raise ValueError(_BAD_OP_NAN_NULL) - filter_pb = query_pb2.StructuredQuery.UnaryFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=enums.StructuredQuery.UnaryFilter.Operator.IS_NAN, - ) - elif isinstance(value, (transforms.Sentinel, transforms._ValueList)): - raise ValueError(_INVALID_WHERE_TRANSFORM) - else: - filter_pb = query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=_enum_from_op_string(op_string), - value=_helpers.encode_value(value), - ) - - new_filters = self._field_filters + (filter_pb,) - return self.__class__( - self._parent, - projection=self._projection, - field_filters=new_filters, - orders=self._orders, - limit=self._limit, - offset=self._offset, - start_at=self._start_at, - end_at=self._end_at, - all_descendants=self._all_descendants, - ) - - @staticmethod - def _make_order(field_path, direction): - """Helper for :meth:`order_by`.""" - return query_pb2.StructuredQuery.Order( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - direction=_enum_from_direction(direction), - ) - - def order_by(self, field_path, direction=ASCENDING): - """Modify the query to add an order clause on a specific field. - - See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for - more information on **field paths**. 
- - Successive :meth:`~google.cloud.firestore_v1.query.Query.order_by` - calls will further refine the ordering of results returned by the query - (i.e. the new "order by" fields will be added to existing ones). - - Args: - field_path (str): A field path (``.``-delimited list of - field names) on which to order the query results. - direction (Optional[str]): The direction to order by. Must be one - of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to - :attr:`ASCENDING`. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - An ordered query. Acts as a copy of the current query, modified - with the newly added "order by" constraint. - - Raises: - ValueError: If ``field_path`` is invalid. - ValueError: If ``direction`` is not one of :attr:`ASCENDING` or - :attr:`DESCENDING`. - """ - field_path_module.split_field_path(field_path) # raises - - order_pb = self._make_order(field_path, direction) - - new_orders = self._orders + (order_pb,) - return self.__class__( - self._parent, - projection=self._projection, - field_filters=self._field_filters, - orders=new_orders, - limit=self._limit, - offset=self._offset, - start_at=self._start_at, - end_at=self._end_at, - all_descendants=self._all_descendants, - ) - - def limit(self, count): - """Limit a query to return a fixed number of results. - - If the current query already has a limit set, this will overwrite it. - - Args: - count (int): Maximum number of documents to return that match - the query. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A limited query. Acts as a copy of the current query, modified - with the newly added "limit" filter. - """ - return self.__class__( - self._parent, - projection=self._projection, - field_filters=self._field_filters, - orders=self._orders, - limit=count, - offset=self._offset, - start_at=self._start_at, - end_at=self._end_at, - all_descendants=self._all_descendants, - ) - - def offset(self, num_to_skip): - """Skip to an offset in a query. - - If the current query already has specified an offset, this will - overwrite it. - - Args: - num_to_skip (int): The number of results to skip at the beginning - of query results. (Must be non-negative.) - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - An offset query. Acts as a copy of the current query, modified - with the newly added "offset" field. - """ - return self.__class__( - self._parent, - projection=self._projection, - field_filters=self._field_filters, - orders=self._orders, - limit=self._limit, - offset=num_to_skip, - start_at=self._start_at, - end_at=self._end_at, - all_descendants=self._all_descendants, - ) - - def _check_snapshot(self, document_fields): - """Validate local snapshots for non-collection-group queries. - - Raises: - ValueError: for non-collection-group queries, if the snapshot - is from a different collection. - """ - if self._all_descendants: - return - - if document_fields.reference._path[:-1] != self._parent._path: - raise ValueError("Cannot use snapshot from another collection as a cursor.") - - def _cursor_helper(self, document_fields, before, start): - """Set values to be used for a ``start_at`` or ``end_at`` cursor. - - The values will later be used in a query protobuf. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1.query.Query.order_by`. 
- - Args: - document_fields - (Union[:class:`~google.cloud.firestore_v1.document.DocumentSnapshot`, dict, list, tuple]): - a document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. - before (bool): Flag indicating if the document in - ``document_fields`` should (:data:`False`) or - shouldn't (:data:`True`) be included in the result set. - start (Optional[bool]): determines if the cursor is a ``start_at`` - cursor (:data:`True`) or an ``end_at`` cursor (:data:`False`). - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. Acts as a copy of the current query, modified - with the newly added "start at" cursor. - """ - if isinstance(document_fields, tuple): - document_fields = list(document_fields) - elif isinstance(document_fields, document.DocumentSnapshot): - self._check_snapshot(document_fields) - else: - # NOTE: We copy so that the caller can't modify after calling. - document_fields = copy.deepcopy(document_fields) - - cursor_pair = document_fields, before - query_kwargs = { - "projection": self._projection, - "field_filters": self._field_filters, - "orders": self._orders, - "limit": self._limit, - "offset": self._offset, - "all_descendants": self._all_descendants, - } - if start: - query_kwargs["start_at"] = cursor_pair - query_kwargs["end_at"] = self._end_at - else: - query_kwargs["start_at"] = self._start_at - query_kwargs["end_at"] = cursor_pair - - return self.__class__(self._parent, **query_kwargs) - - def start_at(self, document_fields): - """Start query results at a particular document value. - - The result set will **include** the document specified by - ``document_fields``. - - If the current query already has specified a start cursor -- either - via this method or - :meth:`~google.cloud.firestore_v1.query.Query.start_after` -- this - will overwrite it. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1.query.Query.order_by`. - - Args: - document_fields - (Union[:class:`~google.cloud.firestore_v1.document.DocumentSnapshot`, dict, list, tuple]): - a document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. Acts as - a copy of the current query, modified with the newly added - "start at" cursor. - """ - return self._cursor_helper(document_fields, before=True, start=True) - - def start_after(self, document_fields): - """Start query results after a particular document value. - - The result set will **exclude** the document specified by - ``document_fields``. - - If the current query already has specified a start cursor -- either - via this method or - :meth:`~google.cloud.firestore_v1.query.Query.start_at` -- this will - overwrite it. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1.query.Query.order_by`. - - Args: - document_fields - (Union[:class:`~google.cloud.firestore_v1.document.DocumentSnapshot`, dict, list, tuple]): - a document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. 
- - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. Acts as a copy of the current query, modified - with the newly added "start after" cursor. - """ - return self._cursor_helper(document_fields, before=False, start=True) - - def end_before(self, document_fields): - """End query results before a particular document value. - - The result set will **exclude** the document specified by - ``document_fields``. - - If the current query already has specified an end cursor -- either - via this method or - :meth:`~google.cloud.firestore_v1.query.Query.end_at` -- this will - overwrite it. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1.query.Query.order_by`. - - Args: - document_fields - (Union[:class:`~google.cloud.firestore_v1.document.DocumentSnapshot`, dict, list, tuple]): - a document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. Acts as a copy of the current query, modified - with the newly added "end before" cursor. - """ - return self._cursor_helper(document_fields, before=True, start=False) - - def end_at(self, document_fields): - """End query results at a particular document value. - - The result set will **include** the document specified by - ``document_fields``. - - If the current query already has specified an end cursor -- either - via this method or - :meth:`~google.cloud.firestore_v1.query.Query.end_before` -- this will - overwrite it. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1.query.Query.order_by`. - - Args: - document_fields - (Union[:class:`~google.cloud.firestore_v1.document.DocumentSnapshot`, dict, list, tuple]): - a document snapshot or a dictionary/list/tuple of fields - representing a query results cursor. A cursor is a collection - of values that represent a position in a query result set. - - Returns: - :class:`~google.cloud.firestore_v1.query.Query`: - A query with cursor. Acts as a copy of the current query, modified - with the newly added "end at" cursor. - """ - return self._cursor_helper(document_fields, before=False, start=False) - - def _filters_pb(self): - """Convert all the filters into a single generic Filter protobuf. - - This may be a lone field filter or unary filter, may be a composite - filter or may be :data:`None`. - - Returns: - :class:`google.cloud.firestore_v1.types.StructuredQuery.Filter`: - A "generic" filter representing the current query's filters. 
- """ - num_filters = len(self._field_filters) - if num_filters == 0: - return None - elif num_filters == 1: - return _filter_pb(self._field_filters[0]) - else: - composite_filter = query_pb2.StructuredQuery.CompositeFilter( - op=enums.StructuredQuery.CompositeFilter.Operator.AND, - filters=[_filter_pb(filter_) for filter_ in self._field_filters], - ) - return query_pb2.StructuredQuery.Filter(composite_filter=composite_filter) - - @staticmethod - def _normalize_projection(projection): - """Helper: convert field paths to message.""" - if projection is not None: - - fields = list(projection.fields) - - if not fields: - field_ref = query_pb2.StructuredQuery.FieldReference( - field_path="__name__" - ) - return query_pb2.StructuredQuery.Projection(fields=[field_ref]) - - return projection - - def _normalize_orders(self): - """Helper: adjust orders based on cursors, where clauses.""" - orders = list(self._orders) - _has_snapshot_cursor = False - - if self._start_at: - if isinstance(self._start_at[0], document.DocumentSnapshot): - _has_snapshot_cursor = True - - if self._end_at: - if isinstance(self._end_at[0], document.DocumentSnapshot): - _has_snapshot_cursor = True - - if _has_snapshot_cursor: - should_order = [ - _enum_from_op_string(key) - for key in _COMPARISON_OPERATORS - if key not in (_EQ_OP, "array_contains") - ] - order_keys = [order.field.field_path for order in orders] - for filter_ in self._field_filters: - field = filter_.field.field_path - if filter_.op in should_order and field not in order_keys: - orders.append(self._make_order(field, "ASCENDING")) - if not orders: - orders.append(self._make_order("__name__", "ASCENDING")) - else: - order_keys = [order.field.field_path for order in orders] - if "__name__" not in order_keys: - direction = orders[-1].direction # enum? - orders.append(self._make_order("__name__", direction)) - - return orders - - def _normalize_cursor(self, cursor, orders): - """Helper: convert cursor to a list of values based on orders.""" - if cursor is None: - return - - if not orders: - raise ValueError(_NO_ORDERS_FOR_CURSOR) - - document_fields, before = cursor - - order_keys = [order.field.field_path for order in orders] - - if isinstance(document_fields, document.DocumentSnapshot): - snapshot = document_fields - document_fields = snapshot.to_dict() - document_fields["__name__"] = snapshot.reference - - if isinstance(document_fields, dict): - # Transform to list using orders - values = [] - data = document_fields - for order_key in order_keys: - try: - if order_key in data: - values.append(data[order_key]) - else: - values.append( - field_path_module.get_nested_value(order_key, data) - ) - except KeyError: - msg = _MISSING_ORDER_BY.format(order_key, data) - raise ValueError(msg) - document_fields = values - - if len(document_fields) != len(orders): - msg = _MISMATCH_CURSOR_W_ORDER_BY.format(document_fields, order_keys) - raise ValueError(msg) - - _transform_bases = (transforms.Sentinel, transforms._ValueList) - - for index, key_field in enumerate(zip(order_keys, document_fields)): - key, field = key_field - - if isinstance(field, _transform_bases): - msg = _INVALID_CURSOR_TRANSFORM - raise ValueError(msg) - - if key == "__name__" and isinstance(field, six.string_types): - document_fields[index] = self._parent.document(field) - - return document_fields, before - - def _to_protobuf(self): - """Convert the current query into the equivalent protobuf. - - Returns: - :class:`google.cloud.firestore_v1.types.StructuredQuery`: - The query protobuf. 
- """ - projection = self._normalize_projection(self._projection) - orders = self._normalize_orders() - start_at = self._normalize_cursor(self._start_at, orders) - end_at = self._normalize_cursor(self._end_at, orders) - - query_kwargs = { - "select": projection, - "from": [ - query_pb2.StructuredQuery.CollectionSelector( - collection_id=self._parent.id, all_descendants=self._all_descendants - ) - ], - "where": self._filters_pb(), - "order_by": orders, - "start_at": _cursor_pb(start_at), - "end_at": _cursor_pb(end_at), - } - if self._offset is not None: - query_kwargs["offset"] = self._offset - if self._limit is not None: - query_kwargs["limit"] = wrappers_pb2.Int32Value(value=self._limit) - - return query_pb2.StructuredQuery(**query_kwargs) - - def get(self, transaction=None): - """Deprecated alias for :meth:`stream`.""" - warnings.warn( - "'Query.get' is deprecated: please use 'Query.stream' instead.", - DeprecationWarning, - stacklevel=2, - ) - return self.stream(transaction=transaction) - - def stream(self, transaction=None): - """Read the documents in the collection that match this query. - - This sends a ``RunQuery`` RPC and then returns an iterator which - consumes each document returned in the stream of ``RunQueryResponse`` - messages. - - .. note:: - - The underlying stream of responses will time out after - the ``max_rpc_timeout_millis`` value set in the GAPIC - client configuration for the ``RunQuery`` API. Snapshots - not consumed from the iterator before that point will be lost. - - If a ``transaction`` is used and it already has write operations - added, this method cannot be used (i.e. read-after-write is not - allowed). - - Args: - transaction - (Optional[:class:`~google.cloud.firestore_v1.transaction.Transaction`]): - An existing transaction that this query will run in. - - Yields: - :class:`~google.cloud.firestore_v1.document.DocumentSnapshot`: - The next document that fulfills the query. - """ - parent_path, expected_prefix = self._parent._parent_info() - response_iterator = self._client._firestore_api.run_query( - parent_path, - self._to_protobuf(), - transaction=_helpers.get_transaction_id(transaction), - metadata=self._client._rpc_metadata, - ) - - for response in response_iterator: - if self._all_descendants: - snapshot = _collection_group_query_response_to_snapshot( - response, self._parent - ) - else: - snapshot = _query_response_to_snapshot( - response, self._parent, expected_prefix - ) - if snapshot is not None: - yield snapshot - - def on_snapshot(self, callback): - """Monitor the documents in this collection that match this query. - - This starts a watch on this query using a background thread. The - provided callback is run on the snapshot of the documents. - - Args: - callback(Callable[[:class:`~google.cloud.firestore.query.QuerySnapshot`], NoneType]): - a callback to run when a change occurs. - - Example: - - .. code-block:: python - - from google.cloud import firestore_v1 - - db = firestore_v1.Client() - query_ref = db.collection(u'users').where("user", "==", u'Ada') - - def on_snapshot(docs, changes, read_time): - for doc in docs: - print(u'{} => {}'.format(doc.id, doc.to_dict())) - - # Watch this query - query_watch = query_ref.on_snapshot(on_snapshot) - - # Terminate this watch - query_watch.unsubscribe() - """ - return Watch.for_query( - self, callback, document.DocumentSnapshot, document.DocumentReference - ) - - def _comparator(self, doc1, doc2): - _orders = self._orders - - # Add implicit sorting by name, using the last specified direction. 
- if len(_orders) == 0: - lastDirection = Query.ASCENDING - else: - if _orders[-1].direction == 1: - lastDirection = Query.ASCENDING - else: - lastDirection = Query.DESCENDING - - orderBys = list(_orders) - - order_pb = query_pb2.StructuredQuery.Order( - field=query_pb2.StructuredQuery.FieldReference(field_path="id"), - direction=_enum_from_direction(lastDirection), - ) - orderBys.append(order_pb) - - for orderBy in orderBys: - if orderBy.field.field_path == "id": - # If ordering by docuent id, compare resource paths. - comp = Order()._compare_to(doc1.reference._path, doc2.reference._path) - else: - if ( - orderBy.field.field_path not in doc1._data - or orderBy.field.field_path not in doc2._data - ): - raise ValueError( - "Can only compare fields that exist in the " - "DocumentSnapshot. Please include the fields you are " - "ordering on in your select() call." - ) - v1 = doc1._data[orderBy.field.field_path] - v2 = doc2._data[orderBy.field.field_path] - encoded_v1 = _helpers.encode_value(v1) - encoded_v2 = _helpers.encode_value(v2) - comp = Order().compare(encoded_v1, encoded_v2) - - if comp != 0: - # 1 == Ascending, -1 == Descending - return orderBy.direction * comp - - return 0 - - -def _enum_from_op_string(op_string): - """Convert a string representation of a binary operator to an enum. - - These enums come from the protobuf message definition - ``StructuredQuery.FieldFilter.Operator``. - - Args: - op_string (str): A comparison operation in the form of a string. - Acceptable values are ``<``, ``<=``, ``==``, ``>=`` - and ``>``. - - Returns: - int: The enum corresponding to ``op_string``. - - Raises: - ValueError: If ``op_string`` is not a valid operator. - """ - try: - return _COMPARISON_OPERATORS[op_string] - except KeyError: - choices = ", ".join(sorted(_COMPARISON_OPERATORS.keys())) - msg = _BAD_OP_STRING.format(op_string, choices) - raise ValueError(msg) - - -def _isnan(value): - """Check if a value is NaN. - - This differs from ``math.isnan`` in that **any** input type is - allowed. - - Args: - value (Any): A value to check for NaN-ness. - - Returns: - bool: Indicates if the value is the NaN float. - """ - if isinstance(value, float): - return math.isnan(value) - else: - return False - - -def _enum_from_direction(direction): - """Convert a string representation of a direction to an enum. - - Args: - direction (str): A direction to order by. Must be one of - :attr:`~google.cloud.firestore.Query.ASCENDING` or - :attr:`~google.cloud.firestore.Query.DESCENDING`. - - Returns: - int: The enum corresponding to ``direction``. - - Raises: - ValueError: If ``direction`` is not a valid direction. - """ - if isinstance(direction, int): - return direction - - if direction == Query.ASCENDING: - return enums.StructuredQuery.Direction.ASCENDING - elif direction == Query.DESCENDING: - return enums.StructuredQuery.Direction.DESCENDING - else: - msg = _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING) - raise ValueError(msg) - - -def _filter_pb(field_or_unary): - """Convert a specific protobuf filter to the generic filter type. - - Args: - field_or_unary (Union[google.cloud.proto.firestore.v1.\ - query_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\ - firestore.v1.query_pb2.StructuredQuery.FieldFilter]): A - field or unary filter to convert to a generic filter. - - Returns: - google.cloud.firestore_v1.types.\ - StructuredQuery.Filter: A "generic" filter. - - Raises: - ValueError: If ``field_or_unary`` is not a field or unary filter. 
- """ - if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter): - return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary) - elif isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter): - return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary) - else: - raise ValueError("Unexpected filter type", type(field_or_unary), field_or_unary) - - -def _cursor_pb(cursor_pair): - """Convert a cursor pair to a protobuf. - - If ``cursor_pair`` is :data:`None`, just returns :data:`None`. - - Args: - cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of - - * a list of field values. - * a ``before`` flag - - Returns: - Optional[google.cloud.firestore_v1.types.Cursor]: A - protobuf cursor corresponding to the values. - """ - if cursor_pair is not None: - data, before = cursor_pair - value_pbs = [_helpers.encode_value(value) for value in data] - return query_pb2.Cursor(values=value_pbs, before=before) - - -def _query_response_to_snapshot(response_pb, collection, expected_prefix): - """Parse a query response protobuf to a document snapshot. - - Args: - response_pb (google.cloud.proto.firestore.v1.\ - firestore_pb2.RunQueryResponse): A - collection (:class:`~google.cloud.firestore_v1.collection.CollectionReference`): - A reference to the collection that initiated the query. - expected_prefix (str): The expected prefix for fully-qualified - document names returned in the query results. This can be computed - directly from ``collection`` via :meth:`_parent_info`. - - Returns: - Optional[:class:`~google.cloud.firestore.document.DocumentSnapshot`]: - A snapshot of the data returned in the query. If - ``response_pb.document`` is not set, the snapshot will be :data:`None`. - """ - if not response_pb.HasField("document"): - return None - - document_id = _helpers.get_doc_id(response_pb.document, expected_prefix) - reference = collection.document(document_id) - data = _helpers.decode_dict(response_pb.document.fields, collection._client) - snapshot = document.DocumentSnapshot( - reference, - data, - exists=True, - read_time=response_pb.read_time, - create_time=response_pb.document.create_time, - update_time=response_pb.document.update_time, - ) - return snapshot - - -def _collection_group_query_response_to_snapshot(response_pb, collection): - """Parse a query response protobuf to a document snapshot. - - Args: - response_pb (google.cloud.proto.firestore.v1.\ - firestore_pb2.RunQueryResponse): A - collection (:class:`~google.cloud.firestore_v1.collection.CollectionReference`): - A reference to the collection that initiated the query. - - Returns: - Optional[:class:`~google.cloud.firestore.document.DocumentSnapshot`]: - A snapshot of the data returned in the query. If - ``response_pb.document`` is not set, the snapshot will be :data:`None`. 
- """ - if not response_pb.HasField("document"): - return None - reference = collection._client.document(response_pb.document.name) - data = _helpers.decode_dict(response_pb.document.fields, collection._client) - snapshot = document.DocumentSnapshot( - reference, - data, - exists=True, - read_time=response_pb.read_time, - create_time=response_pb.document.create_time, - update_time=response_pb.document.update_time, - ) - return snapshot diff --git a/firestore/google/cloud/firestore_v1/transaction.py b/firestore/google/cloud/firestore_v1/transaction.py deleted file mode 100644 index 9d4068c75a88..000000000000 --- a/firestore/google/cloud/firestore_v1/transaction.py +++ /dev/null @@ -1,442 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helpers for applying Google Cloud Firestore changes in a transaction.""" - - -import random -import time - -import six - -from google.api_core import exceptions -from google.cloud.firestore_v1 import batch -from google.cloud.firestore_v1 import types -from google.cloud.firestore_v1.document import DocumentReference -from google.cloud.firestore_v1.query import Query - - -MAX_ATTEMPTS = 5 -"""int: Default number of transaction attempts (with retries).""" -_CANT_BEGIN = "The transaction has already begun. Current transaction ID: {!r}." -_MISSING_ID_TEMPLATE = "The transaction has no transaction ID, so it cannot be {}." -_CANT_ROLLBACK = _MISSING_ID_TEMPLATE.format("rolled back") -_CANT_COMMIT = _MISSING_ID_TEMPLATE.format("committed") -_WRITE_READ_ONLY = "Cannot perform write operation in read-only transaction." -_INITIAL_SLEEP = 1.0 -"""float: Initial "max" for sleep interval. To be used in :func:`_sleep`.""" -_MAX_SLEEP = 30.0 -"""float: Eventual "max" sleep time. To be used in :func:`_sleep`.""" -_MULTIPLIER = 2.0 -"""float: Multiplier for exponential backoff. To be used in :func:`_sleep`.""" -_EXCEED_ATTEMPTS_TEMPLATE = "Failed to commit transaction in {:d} attempts." -_CANT_RETRY_READ_ONLY = "Only read-write transactions can be retried." - - -class Transaction(batch.WriteBatch): - """Accumulate read-and-write operations to be sent in a transaction. - - Args: - client (:class:`~google.cloud.firestore_v1.client.Client`): - The client that created this transaction. - max_attempts (Optional[int]): The maximum number of attempts for - the transaction (i.e. allowing retries). Defaults to - :attr:`~google.cloud.firestore_v1.transaction.MAX_ATTEMPTS`. - read_only (Optional[bool]): Flag indicating if the transaction - should be read-only or should allow writes. Defaults to - :data:`False`. - """ - - def __init__(self, client, max_attempts=MAX_ATTEMPTS, read_only=False): - super(Transaction, self).__init__(client) - self._max_attempts = max_attempts - self._read_only = read_only - self._id = None - - def _add_write_pbs(self, write_pbs): - """Add `Write`` protobufs to this transaction. - - Args: - write_pbs (List[google.cloud.proto.firestore.v1.\ - write_pb2.Write]): A list of write protobufs to be added. 
- - Raises: - ValueError: If this transaction is read-only. - """ - if self._read_only: - raise ValueError(_WRITE_READ_ONLY) - - super(Transaction, self)._add_write_pbs(write_pbs) - - def _options_protobuf(self, retry_id): - """Convert the current object to protobuf. - - The ``retry_id`` value is used when retrying a transaction that - failed (e.g. due to contention). It is intended to be the "first" - transaction that failed (i.e. if multiple retries are needed). - - Args: - retry_id (Union[bytes, NoneType]): Transaction ID of a transaction - to be retried. - - Returns: - Optional[google.cloud.firestore_v1.types.TransactionOptions]: - The protobuf ``TransactionOptions`` if ``read_only==True`` or if - there is a transaction ID to be retried, else :data:`None`. - - Raises: - ValueError: If ``retry_id`` is not :data:`None` but the - transaction is read-only. - """ - if retry_id is not None: - if self._read_only: - raise ValueError(_CANT_RETRY_READ_ONLY) - - return types.TransactionOptions( - read_write=types.TransactionOptions.ReadWrite( - retry_transaction=retry_id - ) - ) - elif self._read_only: - return types.TransactionOptions( - read_only=types.TransactionOptions.ReadOnly() - ) - else: - return None - - @property - def in_progress(self): - """Determine if this transaction has already begun. - - Returns: - bool: Indicates if the transaction has started. - """ - return self._id is not None - - @property - def id(self): - """Get the current transaction ID. - - Returns: - Optional[bytes]: The transaction ID (or :data:`None` if the - current transaction is not in progress). - """ - return self._id - - def _begin(self, retry_id=None): - """Begin the transaction. - - Args: - retry_id (Optional[bytes]): Transaction ID of a transaction to be - retried. - - Raises: - ValueError: If the current transaction has already begun. - """ - if self.in_progress: - msg = _CANT_BEGIN.format(self._id) - raise ValueError(msg) - - transaction_response = self._client._firestore_api.begin_transaction( - self._client._database_string, - options_=self._options_protobuf(retry_id), - metadata=self._client._rpc_metadata, - ) - self._id = transaction_response.transaction - - def _clean_up(self): - """Clean up the instance after :meth:`_rollback`` or :meth:`_commit``. - - This intended to occur on success or failure of the associated RPCs. - """ - self._write_pbs = [] - self._id = None - - def _rollback(self): - """Roll back the transaction. - - Raises: - ValueError: If no transaction is in progress. - """ - if not self.in_progress: - raise ValueError(_CANT_ROLLBACK) - - try: - # NOTE: The response is just ``google.protobuf.Empty``. - self._client._firestore_api.rollback( - self._client._database_string, - self._id, - metadata=self._client._rpc_metadata, - ) - finally: - self._clean_up() - - def _commit(self): - """Transactionally commit the changes accumulated. - - Returns: - List[:class:`google.cloud.proto.firestore.v1.write_pb2.WriteResult`, ...]: - The write results corresponding to the changes committed, returned - in the same order as the changes were applied to this transaction. - A write result contains an ``update_time`` field. - - Raises: - ValueError: If no transaction is in progress. - """ - if not self.in_progress: - raise ValueError(_CANT_COMMIT) - - commit_response = _commit_with_retry(self._client, self._write_pbs, self._id) - - self._clean_up() - return list(commit_response.write_results) - - def get_all(self, references): - """Retrieves multiple documents from Firestore. 
- - Args: - references (List[.DocumentReference, ...]): Iterable of document - references to be retrieved. - - Yields: - .DocumentSnapshot: The next document snapshot that fulfills the - query, or :data:`None` if the document does not exist. - """ - return self._client.get_all(references, transaction=self._id) - - def get(self, ref_or_query): - """ - Retrieve a document or a query result from the database. - Args: - ref_or_query The document references or query object to return. - Yields: - .DocumentSnapshot: The next document snapshot that fulfills the - query, or :data:`None` if the document does not exist. - """ - if isinstance(ref_or_query, DocumentReference): - return self._client.get_all([ref_or_query], transaction=self._id) - elif isinstance(ref_or_query, Query): - return ref_or_query.stream(transaction=self._id) - else: - raise ValueError( - 'Value for argument "ref_or_query" must be a DocumentReference or a Query.' - ) - - -class _Transactional(object): - """Provide a callable object to use as a transactional decorater. - - This is surfaced via - :func:`~google.cloud.firestore_v1.transaction.transactional`. - - Args: - to_wrap (Callable[[:class:`~google.cloud.firestore_v1.transaction.Transaction`, ...], Any]): - A callable that should be run (and retried) in a transaction. - """ - - def __init__(self, to_wrap): - self.to_wrap = to_wrap - self.current_id = None - """Optional[bytes]: The current transaction ID.""" - self.retry_id = None - """Optional[bytes]: The ID of the first attempted transaction.""" - - def _reset(self): - """Unset the transaction IDs.""" - self.current_id = None - self.retry_id = None - - def _pre_commit(self, transaction, *args, **kwargs): - """Begin transaction and call the wrapped callable. - - If the callable raises an exception, the transaction will be rolled - back. If not, the transaction will be "ready" for ``Commit`` (i.e. - it will have staged writes). - - Args: - transaction - (:class:`~google.cloud.firestore_v1.transaction.Transaction`): - A transaction to execute the callable within. - args (Tuple[Any, ...]): The extra positional arguments to pass - along to the wrapped callable. - kwargs (Dict[str, Any]): The extra keyword arguments to pass - along to the wrapped callable. - - Returns: - Any: result of the wrapped callable. - - Raises: - Exception: Any failure caused by ``to_wrap``. - """ - # Force the ``transaction`` to be not "in progress". - transaction._clean_up() - transaction._begin(retry_id=self.retry_id) - - # Update the stored transaction IDs. - self.current_id = transaction._id - if self.retry_id is None: - self.retry_id = self.current_id - try: - return self.to_wrap(transaction, *args, **kwargs) - except: # noqa - # NOTE: If ``rollback`` fails this will lose the information - # from the original failure. - transaction._rollback() - raise - - def _maybe_commit(self, transaction): - """Try to commit the transaction. - - If the transaction is read-write and the ``Commit`` fails with the - ``ABORTED`` status code, it will be retried. Any other failure will - not be caught. - - Args: - transaction - (:class:`~google.cloud.firestore_v1.transaction.Transaction`): - The transaction to be ``Commit``-ed. - - Returns: - bool: Indicating if the commit succeeded. - """ - try: - transaction._commit() - return True - except exceptions.GoogleAPICallError as exc: - if transaction._read_only: - raise - - if isinstance(exc, exceptions.Aborted): - # If a read-write transaction returns ABORTED, retry. 
- return False - else: - raise - - def __call__(self, transaction, *args, **kwargs): - """Execute the wrapped callable within a transaction. - - Args: - transaction - (:class:`~google.cloud.firestore_v1.transaction.Transaction`): - A transaction to execute the callable within. - args (Tuple[Any, ...]): The extra positional arguments to pass - along to the wrapped callable. - kwargs (Dict[str, Any]): The extra keyword arguments to pass - along to the wrapped callable. - - Returns: - Any: The result of the wrapped callable. - - Raises: - ValueError: If the transaction does not succeed in - ``max_attempts``. - """ - self._reset() - - for attempt in six.moves.xrange(transaction._max_attempts): - result = self._pre_commit(transaction, *args, **kwargs) - succeeded = self._maybe_commit(transaction) - if succeeded: - return result - - # Subsequent requests will use the failed transaction ID as part of - # the ``BeginTransactionRequest`` when restarting this transaction - # (via ``options.retry_transaction``). This preserves the "spot in - # line" of the transaction, so exponential backoff is not required - # in this case. - - transaction._rollback() - msg = _EXCEED_ATTEMPTS_TEMPLATE.format(transaction._max_attempts) - raise ValueError(msg) - - -def transactional(to_wrap): - """Decorate a callable so that it runs in a transaction. - - Args: - to_wrap - (Callable[[:class:`~google.cloud.firestore_v1.transaction.Transaction`, ...], Any]): - A callable that should be run (and retried) in a transaction. - - Returns: - Callable[[:class:`~google.cloud.firestore_v1.transaction.Transaction`, ...], Any]: - the wrapped callable. - """ - return _Transactional(to_wrap) - - -def _commit_with_retry(client, write_pbs, transaction_id): - """Call ``Commit`` on the GAPIC client with retry / sleep. - - Retries the ``Commit`` RPC on Unavailable. Usually this RPC-level - retry is handled by the underlying GAPICd client, but in this case it - doesn't because ``Commit`` is not always idempotent. But here we know it - is "idempotent"-like because it has a transaction ID. We also need to do - our own retry to special-case the ``INVALID_ARGUMENT`` error. - - Args: - client (:class:`~google.cloud.firestore_v1.client.Client`): - A client with GAPIC client and configuration details. - write_pbs (List[:class:`google.cloud.proto.firestore.v1.write_pb2.Write`, ...]): - A ``Write`` protobuf instance to be committed. - transaction_id (bytes): - ID of an existing transaction that this commit will run in. - - Returns: - :class:`google.cloud.firestore_v1.types.CommitResponse`: - The protobuf response from ``Commit``. - - Raises: - ~google.api_core.exceptions.GoogleAPICallError: If a non-retryable - exception is encountered. - """ - current_sleep = _INITIAL_SLEEP - while True: - try: - return client._firestore_api.commit( - client._database_string, - write_pbs, - transaction=transaction_id, - metadata=client._rpc_metadata, - ) - except exceptions.ServiceUnavailable: - # Retry - pass - - current_sleep = _sleep(current_sleep) - - -def _sleep(current_sleep, max_sleep=_MAX_SLEEP, multiplier=_MULTIPLIER): - """Sleep and produce a new sleep time. - - .. _Exponential Backoff And Jitter: https://www.awsarchitectureblog.com/\ - 2015/03/backoff.html - - Select a duration between zero and ``current_sleep``. It might seem - counterintuitive to have so much jitter, but - `Exponential Backoff And Jitter`_ argues that "full jitter" is - the best strategy. - - Args: - current_sleep (float): The current "max" for sleep interval. 
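The `transactional` decorator above wraps a callable in `_Transactional`, which begins a transaction, reuses the failed transaction ID on retries, and commits up to `max_attempts` times before raising. A hedged usage sketch; the document path and field name are made up, and `Client.transaction()` is assumed to construct the `Transaction` defined above:

# Hedged usage sketch for transactional(); "counters/visits" and "count" are
# hypothetical names. The read happens before any staged write on purpose,
# because read-after-write is rejected inside a transaction.
from google.cloud import firestore_v1

db = firestore_v1.Client()
transaction = db.transaction()  # read-write, MAX_ATTEMPTS retries by default


@firestore_v1.transactional
def bump_counter(transaction, doc_ref):
    snapshot = next(transaction.get(doc_ref))  # Transaction.get yields snapshots
    count = (snapshot.to_dict() or {}).get(u"count", 0)
    transaction.set(doc_ref, {u"count": count + 1})


bump_counter(transaction, db.document(u"counters/visits"))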
- max_sleep (Optional[float]): Eventual "max" sleep time - multiplier (Optional[float]): Multiplier for exponential backoff. - - Returns: - float: Newly doubled ``current_sleep`` or ``max_sleep`` (whichever - is smaller) - """ - actual_sleep = random.uniform(0.0, current_sleep) - time.sleep(actual_sleep) - return min(multiplier * current_sleep, max_sleep) diff --git a/firestore/google/cloud/firestore_v1/transforms.py b/firestore/google/cloud/firestore_v1/transforms.py deleted file mode 100644 index 83b644608d01..000000000000 --- a/firestore/google/cloud/firestore_v1/transforms.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helpful constants to use for Google Cloud Firestore.""" - - -class Sentinel(object): - """Sentinel objects used to signal special handling.""" - - __slots__ = ("description",) - - def __init__(self, description): - self.description = description - - def __repr__(self): - return "Sentinel: {}".format(self.description) - - -DELETE_FIELD = Sentinel("Value used to delete a field in a document.") - - -SERVER_TIMESTAMP = Sentinel( - "Value used to set a document field to the server timestamp." -) - - -class _ValueList(object): - """Read-only list of values. - - Args: - values (List | Tuple): values held in the helper. - """ - - slots = ("_values",) - - def __init__(self, values): - if not isinstance(values, (list, tuple)): - raise ValueError("'values' must be a list or tuple.") - - if len(values) == 0: - raise ValueError("'values' must be non-empty.") - - self._values = list(values) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._values == other._values - - @property - def values(self): - """Values to append. - - Returns (List): - values to be appended by the transform. - """ - return self._values - - -class ArrayUnion(_ValueList): - """Field transform: appends missing values to an array field. - - See: - https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.append_missing_elements - - Args: - values (List | Tuple): values to append. - """ - - -class ArrayRemove(_ValueList): - """Field transform: remove values from an array field. - - See: - https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.remove_all_from_array - - Args: - values (List | Tuple): values to remove. - """ - - -class _NumericValue(object): - """Hold a single integer / float value. - - Args: - value (int | float): value held in the helper. 
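`_commit_with_retry` above retries ``Commit`` on ``ServiceUnavailable``, sleeping via the `_sleep` helper, which implements the "full jitter" strategy: sleep a uniform random duration below the current cap, then grow the cap geometrically up to `_MAX_SLEEP`. A self-contained sketch of that loop shape, with stand-in names for the operation and the retryable error:

# Standalone sketch of the full-jitter backoff used by _commit_with_retry and
# _sleep above; TransientError and do_commit are stand-ins, not library API.
import random
import time


class TransientError(Exception):
    """Stand-in for google.api_core.exceptions.ServiceUnavailable."""


def retry_with_full_jitter(do_commit, initial=1.0, maximum=30.0, multiplier=2.0):
    current_sleep = initial
    while True:
        try:
            return do_commit()
        except TransientError:
            pass
        # Full jitter: sleep somewhere in [0, current_sleep), then raise the cap.
        time.sleep(random.uniform(0.0, current_sleep))
        current_sleep = min(multiplier * current_sleep, maximum)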
- """ - - def __init__(self, value): - if not isinstance(value, (int, float)): - raise ValueError("Pass an integer / float value.") - - self._value = value - - @property - def value(self): - """Value used by the transform. - - Returns: - (Integer | Float) value passed in the constructor. - """ - return self._value - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._value == other._value - - -class Increment(_NumericValue): - """Field transform: increment a numeric field with specified value. - - See: - https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.increment - - Args: - value (int | float): value used to increment the field. - """ - - -class Maximum(_NumericValue): - """Field transform: bound numeric field with specified value. - - See: - https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.maximum - - Args: - value (int | float): value used to bound the field. - """ - - -class Minimum(_NumericValue): - """Field transform: bound numeric field with specified value. - - See: - https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.minimum - - Args: - value (int | float): value used to bound the field. - """ diff --git a/firestore/google/cloud/firestore_v1/types.py b/firestore/google/cloud/firestore_v1/types.py deleted file mode 100644 index c4e7c350783d..000000000000 --- a/firestore/google/cloud/firestore_v1/types.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -import sys - -from google.api import http_pb2 -from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import struct_pb2 -from google.protobuf import timestamp_pb2 -from google.protobuf import wrappers_pb2 -from google.rpc import status_pb2 -from google.type import latlng_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.firestore_v1.proto import common_pb2 -from google.cloud.firestore_v1.proto import document_pb2 -from google.cloud.firestore_v1.proto import firestore_pb2 -from google.cloud.firestore_v1.proto import query_pb2 -from google.cloud.firestore_v1.proto import write_pb2 - - -_shared_modules = [ - http_pb2, - any_pb2, - descriptor_pb2, - empty_pb2, - struct_pb2, - timestamp_pb2, - wrappers_pb2, - status_pb2, - latlng_pb2, -] - -_local_modules = [common_pb2, document_pb2, firestore_pb2, query_pb2, write_pb2] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.firestore_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/firestore/google/cloud/firestore_v1/watch.py b/firestore/google/cloud/firestore_v1/watch.py deleted file mode 100644 index 1037322230d1..000000000000 --- a/firestore/google/cloud/firestore_v1/watch.py +++ /dev/null @@ -1,743 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
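The deleted `types` module above walks the shared and Firestore-specific protobuf modules with `get_messages` and re-exports every message as a module attribute, so callers reference protobufs as `types.<MessageName>`. A small hedged illustration of that attribute-style access:

# Hedged illustration of the attribute-style access enabled by the types module
# above; Cursor and TransactionOptions are among the re-exported messages.
from google.cloud.firestore_v1 import types

cursor = types.Cursor(before=True)
options = types.TransactionOptions(read_only=types.TransactionOptions.ReadOnly())
print(sorted(name for name in types.__all__ if name.startswith("Transaction")))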
- -import logging -import collections -import threading -import datetime -from enum import Enum -import functools - -import pytz - -from google.api_core.bidi import ResumableBidiRpc -from google.api_core.bidi import BackgroundConsumer -from google.cloud.firestore_v1.proto import firestore_pb2 -from google.cloud.firestore_v1 import _helpers - -from google.api_core import exceptions - -import grpc - -"""Python client for Google Cloud Firestore Watch.""" - -_LOGGER = logging.getLogger(__name__) - -WATCH_TARGET_ID = 0x5079 # "Py" - -GRPC_STATUS_CODE = { - "OK": 0, - "CANCELLED": 1, - "UNKNOWN": 2, - "INVALID_ARGUMENT": 3, - "DEADLINE_EXCEEDED": 4, - "NOT_FOUND": 5, - "ALREADY_EXISTS": 6, - "PERMISSION_DENIED": 7, - "UNAUTHENTICATED": 16, - "RESOURCE_EXHAUSTED": 8, - "FAILED_PRECONDITION": 9, - "ABORTED": 10, - "OUT_OF_RANGE": 11, - "UNIMPLEMENTED": 12, - "INTERNAL": 13, - "UNAVAILABLE": 14, - "DATA_LOSS": 15, - "DO_NOT_USE": -1, -} -_RPC_ERROR_THREAD_NAME = "Thread-OnRpcTerminated" -_RECOVERABLE_STREAM_EXCEPTIONS = ( - exceptions.Aborted, - exceptions.Cancelled, - exceptions.Unknown, - exceptions.DeadlineExceeded, - exceptions.ResourceExhausted, - exceptions.InternalServerError, - exceptions.ServiceUnavailable, - exceptions.Unauthenticated, -) -_TERMINATING_STREAM_EXCEPTIONS = (exceptions.Cancelled,) - -DocTreeEntry = collections.namedtuple("DocTreeEntry", ["value", "index"]) - - -class WatchDocTree(object): - # TODO: Currently this uses a dict. Other implementations us an rbtree. - # The performance of this implementation should be investigated and may - # require modifying the underlying datastructure to a rbtree. - def __init__(self): - self._dict = {} - self._index = 0 - - def keys(self): - return list(self._dict.keys()) - - def _copy(self): - wdt = WatchDocTree() - wdt._dict = self._dict.copy() - wdt._index = self._index - self = wdt - return self - - def insert(self, key, value): - self = self._copy() - self._dict[key] = DocTreeEntry(value, self._index) - self._index += 1 - return self - - def find(self, key): - return self._dict[key] - - def remove(self, key): - self = self._copy() - del self._dict[key] - return self - - def __iter__(self): - for k in self._dict: - yield k - - def __len__(self): - return len(self._dict) - - def __contains__(self, k): - return k in self._dict - - -class ChangeType(Enum): - ADDED = 1 - REMOVED = 2 - MODIFIED = 3 - - -class DocumentChange(object): - def __init__(self, type, document, old_index, new_index): - """DocumentChange - - Args: - type (ChangeType): - document (document.DocumentSnapshot): - old_index (int): - new_index (int): - """ - # TODO: spec indicated an isEqual param also - self.type = type - self.document = document - self.old_index = old_index - self.new_index = new_index - - -class WatchResult(object): - def __init__(self, snapshot, name, change_type): - self.snapshot = snapshot - self.name = name - self.change_type = change_type - - -def _maybe_wrap_exception(exception): - """Wraps a gRPC exception class, if needed.""" - if isinstance(exception, grpc.RpcError): - return exceptions.from_grpc_error(exception) - return exception - - -def document_watch_comparator(doc1, doc2): - assert doc1 == doc2, "Document watches only support one document." 
- return 0 - - -def _should_recover(exception): - wrapped = _maybe_wrap_exception(exception) - return isinstance(wrapped, _RECOVERABLE_STREAM_EXCEPTIONS) - - -def _should_terminate(exception): - wrapped = _maybe_wrap_exception(exception) - return isinstance(wrapped, _TERMINATING_STREAM_EXCEPTIONS) - - -class Watch(object): - - BackgroundConsumer = BackgroundConsumer # FBO unit tests - ResumableBidiRpc = ResumableBidiRpc # FBO unit tests - - def __init__( - self, - document_reference, - firestore, - target, - comparator, - snapshot_callback, - document_snapshot_cls, - document_reference_cls, - BackgroundConsumer=None, # FBO unit testing - ResumableBidiRpc=None, # FBO unit testing - ): - """ - Args: - firestore: - target: - comparator: - snapshot_callback: Callback method to process snapshots. - Args: - docs (List(DocumentSnapshot)): A callback that returns the - ordered list of documents stored in this snapshot. - changes (List(str)): A callback that returns the list of - changed documents since the last snapshot delivered for - this watch. - read_time (string): The ISO 8601 time at which this - snapshot was obtained. - - document_snapshot_cls: instance of DocumentSnapshot - document_reference_cls: instance of DocumentReference - """ - self._document_reference = document_reference - self._firestore = firestore - self._api = firestore._firestore_api - self._targets = target - self._comparator = comparator - self.DocumentSnapshot = document_snapshot_cls - self.DocumentReference = document_reference_cls - self._snapshot_callback = snapshot_callback - self._closing = threading.Lock() - self._closed = False - - self.resume_token = None - - rpc_request = self._get_rpc_request - - if ResumableBidiRpc is None: - ResumableBidiRpc = self.ResumableBidiRpc # FBO unit tests - - self._rpc = ResumableBidiRpc( - self._api.transport.listen, - should_recover=_should_recover, - should_terminate=_should_terminate, - initial_request=rpc_request, - metadata=self._firestore._rpc_metadata, - ) - - self._rpc.add_done_callback(self._on_rpc_done) - - # Initialize state for on_snapshot - # The sorted tree of QueryDocumentSnapshots as sent in the last - # snapshot. We only look at the keys. - self.doc_tree = WatchDocTree() - - # A map of document names to QueryDocumentSnapshots for the last sent - # snapshot. - self.doc_map = {} - - # The accumulates map of document changes (keyed by document name) for - # the current snapshot. - self.change_map = {} - - # The current state of the query results. - self.current = False - - # We need this to track whether we've pushed an initial set of changes, - # since we should push those even when there are no changes, if there - # aren't docs. - self.has_pushed = False - - # The server assigns and updates the resume token. - if BackgroundConsumer is None: # FBO unit tests - BackgroundConsumer = self.BackgroundConsumer - - self._consumer = BackgroundConsumer(self._rpc, self.on_snapshot) - self._consumer.start() - - def _get_rpc_request(self): - if self.resume_token is not None: - self._targets["resume_token"] = self.resume_token - return firestore_pb2.ListenRequest( - database=self._firestore._database_string, add_target=self._targets - ) - - @property - def is_active(self): - """bool: True if this manager is actively streaming. - - Note that ``False`` does not indicate this is complete shut down, - just that it stopped getting new messages. 
- """ - return self._consumer is not None and self._consumer.is_active - - def close(self, reason=None): - """Stop consuming messages and shutdown all helper threads. - - This method is idempotent. Additional calls will have no effect. - - Args: - reason (Any): The reason to close this. If None, this is considered - an "intentional" shutdown. - """ - with self._closing: - if self._closed: - return - - # Stop consuming messages. - if self.is_active: - _LOGGER.debug("Stopping consumer.") - self._consumer.stop() - self._consumer = None - - self._rpc.close() - self._rpc = None - self._closed = True - _LOGGER.debug("Finished stopping manager.") - - if reason: - # Raise an exception if a reason is provided - _LOGGER.debug("reason for closing: %s" % reason) - if isinstance(reason, Exception): - raise reason - raise RuntimeError(reason) - - def _on_rpc_done(self, future): - """Triggered whenever the underlying RPC terminates without recovery. - - This is typically triggered from one of two threads: the background - consumer thread (when calling ``recv()`` produces a non-recoverable - error) or the grpc management thread (when cancelling the RPC). - - This method is *non-blocking*. It will start another thread to deal - with shutting everything down. This is to prevent blocking in the - background consumer and preventing it from being ``joined()``. - """ - _LOGGER.info("RPC termination has signaled manager shutdown.") - future = _maybe_wrap_exception(future) - thread = threading.Thread( - name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future} - ) - thread.daemon = True - thread.start() - - def unsubscribe(self): - self.close() - - @classmethod - def for_document( - cls, - document_ref, - snapshot_callback, - snapshot_class_instance, - reference_class_instance, - ): - """ - Creates a watch snapshot listener for a document. snapshot_callback - receives a DocumentChange object, but may also start to get - targetChange and such soon - - Args: - document_ref: Reference to Document - snapshot_callback: callback to be called on snapshot - snapshot_class_instance: instance of DocumentSnapshot to make - snapshots with to pass to snapshot_callback - reference_class_instance: instance of DocumentReference to make - references - - """ - return cls( - document_ref, - document_ref._client, - { - "documents": {"documents": [document_ref._document_path]}, - "target_id": WATCH_TARGET_ID, - }, - document_watch_comparator, - snapshot_callback, - snapshot_class_instance, - reference_class_instance, - ) - - @classmethod - def for_query( - cls, query, snapshot_callback, snapshot_class_instance, reference_class_instance - ): - parent_path, _ = query._parent._parent_info() - query_target = firestore_pb2.Target.QueryTarget( - parent=parent_path, structured_query=query._to_protobuf() - ) - - return cls( - query, - query._client, - {"query": query_target, "target_id": WATCH_TARGET_ID}, - query._comparator, - snapshot_callback, - snapshot_class_instance, - reference_class_instance, - ) - - def _on_snapshot_target_change_no_change(self, proto): - _LOGGER.debug("on_snapshot: target change: NO_CHANGE") - change = proto.target_change - - no_target_ids = change.target_ids is None or len(change.target_ids) == 0 - if no_target_ids and change.read_time and self.current: - # TargetChange.CURRENT followed by TargetChange.NO_CHANGE - # signals a consistent state. Invoke the onSnapshot - # callback as specified by the user. 
- self.push(change.read_time, change.resume_token) - - def _on_snapshot_target_change_add(self, proto): - _LOGGER.debug("on_snapshot: target change: ADD") - target_id = proto.target_change.target_ids[0] - if target_id != WATCH_TARGET_ID: - raise RuntimeError("Unexpected target ID %s sent by server" % target_id) - - def _on_snapshot_target_change_remove(self, proto): - _LOGGER.debug("on_snapshot: target change: REMOVE") - change = proto.target_change - - code = 13 - message = "internal error" - if change.cause: - code = change.cause.code - message = change.cause.message - - message = "Error %s: %s" % (code, message) - - raise RuntimeError(message) - - def _on_snapshot_target_change_reset(self, proto): - # Whatever changes have happened so far no longer matter. - _LOGGER.debug("on_snapshot: target change: RESET") - self._reset_docs() - - def _on_snapshot_target_change_current(self, proto): - _LOGGER.debug("on_snapshot: target change: CURRENT") - self.current = True - - def on_snapshot(self, proto): - """ - Called everytime there is a response from listen. Collect changes - and 'push' the changes in a batch to the customer when we receive - 'current' from the listen response. - - Args: - listen_response(`google.cloud.firestore_v1.types.ListenResponse`): - Callback method that receives a object to - """ - TargetChange = firestore_pb2.TargetChange - - target_changetype_dispatch = { - TargetChange.NO_CHANGE: self._on_snapshot_target_change_no_change, - TargetChange.ADD: self._on_snapshot_target_change_add, - TargetChange.REMOVE: self._on_snapshot_target_change_remove, - TargetChange.RESET: self._on_snapshot_target_change_reset, - TargetChange.CURRENT: self._on_snapshot_target_change_current, - } - - target_change = getattr(proto, "target_change", "") - document_change = getattr(proto, "document_change", "") - document_delete = getattr(proto, "document_delete", "") - document_remove = getattr(proto, "document_remove", "") - filter_ = getattr(proto, "filter", "") - - if str(target_change): - target_change_type = target_change.target_change_type - _LOGGER.debug("on_snapshot: target change: " + str(target_change_type)) - meth = target_changetype_dispatch.get(target_change_type) - if meth is None: - _LOGGER.info( - "on_snapshot: Unknown target change " + str(target_change_type) - ) - self.close( - reason="Unknown target change type: %s " % str(target_change_type) - ) - else: - try: - meth(proto) - except Exception as exc2: - _LOGGER.debug("meth(proto) exc: " + str(exc2)) - raise - - # NOTE: - # in other implementations, such as node, the backoff is reset here - # in this version bidi rpc is just used and will control this. - - elif str(document_change): - _LOGGER.debug("on_snapshot: document change") - - # No other target_ids can show up here, but we still need to see - # if the targetId was in the added list or removed list. - target_ids = document_change.target_ids or [] - removed_target_ids = document_change.removed_target_ids or [] - changed = False - removed = False - - if WATCH_TARGET_ID in target_ids: - changed = True - - if WATCH_TARGET_ID in removed_target_ids: - removed = True - - if changed: - _LOGGER.debug("on_snapshot: document change: CHANGED") - - # google.cloud.firestore_v1.types.Document - document = document_change.document - - data = _helpers.decode_dict(document.fields, self._firestore) - - # Create a snapshot. 
As Document and Query objects can be - # passed we need to get a Document Reference in a more manual - # fashion than self._document_reference - document_name = document.name - db_str = self._firestore._database_string - db_str_documents = db_str + "/documents/" - if document_name.startswith(db_str_documents): - document_name = document_name[len(db_str_documents) :] - - document_ref = self._firestore.document(document_name) - - snapshot = self.DocumentSnapshot( - reference=document_ref, - data=data, - exists=True, - read_time=None, - create_time=document.create_time, - update_time=document.update_time, - ) - self.change_map[document.name] = snapshot - - elif removed: - _LOGGER.debug("on_snapshot: document change: REMOVED") - document = document_change.document - self.change_map[document.name] = ChangeType.REMOVED - - # NB: document_delete and document_remove (as far as we, the client, - # are concerned) are functionally equivalent - - elif str(document_delete): - _LOGGER.debug("on_snapshot: document change: DELETE") - name = document_delete.document - self.change_map[name] = ChangeType.REMOVED - - elif str(document_remove): - _LOGGER.debug("on_snapshot: document change: REMOVE") - name = document_remove.document - self.change_map[name] = ChangeType.REMOVED - - elif filter_: - _LOGGER.debug("on_snapshot: filter update") - if filter_.count != self._current_size(): - # We need to remove all the current results. - self._reset_docs() - # The filter didn't match, so re-issue the query. - # TODO: reset stream method? - # self._reset_stream(); - - elif proto is None: - self.close() - else: - _LOGGER.debug("UNKNOWN TYPE. UHOH") - self.close(reason=ValueError("Unknown listen response type: %s" % proto)) - - def push(self, read_time, next_resume_token): - """ - Assembles a new snapshot from the current set of changes and invokes - the user's callback. Clears the current changes on completion. - """ - deletes, adds, updates = Watch._extract_changes( - self.doc_map, self.change_map, read_time - ) - - updated_tree, updated_map, appliedChanges = self._compute_snapshot( - self.doc_tree, self.doc_map, deletes, adds, updates - ) - - if not self.has_pushed or len(appliedChanges): - # TODO: It is possible in the future we will have the tree order - # on insert. For now, we sort here. - key = functools.cmp_to_key(self._comparator) - keys = sorted(updated_tree.keys(), key=key) - - self._snapshot_callback( - keys, - appliedChanges, - datetime.datetime.fromtimestamp(read_time.seconds, pytz.utc), - ) - self.has_pushed = True - - self.doc_tree = updated_tree - self.doc_map = updated_map - self.change_map.clear() - self.resume_token = next_resume_token - - @staticmethod - def _extract_changes(doc_map, changes, read_time): - deletes = [] - adds = [] - updates = [] - - for name, value in changes.items(): - if value == ChangeType.REMOVED: - if name in doc_map: - deletes.append(name) - elif name in doc_map: - if read_time is not None: - value.read_time = read_time - updates.append(value) - else: - if read_time is not None: - value.read_time = read_time - adds.append(value) - - return (deletes, adds, updates) - - def _compute_snapshot( - self, doc_tree, doc_map, delete_changes, add_changes, update_changes - ): - updated_tree = doc_tree - updated_map = doc_map - - assert len(doc_tree) == len(doc_map), ( - "The document tree and document map should have the same " - + "number of entries." - ) - - def delete_doc(name, updated_tree, updated_map): - """ - Applies a document delete to the document tree and document map. 
- Returns the corresponding DocumentChange event. - """ - assert name in updated_map, "Document to delete does not exist" - old_document = updated_map.get(name) - # TODO: If a document doesn't exist this raises IndexError. Handle? - existing = updated_tree.find(old_document) - old_index = existing.index - updated_tree = updated_tree.remove(old_document) - del updated_map[name] - return ( - DocumentChange(ChangeType.REMOVED, old_document, old_index, -1), - updated_tree, - updated_map, - ) - - def add_doc(new_document, updated_tree, updated_map): - """ - Applies a document add to the document tree and the document map. - Returns the corresponding DocumentChange event. - """ - name = new_document.reference._document_path - assert name not in updated_map, "Document to add already exists" - updated_tree = updated_tree.insert(new_document, None) - new_index = updated_tree.find(new_document).index - updated_map[name] = new_document - return ( - DocumentChange(ChangeType.ADDED, new_document, -1, new_index), - updated_tree, - updated_map, - ) - - def modify_doc(new_document, updated_tree, updated_map): - """ - Applies a document modification to the document tree and the - document map. - Returns the DocumentChange event for successful modifications. - """ - name = new_document.reference._document_path - assert name in updated_map, "Document to modify does not exist" - old_document = updated_map.get(name) - if old_document.update_time != new_document.update_time: - remove_change, updated_tree, updated_map = delete_doc( - name, updated_tree, updated_map - ) - add_change, updated_tree, updated_map = add_doc( - new_document, updated_tree, updated_map - ) - return ( - DocumentChange( - ChangeType.MODIFIED, - new_document, - remove_change.old_index, - add_change.new_index, - ), - updated_tree, - updated_map, - ) - - return None, updated_tree, updated_map - - # Process the sorted changes in the order that is expected by our - # clients (removals, additions, and then modifications). We also need - # to sort the individual changes to assure that old_index/new_index - # keep incrementing. - appliedChanges = [] - - key = functools.cmp_to_key(self._comparator) - - # Deletes are sorted based on the order of the existing document. - delete_changes = sorted(delete_changes) - for name in delete_changes: - change, updated_tree, updated_map = delete_doc( - name, updated_tree, updated_map - ) - appliedChanges.append(change) - - add_changes = sorted(add_changes, key=key) - _LOGGER.debug("walk over add_changes") - for snapshot in add_changes: - _LOGGER.debug("in add_changes") - change, updated_tree, updated_map = add_doc( - snapshot, updated_tree, updated_map - ) - appliedChanges.append(change) - - update_changes = sorted(update_changes, key=key) - for snapshot in update_changes: - change, updated_tree, updated_map = modify_doc( - snapshot, updated_tree, updated_map - ) - if change is not None: - appliedChanges.append(change) - - assert len(updated_tree) == len(updated_map), ( - "The update document " - + "tree and document map should have the same number of entries." - ) - return (updated_tree, updated_map, appliedChanges) - - def _affects_target(self, target_ids, current_id): - if target_ids is None: - return True - - return current_id in target_ids - - def _current_size(self): - """ - Returns the current count of all documents, including the changes from - the current changeMap. 
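`Watch._extract_changes` above partitions the accumulated change map against the previously delivered documents into deletes, adds, and updates, which `_compute_snapshot` then applies in that order. A self-contained sketch of the same partitioning with plain dicts; the names and the `REMOVED` marker are stand-ins for the internal types, and the read-time bookkeeping is omitted:

# Standalone sketch of the deletes/adds/updates split done by _extract_changes;
# REMOVED stands in for ChangeType.REMOVED and snapshots are plain strings here.
REMOVED = object()


def extract_changes(doc_map, change_map):
    deletes, adds, updates = [], [], []
    for name, value in change_map.items():
        if value is REMOVED:
            if name in doc_map:
                deletes.append(name)   # previously delivered, now gone
        elif name in doc_map:
            updates.append(value)      # existing document changed
        else:
            adds.append(value)         # first time this document is seen
    return deletes, adds, updates


previous = {"docs/a": "snap-a", "docs/b": "snap-b"}
pending = {"docs/b": "snap-b2", "docs/c": "snap-c", "docs/a": REMOVED}
print(extract_changes(previous, pending))  # (['docs/a'], ['snap-c'], ['snap-b2'])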
- """ - deletes, adds, _ = Watch._extract_changes(self.doc_map, self.change_map, None) - return len(self.doc_map) + len(adds) - len(deletes) - - def _reset_docs(self): - """ - Helper to clear the docs on RESET or filter mismatch. - """ - _LOGGER.debug("resetting documents") - self.change_map.clear() - self.resume_token = None - - # Mark each document as deleted. If documents are not deleted - # they will be sent again by the server. - for snapshot in self.doc_tree.keys(): - name = snapshot.reference._document_path - self.change_map[name] = ChangeType.REMOVED - - self.current = False diff --git a/firestore/google/cloud/firestore_v1beta1/__init__.py b/firestore/google/cloud/firestore_v1beta1/__init__.py deleted file mode 100644 index f681d84e6a37..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Python idiomatic client for Google Cloud Firestore.""" - -from pkg_resources import get_distribution - -__version__ = get_distribution("google-cloud-firestore").version - -from google.cloud.firestore_v1beta1 import types -from google.cloud.firestore_v1beta1._helpers import GeoPoint -from google.cloud.firestore_v1beta1._helpers import ExistsOption -from google.cloud.firestore_v1beta1._helpers import LastUpdateOption -from google.cloud.firestore_v1beta1._helpers import ReadAfterWriteError -from google.cloud.firestore_v1beta1._helpers import WriteOption -from google.cloud.firestore_v1beta1.batch import WriteBatch -from google.cloud.firestore_v1beta1.client import Client -from google.cloud.firestore_v1beta1.collection import CollectionReference -from google.cloud.firestore_v1beta1.transforms import ArrayRemove -from google.cloud.firestore_v1beta1.transforms import ArrayUnion -from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD -from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP -from google.cloud.firestore_v1beta1.document import DocumentReference -from google.cloud.firestore_v1beta1.document import DocumentSnapshot -from google.cloud.firestore_v1beta1.gapic import enums -from google.cloud.firestore_v1beta1.query import Query -from google.cloud.firestore_v1beta1.transaction import Transaction -from google.cloud.firestore_v1beta1.transaction import transactional -from google.cloud.firestore_v1beta1.watch import Watch - - -__all__ = [ - "__version__", - "ArrayRemove", - "ArrayUnion", - "Client", - "CollectionReference", - "DELETE_FIELD", - "DocumentReference", - "DocumentSnapshot", - "enums", - "ExistsOption", - "GeoPoint", - "LastUpdateOption", - "Query", - "ReadAfterWriteError", - "SERVER_TIMESTAMP", - "Transaction", - "transactional", - "types", - "Watch", - "WriteBatch", - "WriteOption", -] diff --git a/firestore/google/cloud/firestore_v1beta1/_helpers.py b/firestore/google/cloud/firestore_v1beta1/_helpers.py deleted file mode 100644 index 11dcefc98fad..000000000000 --- 
a/firestore/google/cloud/firestore_v1beta1/_helpers.py +++ /dev/null @@ -1,998 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Common helpers shared across Google Cloud Firestore modules.""" - -import datetime - -from google.protobuf import struct_pb2 -from google.type import latlng_pb2 -import grpc -import six - -from google.cloud import exceptions -from google.cloud._helpers import _datetime_to_pb_timestamp -from google.api_core.datetime_helpers import DatetimeWithNanoseconds -from google.cloud.firestore_v1beta1 import transforms -from google.cloud.firestore_v1beta1 import types -from google.cloud.firestore_v1beta1.field_path import FieldPath -from google.cloud.firestore_v1beta1.field_path import parse_field_path -from google.cloud.firestore_v1beta1.gapic import enums -from google.cloud.firestore_v1beta1.proto import common_pb2 -from google.cloud.firestore_v1beta1.proto import document_pb2 -from google.cloud.firestore_v1beta1.proto import write_pb2 - - -BAD_PATH_TEMPLATE = "A path element must be a string. Received {}, which is a {}." -DOCUMENT_PATH_DELIMITER = "/" -INACTIVE_TXN = "Transaction not in progress, cannot be used in API requests." -READ_AFTER_WRITE_ERROR = "Attempted read after write in a transaction." -BAD_REFERENCE_ERROR = ( - "Reference value {!r} in unexpected format, expected to be of the form " - "``projects/{{project}}/databases/{{database}}/" - "documents/{{document_path}}``." -) -WRONG_APP_REFERENCE = ( - "Document {!r} does not correspond to the same database " "({!r}) as the client." -) -REQUEST_TIME_ENUM = enums.DocumentTransform.FieldTransform.ServerValue.REQUEST_TIME -_GRPC_ERROR_MAPPING = { - grpc.StatusCode.ALREADY_EXISTS: exceptions.Conflict, - grpc.StatusCode.NOT_FOUND: exceptions.NotFound, -} - - -class GeoPoint(object): - """Simple container for a geo point value. - - Args: - latitude (float): Latitude of a point. - longitude (float): Longitude of a point. - """ - - def __init__(self, latitude, longitude): - self.latitude = latitude - self.longitude = longitude - - def to_protobuf(self): - """Convert the current object to protobuf. - - Returns: - google.type.latlng_pb2.LatLng: The current point as a protobuf. - """ - return latlng_pb2.LatLng(latitude=self.latitude, longitude=self.longitude) - - def __eq__(self, other): - """Compare two geo points for equality. - - Returns: - Union[bool, NotImplemented]: :data:`True` if the points compare - equal, else :data:`False`. (Or :data:`NotImplemented` if - ``other`` is not a geo point.) - """ - if not isinstance(other, GeoPoint): - return NotImplemented - - return self.latitude == other.latitude and self.longitude == other.longitude - - def __ne__(self, other): - """Compare two geo points for inequality. - - Returns: - Union[bool, NotImplemented]: :data:`False` if the points compare - equal, else :data:`True`. (Or :data:`NotImplemented` if - ``other`` is not a geo point.) 
- """ - equality_val = self.__eq__(other) - if equality_val is NotImplemented: - return NotImplemented - else: - return not equality_val - - -def verify_path(path, is_collection): - """Verifies that a ``path`` has the correct form. - - Checks that all of the elements in ``path`` are strings. - - Args: - path (Tuple[str, ...]): The components in a collection or - document path. - is_collection (bool): Indicates if the ``path`` represents - a document or a collection. - - Raises: - ValueError: if - - * the ``path`` is empty - * ``is_collection=True`` and there are an even number of elements - * ``is_collection=False`` and there are an odd number of elements - * an element is not a string - """ - num_elements = len(path) - if num_elements == 0: - raise ValueError("Document or collection path cannot be empty") - - if is_collection: - if num_elements % 2 == 0: - raise ValueError("A collection must have an odd number of path elements") - else: - if num_elements % 2 == 1: - raise ValueError("A document must have an even number of path elements") - - for element in path: - if not isinstance(element, six.string_types): - msg = BAD_PATH_TEMPLATE.format(element, type(element)) - raise ValueError(msg) - - -def encode_value(value): - """Converts a native Python value into a Firestore protobuf ``Value``. - - Args: - value (Union[NoneType, bool, int, float, datetime.datetime, \ - str, bytes, dict, ~google.cloud.Firestore.GeoPoint]): A native - Python value to convert to a protobuf field. - - Returns: - ~google.cloud.firestore_v1beta1.types.Value: A - value encoded as a Firestore protobuf. - - Raises: - TypeError: If the ``value`` is not one of the accepted types. - """ - if value is None: - return document_pb2.Value(null_value=struct_pb2.NULL_VALUE) - - # Must come before six.integer_types since ``bool`` is an integer subtype. - if isinstance(value, bool): - return document_pb2.Value(boolean_value=value) - - if isinstance(value, six.integer_types): - return document_pb2.Value(integer_value=value) - - if isinstance(value, float): - return document_pb2.Value(double_value=value) - - if isinstance(value, DatetimeWithNanoseconds): - return document_pb2.Value(timestamp_value=value.timestamp_pb()) - - if isinstance(value, datetime.datetime): - return document_pb2.Value(timestamp_value=_datetime_to_pb_timestamp(value)) - - if isinstance(value, six.text_type): - return document_pb2.Value(string_value=value) - - if isinstance(value, six.binary_type): - return document_pb2.Value(bytes_value=value) - - # NOTE: We avoid doing an isinstance() check for a Document - # here to avoid import cycles. - document_path = getattr(value, "_document_path", None) - if document_path is not None: - return document_pb2.Value(reference_value=document_path) - - if isinstance(value, GeoPoint): - return document_pb2.Value(geo_point_value=value.to_protobuf()) - - if isinstance(value, list): - value_list = [encode_value(element) for element in value] - value_pb = document_pb2.ArrayValue(values=value_list) - return document_pb2.Value(array_value=value_pb) - - if isinstance(value, dict): - value_dict = encode_dict(value) - value_pb = document_pb2.MapValue(fields=value_dict) - return document_pb2.Value(map_value=value_pb) - - raise TypeError( - "Cannot convert to a Firestore Value", value, "Invalid type", type(value) - ) - - -def encode_dict(values_dict): - """Encode a dictionary into protobuf ``Value``-s. - - Args: - values_dict (dict): The dictionary to encode as protobuf fields. 
- - Returns: - Dict[str, ~google.cloud.firestore_v1beta1.types.Value]: A - dictionary of string keys and ``Value`` protobufs as dictionary - values. - """ - return {key: encode_value(value) for key, value in six.iteritems(values_dict)} - - -def reference_value_to_document(reference_value, client): - """Convert a reference value string to a document. - - Args: - reference_value (str): A document reference value. - client (~.firestore_v1beta1.client.Client): A client that has - a document factory. - - Returns: - ~.firestore_v1beta1.document.DocumentReference: The document - corresponding to ``reference_value``. - - Raises: - ValueError: If the ``reference_value`` is not of the expected - format: ``projects/{project}/databases/{database}/documents/...``. - ValueError: If the ``reference_value`` does not come from the same - project / database combination as the ``client``. - """ - # The first 5 parts are - # projects, {project}, databases, {database}, documents - parts = reference_value.split(DOCUMENT_PATH_DELIMITER, 5) - if len(parts) != 6: - msg = BAD_REFERENCE_ERROR.format(reference_value) - raise ValueError(msg) - - # The sixth part is `a/b/c/d` (i.e. the document path) - document = client.document(parts[-1]) - if document._document_path != reference_value: - msg = WRONG_APP_REFERENCE.format(reference_value, client._database_string) - raise ValueError(msg) - - return document - - -def decode_value(value, client): - """Converts a Firestore protobuf ``Value`` to a native Python value. - - Args: - value (google.cloud.firestore_v1beta1.types.Value): A - Firestore protobuf to be decoded / parsed / converted. - client (~.firestore_v1beta1.client.Client): A client that has - a document factory. - - Returns: - Union[NoneType, bool, int, float, datetime.datetime, \ - str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native - Python value converted from the ``value``. - - Raises: - NotImplementedError: If the ``value_type`` is ``reference_value``. - ValueError: If the ``value_type`` is unknown. - """ - value_type = value.WhichOneof("value_type") - - if value_type == "null_value": - return None - elif value_type == "boolean_value": - return value.boolean_value - elif value_type == "integer_value": - return value.integer_value - elif value_type == "double_value": - return value.double_value - elif value_type == "timestamp_value": - return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value) - elif value_type == "string_value": - return value.string_value - elif value_type == "bytes_value": - return value.bytes_value - elif value_type == "reference_value": - return reference_value_to_document(value.reference_value, client) - elif value_type == "geo_point_value": - return GeoPoint(value.geo_point_value.latitude, value.geo_point_value.longitude) - elif value_type == "array_value": - return [decode_value(element, client) for element in value.array_value.values] - elif value_type == "map_value": - return decode_dict(value.map_value.fields, client) - else: - raise ValueError("Unknown ``value_type``", value_type) - - -def decode_dict(value_fields, client): - """Converts a protobuf map of Firestore ``Value``-s. - - Args: - value_fields (google.protobuf.pyext._message.MessageMapContainer): A - protobuf map of Firestore ``Value``-s. - client (~.firestore_v1beta1.client.Client): A client that has - a document factory. 
- - Returns: - Dict[str, Union[NoneType, bool, int, float, datetime.datetime, \ - str, bytes, dict, ~google.cloud.Firestore.GeoPoint]]: A dictionary - of native Python values converted from the ``value_fields``. - """ - return { - key: decode_value(value, client) for key, value in six.iteritems(value_fields) - } - - -def get_doc_id(document_pb, expected_prefix): - """Parse a document ID from a document protobuf. - - Args: - document_pb (google.cloud.proto.firestore.v1beta1.\ - document_pb2.Document): A protobuf for a document that - was created in a ``CreateDocument`` RPC. - expected_prefix (str): The expected collection prefix for the - fully-qualified document name. - - Returns: - str: The document ID from the protobuf. - - Raises: - ValueError: If the name does not begin with the prefix. - """ - prefix, document_id = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1) - if prefix != expected_prefix: - raise ValueError( - "Unexpected document name", - document_pb.name, - "Expected to begin with", - expected_prefix, - ) - - return document_id - - -_EmptyDict = transforms.Sentinel("Marker for an empty dict value") - - -def extract_fields(document_data, prefix_path, expand_dots=False): - """Do depth-first walk of tree, yielding field_path, value""" - if not document_data: - yield prefix_path, _EmptyDict - else: - for key, value in sorted(six.iteritems(document_data)): - - if expand_dots: - sub_key = FieldPath.from_string(key) - else: - sub_key = FieldPath(key) - - field_path = FieldPath(*(prefix_path.parts + sub_key.parts)) - - if isinstance(value, dict): - for s_path, s_value in extract_fields(value, field_path): - yield s_path, s_value - else: - yield field_path, value - - -def set_field_value(document_data, field_path, value): - """Set a value into a document for a field_path""" - current = document_data - for element in field_path.parts[:-1]: - current = current.setdefault(element, {}) - if value is _EmptyDict: - value = {} - current[field_path.parts[-1]] = value - - -def get_field_value(document_data, field_path): - if not field_path.parts: - raise ValueError("Empty path") - - current = document_data - for element in field_path.parts[:-1]: - current = current[element] - return current[field_path.parts[-1]] - - -class DocumentExtractor(object): - """ Break document data up into actual data and transforms. - - Handle special values such as ``DELETE_FIELD``, ``SERVER_TIMESTAMP``. - - Args: - document_data (dict): - Property names and values to use for sending a change to - a document. 
- """ - - def __init__(self, document_data): - self.document_data = document_data - self.field_paths = [] - self.deleted_fields = [] - self.server_timestamps = [] - self.array_removes = {} - self.array_unions = {} - self.set_fields = {} - self.empty_document = False - - prefix_path = FieldPath() - iterator = self._get_document_iterator(prefix_path) - - for field_path, value in iterator: - - if field_path == prefix_path and value is _EmptyDict: - self.empty_document = True - - elif value is transforms.DELETE_FIELD: - self.deleted_fields.append(field_path) - - elif value is transforms.SERVER_TIMESTAMP: - self.server_timestamps.append(field_path) - - elif isinstance(value, transforms.ArrayRemove): - self.array_removes[field_path] = value.values - - elif isinstance(value, transforms.ArrayUnion): - self.array_unions[field_path] = value.values - - else: - self.field_paths.append(field_path) - set_field_value(self.set_fields, field_path, value) - - def _get_document_iterator(self, prefix_path): - return extract_fields(self.document_data, prefix_path) - - @property - def has_transforms(self): - return bool(self.server_timestamps or self.array_removes or self.array_unions) - - @property - def transform_paths(self): - return sorted( - self.server_timestamps + list(self.array_removes) + list(self.array_unions) - ) - - def _get_update_mask(self, allow_empty_mask=False): - return None - - def get_update_pb(self, document_path, exists=None, allow_empty_mask=False): - - if exists is not None: - current_document = common_pb2.Precondition(exists=exists) - else: - current_document = None - - update_pb = write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=encode_dict(self.set_fields) - ), - update_mask=self._get_update_mask(allow_empty_mask), - current_document=current_document, - ) - - return update_pb - - def get_transform_pb(self, document_path, exists=None): - def make_array_value(values): - value_list = [encode_value(element) for element in values] - return document_pb2.ArrayValue(values=value_list) - - path_field_transforms = ( - [ - ( - path, - write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), - set_to_server_value=REQUEST_TIME_ENUM, - ), - ) - for path in self.server_timestamps - ] - + [ - ( - path, - write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), - remove_all_from_array=make_array_value(values), - ), - ) - for path, values in self.array_removes.items() - ] - + [ - ( - path, - write_pb2.DocumentTransform.FieldTransform( - field_path=path.to_api_repr(), - append_missing_elements=make_array_value(values), - ), - ) - for path, values in self.array_unions.items() - ] - ) - field_transforms = [ - transform for path, transform in sorted(path_field_transforms) - ] - transform_pb = write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, field_transforms=field_transforms - ) - ) - if exists is not None: - transform_pb.current_document.CopyFrom( - common_pb2.Precondition(exists=exists) - ) - - return transform_pb - - -def pbs_for_create(document_path, document_data): - """Make ``Write`` protobufs for ``create()`` methods. - - Args: - document_path (str): A fully-qualified document path. - document_data (dict): Property names and values to use for - creating a document. - - Returns: - List[google.cloud.firestore_v1beta1.types.Write]: One or two - ``Write`` protobuf instances for ``create()``. 
- """ - extractor = DocumentExtractor(document_data) - - if extractor.deleted_fields: - raise ValueError("Cannot apply DELETE_FIELD in a create request.") - - write_pbs = [] - - # Conformance tests require skipping the 'update_pb' if the document - # contains only transforms. - if extractor.empty_document or extractor.set_fields: - write_pbs.append(extractor.get_update_pb(document_path, exists=False)) - - if extractor.has_transforms: - exists = None if write_pbs else False - transform_pb = extractor.get_transform_pb(document_path, exists) - write_pbs.append(transform_pb) - - return write_pbs - - -def pbs_for_set_no_merge(document_path, document_data): - """Make ``Write`` protobufs for ``set()`` methods. - - Args: - document_path (str): A fully-qualified document path. - document_data (dict): Property names and values to use for - replacing a document. - - Returns: - List[google.cloud.firestore_v1beta1.types.Write]: One - or two ``Write`` protobuf instances for ``set()``. - """ - extractor = DocumentExtractor(document_data) - - if extractor.deleted_fields: - raise ValueError( - "Cannot apply DELETE_FIELD in a set request without " - "specifying 'merge=True' or 'merge=[field_paths]'." - ) - - # Conformance tests require send the 'update_pb' even if the document - # contains only transforms. - write_pbs = [extractor.get_update_pb(document_path)] - - if extractor.has_transforms: - transform_pb = extractor.get_transform_pb(document_path) - write_pbs.append(transform_pb) - - return write_pbs - - -class DocumentExtractorForMerge(DocumentExtractor): - """ Break document data up into actual data and transforms. - """ - - def __init__(self, document_data): - super(DocumentExtractorForMerge, self).__init__(document_data) - self.data_merge = [] - self.transform_merge = [] - self.merge = [] - - @property - def has_updates(self): - # for whatever reason, the conformance tests want to see the parent - # of nested transform paths in the update mask - # (see set-st-merge-nonleaf-alone.textproto) - update_paths = set(self.data_merge) - - for transform_path in self.transform_paths: - if len(transform_path.parts) > 1: - parent_fp = FieldPath(*transform_path.parts[:-1]) - update_paths.add(parent_fp) - - return bool(update_paths) - - def _apply_merge_all(self): - self.data_merge = sorted(self.field_paths + self.deleted_fields) - # TODO: other transforms - self.transform_merge = self.transform_paths - self.merge = sorted(self.data_merge + self.transform_paths) - - def _construct_merge_paths(self, merge): - for merge_field in merge: - if isinstance(merge_field, FieldPath): - yield merge_field - else: - yield FieldPath(*parse_field_path(merge_field)) - - def _normalize_merge_paths(self, merge): - merge_paths = sorted(self._construct_merge_paths(merge)) - - # Raise if any merge path is a parent of another. Leverage sorting - # to avoid quadratic behavior. 
- for index in range(len(merge_paths) - 1): - lhs, rhs = merge_paths[index], merge_paths[index + 1] - if lhs.eq_or_parent(rhs): - raise ValueError("Merge paths overlap: {}, {}".format(lhs, rhs)) - - for merge_path in merge_paths: - if merge_path in self.deleted_fields: - continue - try: - get_field_value(self.document_data, merge_path) - except KeyError: - raise ValueError("Invalid merge path: {}".format(merge_path)) - - return merge_paths - - def _apply_merge_paths(self, merge): - - if self.empty_document: - raise ValueError("Cannot merge specific fields with empty document.") - - merge_paths = self._normalize_merge_paths(merge) - - del self.data_merge[:] - del self.transform_merge[:] - self.merge = merge_paths - - for merge_path in merge_paths: - - if merge_path in self.transform_paths: - self.transform_merge.append(merge_path) - - for field_path in self.field_paths: - if merge_path.eq_or_parent(field_path): - self.data_merge.append(field_path) - - # Clear out data for fields not merged. - merged_set_fields = {} - for field_path in self.data_merge: - value = get_field_value(self.document_data, field_path) - set_field_value(merged_set_fields, field_path, value) - self.set_fields = merged_set_fields - - unmerged_deleted_fields = [ - field_path - for field_path in self.deleted_fields - if field_path not in self.merge - ] - if unmerged_deleted_fields: - raise ValueError( - "Cannot delete unmerged fields: {}".format(unmerged_deleted_fields) - ) - self.data_merge = sorted(self.data_merge + self.deleted_fields) - - # Keep only transforms which are within merge. - merged_transform_paths = set() - for merge_path in self.merge: - tranform_merge_paths = [ - transform_path - for transform_path in self.transform_paths - if merge_path.eq_or_parent(transform_path) - ] - merged_transform_paths.update(tranform_merge_paths) - - self.server_timestamps = [ - path for path in self.server_timestamps if path in merged_transform_paths - ] - - self.array_removes = { - path: values - for path, values in self.array_removes.items() - if path in merged_transform_paths - } - - self.array_unions = { - path: values - for path, values in self.array_unions.items() - if path in merged_transform_paths - } - - def apply_merge(self, merge): - if merge is True: # merge all fields - self._apply_merge_all() - else: - self._apply_merge_paths(merge) - - def _get_update_mask(self, allow_empty_mask=False): - # Mask uses dotted / quoted paths. - mask_paths = [ - field_path.to_api_repr() - for field_path in self.merge - if field_path not in self.transform_merge - ] - - if mask_paths or allow_empty_mask: - return common_pb2.DocumentMask(field_paths=mask_paths) - - -def pbs_for_set_with_merge(document_path, document_data, merge): - """Make ``Write`` protobufs for ``set()`` methods. - - Args: - document_path (str): A fully-qualified document path. - document_data (dict): Property names and values to use for - replacing a document. - merge (Optional[bool] or Optional[List]): - If True, merge all fields; else, merge only the named fields. - - Returns: - List[google.cloud.firestore_v1beta1.types.Write]: One - or two ``Write`` protobuf instances for ``set()``. 
- """ - extractor = DocumentExtractorForMerge(document_data) - extractor.apply_merge(merge) - - merge_empty = not document_data - - write_pbs = [] - - if extractor.has_updates or merge_empty: - write_pbs.append( - extractor.get_update_pb(document_path, allow_empty_mask=merge_empty) - ) - - if extractor.transform_paths: - transform_pb = extractor.get_transform_pb(document_path) - write_pbs.append(transform_pb) - - return write_pbs - - -class DocumentExtractorForUpdate(DocumentExtractor): - """ Break document data up into actual data and transforms. - """ - - def __init__(self, document_data): - super(DocumentExtractorForUpdate, self).__init__(document_data) - self.top_level_paths = sorted( - [FieldPath.from_string(key) for key in document_data] - ) - tops = set(self.top_level_paths) - for top_level_path in self.top_level_paths: - for ancestor in top_level_path.lineage(): - if ancestor in tops: - raise ValueError( - "Conflicting field path: {}, {}".format( - top_level_path, ancestor - ) - ) - - for field_path in self.deleted_fields: - if field_path not in tops: - raise ValueError( - "Cannot update with nest delete: {}".format(field_path) - ) - - def _get_document_iterator(self, prefix_path): - return extract_fields(self.document_data, prefix_path, expand_dots=True) - - def _get_update_mask(self, allow_empty_mask=False): - mask_paths = [] - for field_path in self.top_level_paths: - if field_path not in self.transform_paths: - mask_paths.append(field_path.to_api_repr()) - - return common_pb2.DocumentMask(field_paths=mask_paths) - - -def pbs_for_update(document_path, field_updates, option): - """Make ``Write`` protobufs for ``update()`` methods. - - Args: - document_path (str): A fully-qualified document path. - field_updates (dict): Field names or paths to update and values - to update with. - option (optional[~.firestore_v1beta1.client.WriteOption]): A - write option to make assertions / preconditions on the server - state of the document before applying changes. - - Returns: - List[google.cloud.firestore_v1beta1.types.Write]: One - or two ``Write`` protobuf instances for ``update()``. - """ - extractor = DocumentExtractorForUpdate(field_updates) - - if extractor.empty_document: - raise ValueError("Cannot update with an empty document.") - - if option is None: # Default is to use ``exists=True``. - option = ExistsOption(exists=True) - - write_pbs = [] - - if extractor.field_paths or extractor.deleted_fields: - update_pb = extractor.get_update_pb(document_path) - option.modify_write(update_pb) - write_pbs.append(update_pb) - - if extractor.has_transforms: - transform_pb = extractor.get_transform_pb(document_path) - if not write_pbs: - # NOTE: set the write option on the ``transform_pb`` only if there - # is no ``update_pb`` - option.modify_write(transform_pb) - write_pbs.append(transform_pb) - - return write_pbs - - -def pb_for_delete(document_path, option): - """Make a ``Write`` protobuf for ``delete()`` methods. - - Args: - document_path (str): A fully-qualified document path. - option (optional[~.firestore_v1beta1.client.WriteOption]): A - write option to make assertions / preconditions on the server - state of the document before applying changes. - - Returns: - google.cloud.firestore_v1beta1.types.Write: A - ``Write`` protobuf instance for the ``delete()``. - """ - write_pb = write_pb2.Write(delete=document_path) - if option is not None: - option.modify_write(write_pb) - - return write_pb - - -class ReadAfterWriteError(Exception): - """Raised when a read is attempted after a write. 
- - Raised by "read" methods that use transactions. - """ - - -def get_transaction_id(transaction, read_operation=True): - """Get the transaction ID from a ``Transaction`` object. - - Args: - transaction (Optional[~.firestore_v1beta1.transaction.\ - Transaction]): An existing transaction that this query will - run in. - read_operation (Optional[bool]): Indicates if the transaction ID - will be used in a read operation. Defaults to :data:`True`. - - Returns: - Optional[bytes]: The ID of the transaction, or :data:`None` if the - ``transaction`` is :data:`None`. - - Raises: - ValueError: If the ``transaction`` is not in progress (only if - ``transaction`` is not :data:`None`). - ReadAfterWriteError: If the ``transaction`` has writes stored on - it and ``read_operation`` is :data:`True`. - """ - if transaction is None: - return None - else: - if not transaction.in_progress: - raise ValueError(INACTIVE_TXN) - if read_operation and len(transaction._write_pbs) > 0: - raise ReadAfterWriteError(READ_AFTER_WRITE_ERROR) - return transaction.id - - -def metadata_with_prefix(prefix, **kw): - """Create RPC metadata containing a prefix. - - Args: - prefix (str): appropriate resource path. - - Returns: - List[Tuple[str, str]]: RPC metadata with supplied prefix - """ - return [("google-cloud-resource-prefix", prefix)] - - -class WriteOption(object): - """Option used to assert a condition on a write operation.""" - - def modify_write(self, write_pb, no_create_msg=None): - """Modify a ``Write`` protobuf based on the state of this write option. - - This is a virtual method intended to be implemented by subclasses. - - Args: - write_pb (google.cloud.firestore_v1beta1.types.Write): A - ``Write`` protobuf instance to be modified with a precondition - determined by the state of this option. - no_create_msg (Optional[str]): A message to use to indicate that - a create operation is not allowed. - - Raises: - NotImplementedError: Always, this method is virtual. - """ - raise NotImplementedError - - -class LastUpdateOption(WriteOption): - """Option used to assert a "last update" condition on a write operation. - - This will typically be created by - :meth:`~google.cloud.firestore_v1beta1.client.Client.write_option`. - - Args: - last_update_time (google.protobuf.timestamp_pb2.Timestamp): A - timestamp. When set, the target document must exist and have - been last updated at that time. Protobuf ``update_time`` timestamps - are typically returned from methods that perform write operations - as part of a "write result" protobuf or directly. - """ - - def __init__(self, last_update_time): - self._last_update_time = last_update_time - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._last_update_time == other._last_update_time - - def modify_write(self, write_pb, **unused_kwargs): - """Modify a ``Write`` protobuf based on the state of this write option. - - The ``last_update_time`` is added to ``write_pb`` as an "update time" - precondition. When set, the target document must exist and have been - last updated at that time. - - Args: - write_pb (google.cloud.firestore_v1beta1.types.Write): A - ``Write`` protobuf instance to be modified with a precondition - determined by the state of this option. - unused_kwargs (Dict[str, Any]): Keyword arguments accepted by - other subclasses that are unused here. 
- """ - current_doc = types.Precondition(update_time=self._last_update_time) - write_pb.current_document.CopyFrom(current_doc) - - -class ExistsOption(WriteOption): - """Option used to assert existence on a write operation. - - This will typically be created by - :meth:`~google.cloud.firestore_v1beta1.client.Client.write_option`. - - Args: - exists (bool): Indicates if the document being modified - should already exist. - """ - - def __init__(self, exists): - self._exists = exists - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._exists == other._exists - - def modify_write(self, write_pb, **unused_kwargs): - """Modify a ``Write`` protobuf based on the state of this write option. - - If: - - * ``exists=True``, adds a precondition that requires existence - * ``exists=False``, adds a precondition that requires non-existence - - Args: - write_pb (google.cloud.firestore_v1beta1.types.Write): A - ``Write`` protobuf instance to be modified with a precondition - determined by the state of this option. - unused_kwargs (Dict[str, Any]): Keyword arguments accepted by - other subclasses that are unused here. - """ - current_doc = types.Precondition(exists=self._exists) - write_pb.current_document.CopyFrom(current_doc) diff --git a/firestore/google/cloud/firestore_v1beta1/batch.py b/firestore/google/cloud/firestore_v1beta1/batch.py deleted file mode 100644 index f3e1018abc96..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/batch.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helpers for batch requests to the Google Cloud Firestore API.""" - - -from google.cloud.firestore_v1beta1 import _helpers - - -class WriteBatch(object): - """Accumulate write operations to be sent in a batch. - - This has the same set of methods for write operations that - :class:`~google.cloud.firestore_v1beta1.document.DocumentReference` - does, e.g. - :meth:`~google.cloud.firestore_v1beta1.document.DocumentReference.create`. - - Args: - client (~.firestore_v1beta1.client.Client): The client that - created this batch. - """ - - def __init__(self, client): - self._client = client - self._write_pbs = [] - self.write_results = None - self.commit_time = None - - def _add_write_pbs(self, write_pbs): - """Add `Write`` protobufs to this transaction. - - This method intended to be over-ridden by subclasses. - - Args: - write_pbs (List[google.cloud.proto.firestore.v1beta1.\ - write_pb2.Write]): A list of write protobufs to be added. - """ - self._write_pbs.extend(write_pbs) - - def create(self, reference, document_data): - """Add a "change" to this batch to create a document. - - If the document given by ``reference`` already exists, then this - batch will fail when :meth:`commit`-ed. - - Args: - reference (~.firestore_v1beta1.document.DocumentReference): A - document reference to be created in this batch. 
- document_data (dict): Property names and values to use for - creating a document. - """ - write_pbs = _helpers.pbs_for_create(reference._document_path, document_data) - self._add_write_pbs(write_pbs) - - def set(self, reference, document_data, merge=False): - """Add a "change" to replace a document. - - See - :meth:`~google.cloud.firestore_v1beta1.document.DocumentReference.set` - for more information on how ``option`` determines how the change is - applied. - - Args: - reference (~.firestore_v1beta1.document.DocumentReference): - A document reference that will have values set in this batch. - document_data (dict): - Property names and values to use for replacing a document. - merge (Optional[bool] or Optional[List]): - If True, apply merging instead of overwriting the state - of the document. - """ - if merge is not False: - write_pbs = _helpers.pbs_for_set_with_merge( - reference._document_path, document_data, merge - ) - else: - write_pbs = _helpers.pbs_for_set_no_merge( - reference._document_path, document_data - ) - - self._add_write_pbs(write_pbs) - - def update(self, reference, field_updates, option=None): - """Add a "change" to update a document. - - See - :meth:`~google.cloud.firestore_v1beta1.document.DocumentReference.update` - for more information on ``field_updates`` and ``option``. - - Args: - reference (~.firestore_v1beta1.document.DocumentReference): A - document reference that will be deleted in this batch. - field_updates (dict): Field names or paths to update and values - to update with. - option (Optional[~.firestore_v1beta1.client.WriteOption]): A - write option to make assertions / preconditions on the server - state of the document before applying changes. - """ - if option.__class__.__name__ == "ExistsOption": - raise ValueError("you must not pass an explicit write option to " "update.") - write_pbs = _helpers.pbs_for_update( - reference._document_path, field_updates, option - ) - self._add_write_pbs(write_pbs) - - def delete(self, reference, option=None): - """Add a "change" to delete a document. - - See - :meth:`~google.cloud.firestore_v1beta1.document.DocumentReference.delete` - for more information on how ``option`` determines how the change is - applied. - - Args: - reference (~.firestore_v1beta1.document.DocumentReference): A - document reference that will be deleted in this batch. - option (Optional[~.firestore_v1beta1.client.WriteOption]): A - write option to make assertions / preconditions on the server - state of the document before applying changes. - """ - write_pb = _helpers.pb_for_delete(reference._document_path, option) - self._add_write_pbs([write_pb]) - - def commit(self): - """Commit the changes accumulated in this batch. - - Returns: - List[google.cloud.proto.firestore.v1beta1.\ - write_pb2.WriteResult, ...]: The write results corresponding - to the changes committed, returned in the same order as the - changes were applied to this batch. A write result contains an - ``update_time`` field. 
- """ - commit_response = self._client._firestore_api.commit( - self._client._database_string, - self._write_pbs, - transaction=None, - metadata=self._client._rpc_metadata, - ) - - self._write_pbs = [] - self.write_results = results = list(commit_response.write_results) - self.commit_time = commit_response.commit_time - return results - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - if exc_type is None: - self.commit() diff --git a/firestore/google/cloud/firestore_v1beta1/client.py b/firestore/google/cloud/firestore_v1beta1/client.py deleted file mode 100644 index 50036f0adb30..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/client.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Client for interacting with the Google Cloud Firestore API. - -This is the base from which all interactions with the API occur. - -In the hierarchy of API concepts - -* a :class:`~google.cloud.firestore_v1beta1.client.Client` owns a - :class:`~google.cloud.firestore_v1beta1.collection.CollectionReference` -* a :class:`~google.cloud.firestore_v1beta1.client.Client` owns a - :class:`~google.cloud.firestore_v1beta1.document.DocumentReference` -""" -import warnings - -from google.cloud.client import ClientWithProject - -from google.cloud.firestore_v1beta1 import _helpers -from google.cloud.firestore_v1beta1 import types -from google.cloud.firestore_v1beta1.batch import WriteBatch -from google.cloud.firestore_v1beta1.collection import CollectionReference -from google.cloud.firestore_v1beta1.document import DocumentReference -from google.cloud.firestore_v1beta1.document import DocumentSnapshot -from google.cloud.firestore_v1beta1.field_path import render_field_path -from google.cloud.firestore_v1beta1.gapic import firestore_client -from google.cloud.firestore_v1beta1.gapic.transports import firestore_grpc_transport -from google.cloud.firestore_v1beta1.transaction import Transaction - - -DEFAULT_DATABASE = "(default)" -"""str: The default database used in a :class:`~google.cloud.firestore.client.Client`.""" -_BAD_OPTION_ERR = ( - "Exactly one of ``last_update_time`` or ``exists`` " "must be provided." -) -_BAD_DOC_TEMPLATE = ( - "Document {!r} appeared in response but was not present among references" -) -_ACTIVE_TXN = "There is already an active transaction." -_INACTIVE_TXN = "There is no active transaction." -_V1BETA1_DEPRECATED_MESSAGE = ( - "The 'v1beta1' API endpoint is deprecated. " - "The client/library which supports it will be removed in a future release." -) - - -class Client(ClientWithProject): - """Client for interacting with Google Cloud Firestore API. - - .. note:: - - Since the Cloud Firestore API requires the gRPC transport, no - ``_http`` argument is accepted by this class. - - Args: - project (Optional[str]): The project which the client acts on behalf - of. If not passed, falls back to the default inferred - from the environment. 
- credentials (Optional[~google.auth.credentials.Credentials]): The - OAuth2 Credentials to use for this client. If not passed, falls - back to the default inferred from the environment. - database (Optional[str]): The database name that the client targets. - For now, :attr:`DEFAULT_DATABASE` (the default value) is the - only valid database. - """ - - SCOPE = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/datastore", - ) - """The scopes required for authenticating with the Firestore service.""" - - _firestore_api_internal = None - _database_string_internal = None - _rpc_metadata_internal = None - - def __init__(self, project=None, credentials=None, database=DEFAULT_DATABASE): - warnings.warn(_V1BETA1_DEPRECATED_MESSAGE, DeprecationWarning, stacklevel=2) - # NOTE: This API has no use for the _http argument, but sending it - # will have no impact since the _http() @property only lazily - # creates a working HTTP object. - super(Client, self).__init__( - project=project, credentials=credentials, _http=None - ) - self._database = database - - @property - def _firestore_api(self): - """Lazy-loading getter GAPIC Firestore API. - - Returns: - ~.gapic.firestore.v1beta1.firestore_client.FirestoreClient: The - GAPIC client with the credentials of the current client. - """ - if self._firestore_api_internal is None: - # Use a custom channel. - # We need this in order to set appropriate keepalive options. - channel = firestore_grpc_transport.FirestoreGrpcTransport.create_channel( - self._target, - credentials=self._credentials, - options={"grpc.keepalive_time_ms": 30000}.items(), - ) - - self._transport = firestore_grpc_transport.FirestoreGrpcTransport( - address=self._target, channel=channel - ) - - self._firestore_api_internal = firestore_client.FirestoreClient( - transport=self._transport - ) - - return self._firestore_api_internal - - @property - def _target(self): - """Return the target (where the API is). - - Returns: - str: The location of the API. - """ - return firestore_client.FirestoreClient.SERVICE_ADDRESS - - @property - def _database_string(self): - """The database string corresponding to this client's project. - - This value is lazy-loaded and cached. - - Will be of the form - - ``projects/{project_id}/databases/{database_id}`` - - but ``database_id == '(default)'`` for the time being. - - Returns: - str: The fully-qualified database string for the current - project. (The default database is also in this string.) - """ - if self._database_string_internal is None: - # NOTE: database_root_path() is a classmethod, so we don't use - # self._firestore_api (it isn't necessary). - db_str = firestore_client.FirestoreClient.database_root_path( - self.project, self._database - ) - self._database_string_internal = db_str - - return self._database_string_internal - - @property - def _rpc_metadata(self): - """The RPC metadata for this client's associated database. - - Returns: - Sequence[Tuple(str, str)]: RPC metadata with resource prefix - for the database associated with this client. - """ - if self._rpc_metadata_internal is None: - self._rpc_metadata_internal = _helpers.metadata_with_prefix( - self._database_string - ) - - return self._rpc_metadata_internal - - def collection(self, *collection_path): - """Get a reference to a collection. - - For a top-level collection: - - .. code-block:: python - - >>> client.collection('top') - - For a sub-collection: - - .. 
code-block:: python - - >>> client.collection('mydocs/doc/subcol') - >>> # is the same as - >>> client.collection('mydocs', 'doc', 'subcol') - - Sub-collections can be nested deeper in a similar fashion. - - Args: - collection_path (Tuple[str, ...]): Can either be - - * A single ``/``-delimited path to a collection - * A tuple of collection path segments - - Returns: - ~.firestore_v1beta1.collection.CollectionReference: A reference - to a collection in the Firestore database. - """ - if len(collection_path) == 1: - path = collection_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER) - else: - path = collection_path - - return CollectionReference(*path, client=self) - - def document(self, *document_path): - """Get a reference to a document in a collection. - - For a top-level document: - - .. code-block:: python - - >>> client.document('collek/shun') - >>> # is the same as - >>> client.document('collek', 'shun') - - For a document in a sub-collection: - - .. code-block:: python - - >>> client.document('mydocs/doc/subcol/child') - >>> # is the same as - >>> client.document('mydocs', 'doc', 'subcol', 'child') - - Documents in sub-collections can be nested deeper in a similar fashion. - - Args: - document_path (Tuple[str, ...]): Can either be - - * A single ``/``-delimited path to a document - * A tuple of document path segments - - Returns: - ~.firestore_v1beta1.document.DocumentReference: A reference - to a document in a collection. - """ - if len(document_path) == 1: - path = document_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER) - else: - path = document_path - - return DocumentReference(*path, client=self) - - @staticmethod - def field_path(*field_names): - """Create a **field path** from a list of nested field names. - - A **field path** is a ``.``-delimited concatenation of the field - names. It is used to represent a nested field. For example, - in the data - - .. code-block:: python - - data = { - 'aa': { - 'bb': { - 'cc': 10, - }, - }, - } - - the field path ``'aa.bb.cc'`` represents the data stored in - ``data['aa']['bb']['cc']``. - - Args: - field_names (Tuple[str, ...]): The list of field names. - - Returns: - str: The ``.``-delimited field path. - """ - return render_field_path(field_names) - - @staticmethod - def write_option(**kwargs): - """Create a write option for write operations. - - Write operations include :meth:`~google.cloud.DocumentReference.set`, - :meth:`~google.cloud.DocumentReference.update` and - :meth:`~google.cloud.DocumentReference.delete`. - - One of the following keyword arguments must be provided: - - * ``last_update_time`` (:class:`google.protobuf.timestamp_pb2.\ - Timestamp`): A timestamp. When set, the target document must - exist and have been last updated at that time. Protobuf - ``update_time`` timestamps are typically returned from methods - that perform write operations as part of a "write result" - protobuf or directly. - * ``exists`` (:class:`bool`): Indicates if the document being modified - should already exist. - - Providing no argument would make the option have no effect (so - it is not allowed). Providing multiple would be an apparent - contradiction, since ``last_update_time`` assumes that the - document **was** updated (it can't have been updated if it - doesn't exist) and ``exists`` indicate that it is unknown if the - document exists or not. - - Args: - kwargs (Dict[str, Any]): The keyword arguments described above. - - Raises: - TypeError: If anything other than exactly one argument is - provided by the caller. 
- """ - if len(kwargs) != 1: - raise TypeError(_BAD_OPTION_ERR) - - name, value = kwargs.popitem() - if name == "last_update_time": - return _helpers.LastUpdateOption(value) - elif name == "exists": - return _helpers.ExistsOption(value) - else: - extra = "{!r} was provided".format(name) - raise TypeError(_BAD_OPTION_ERR, extra) - - def get_all(self, references, field_paths=None, transaction=None): - """Retrieve a batch of documents. - - .. note:: - - Documents returned by this method are not guaranteed to be - returned in the same order that they are given in ``references``. - - .. note:: - - If multiple ``references`` refer to the same document, the server - will only return one result. - - See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path` - for more information on **field paths**. - - If a ``transaction`` is used and it already has write operations - added, this method cannot be used (i.e. read-after-write is not - allowed). - - Args: - references (List[.DocumentReference, ...]): Iterable of document - references to be retrieved. - field_paths (Optional[Iterable[str, ...]]): An iterable of field - paths (``.``-delimited list of field names) to use as a - projection of document fields in the returned results. If - no value is provided, all fields will be returned. - transaction (Optional[~.firestore_v1beta1.transaction.\ - Transaction]): An existing transaction that these - ``references`` will be retrieved in. - - Yields: - .DocumentSnapshot: The next document snapshot that fulfills the - query, or :data:`None` if the document does not exist. - """ - document_paths, reference_map = _reference_info(references) - mask = _get_doc_mask(field_paths) - response_iterator = self._firestore_api.batch_get_documents( - self._database_string, - document_paths, - mask, - transaction=_helpers.get_transaction_id(transaction), - metadata=self._rpc_metadata, - ) - - for get_doc_response in response_iterator: - yield _parse_batch_get(get_doc_response, reference_map, self) - - def collections(self): - """List top-level collections of the client's database. - - Returns: - Sequence[~.firestore_v1beta1.collection.CollectionReference]: - iterator of subcollections of the current document. - """ - iterator = self._firestore_api.list_collection_ids( - self._database_string, metadata=self._rpc_metadata - ) - iterator.client = self - iterator.item_to_value = _item_to_collection_ref - return iterator - - def batch(self): - """Get a batch instance from this client. - - Returns: - ~.firestore_v1beta1.batch.WriteBatch: A "write" batch to be - used for accumulating document changes and sending the changes - all at once. - """ - return WriteBatch(self) - - def transaction(self, **kwargs): - """Get a transaction that uses this client. - - See :class:`~google.cloud.firestore_v1beta1.transaction.Transaction` - for more information on transactions and the constructor arguments. - - Args: - kwargs (Dict[str, Any]): The keyword arguments (other than - ``client``) to pass along to the - :class:`~google.cloud.firestore_v1beta1.transaction.Transaction` - constructor. - - Returns: - ~.firestore_v1beta1.transaction.Transaction: A transaction - attached to this client. - """ - return Transaction(self, **kwargs) - - -def _reference_info(references): - """Get information about document references. - - Helper for :meth:`~google.cloud.firestore_v1beta1.client.Client.get_all`. - - Args: - references (List[.DocumentReference, ...]): Iterable of document - references. 
- - Returns: - Tuple[List[str, ...], Dict[str, .DocumentReference]]: A two-tuple of - - * fully-qualified documents paths for each reference in ``references`` - * a mapping from the paths to the original reference. (If multiple - ``references`` contains multiple references to the same document, - that key will be overwritten in the result.) - """ - document_paths = [] - reference_map = {} - for reference in references: - doc_path = reference._document_path - document_paths.append(doc_path) - reference_map[doc_path] = reference - - return document_paths, reference_map - - -def _get_reference(document_path, reference_map): - """Get a document reference from a dictionary. - - This just wraps a simple dictionary look-up with a helpful error that is - specific to :meth:`~google.cloud.firestore.client.Client.get_all`, the - **public** caller of this function. - - Args: - document_path (str): A fully-qualified document path. - reference_map (Dict[str, .DocumentReference]): A mapping (produced - by :func:`_reference_info`) of fully-qualified document paths to - document references. - - Returns: - .DocumentReference: The matching reference. - - Raises: - ValueError: If ``document_path`` has not been encountered. - """ - try: - return reference_map[document_path] - except KeyError: - msg = _BAD_DOC_TEMPLATE.format(document_path) - raise ValueError(msg) - - -def _parse_batch_get(get_doc_response, reference_map, client): - """Parse a `BatchGetDocumentsResponse` protobuf. - - Args: - get_doc_response (~google.cloud.proto.firestore.v1beta1.\ - firestore_pb2.BatchGetDocumentsResponse): A single response (from - a stream) containing the "get" response for a document. - reference_map (Dict[str, .DocumentReference]): A mapping (produced - by :func:`_reference_info`) of fully-qualified document paths to - document references. - client (~.firestore_v1beta1.client.Client): A client that has - a document factory. - - Returns: - [.DocumentSnapshot]: The retrieved snapshot. - - Raises: - ValueError: If the response has a ``result`` field (a oneof) other - than ``found`` or ``missing``. - """ - result_type = get_doc_response.WhichOneof("result") - if result_type == "found": - reference = _get_reference(get_doc_response.found.name, reference_map) - data = _helpers.decode_dict(get_doc_response.found.fields, client) - snapshot = DocumentSnapshot( - reference, - data, - exists=True, - read_time=get_doc_response.read_time, - create_time=get_doc_response.found.create_time, - update_time=get_doc_response.found.update_time, - ) - elif result_type == "missing": - snapshot = DocumentSnapshot( - None, - None, - exists=False, - read_time=get_doc_response.read_time, - create_time=None, - update_time=None, - ) - else: - raise ValueError( - "`BatchGetDocumentsResponse.result` (a oneof) had a field other " - "than `found` or `missing` set, or was unset" - ) - return snapshot - - -def _get_doc_mask(field_paths): - """Get a document mask if field paths are provided. - - Args: - field_paths (Optional[Iterable[str, ...]]): An iterable of field - paths (``.``-delimited list of field names) to use as a - projection of document fields in the returned results. - - Returns: - Optional[google.cloud.firestore_v1beta1.types.DocumentMask]: A mask - to project documents to a restricted set of field paths. - """ - if field_paths is None: - return None - else: - return types.DocumentMask(field_paths=field_paths) - - -def _item_to_collection_ref(iterator, item): - """Convert collection ID to collection ref. 
- - Args: - iterator (google.api_core.page_iterator.GRPCIterator): - iterator response - item (str): ID of the collection - """ - return iterator.client.collection(item) diff --git a/firestore/google/cloud/firestore_v1beta1/collection.py b/firestore/google/cloud/firestore_v1beta1/collection.py deleted file mode 100644 index 45b1ddae03b3..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/collection.py +++ /dev/null @@ -1,478 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Classes for representing collections for the Google Cloud Firestore API.""" -import random -import warnings - -import six - -from google.cloud.firestore_v1beta1 import _helpers -from google.cloud.firestore_v1beta1 import query as query_mod -from google.cloud.firestore_v1beta1.proto import document_pb2 -from google.cloud.firestore_v1beta1.watch import Watch -from google.cloud.firestore_v1beta1 import document - -_AUTO_ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - - -class CollectionReference(object): - """A reference to a collection in a Firestore database. - - The collection may already exist or this class can facilitate creation - of documents within the collection. - - Args: - path (Tuple[str, ...]): The components in the collection path. - This is a series of strings representing each collection and - sub-collection ID, as well as the document IDs for any documents - that contain a sub-collection. - kwargs (dict): The keyword arguments for the constructor. The only - supported keyword is ``client`` and it must be a - :class:`~google.cloud.firestore_v1beta1.client.Client` if - provided. It represents the client that created this collection - reference. - - Raises: - ValueError: if - - * the ``path`` is empty - * there are an even number of elements - * a collection ID in ``path`` is not a string - * a document ID in ``path`` is not a string - TypeError: If a keyword other than ``client`` is used. - """ - - def __init__(self, *path, **kwargs): - _helpers.verify_path(path, is_collection=True) - self._path = path - self._client = kwargs.pop("client", None) - if kwargs: - raise TypeError( - "Received unexpected arguments", kwargs, "Only `client` is supported" - ) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._path == other._path and self._client == other._client - - @property - def id(self): - """The collection identifier. - - Returns: - str: The last component of the path. - """ - return self._path[-1] - - @property - def parent(self): - """Document that owns the current collection. - - Returns: - Optional[~.firestore_v1beta1.document.DocumentReference]: The - parent document, if the current collection is not a - top-level collection. 
- """ - if len(self._path) == 1: - return None - else: - parent_path = self._path[:-1] - return self._client.document(*parent_path) - - def document(self, document_id=None): - """Create a sub-document underneath the current collection. - - Args: - document_id (Optional[str]): The document identifier - within the current collection. If not provided, will default - to a random 20 character string composed of digits, - uppercase and lowercase and letters. - - Returns: - ~.firestore_v1beta1.document.DocumentReference: The child - document. - """ - if document_id is None: - document_id = _auto_id() - - child_path = self._path + (document_id,) - return self._client.document(*child_path) - - def _parent_info(self): - """Get fully-qualified parent path and prefix for this collection. - - Returns: - Tuple[str, str]: Pair of - - * the fully-qualified (with database and project) path to the - parent of this collection (will either be the database path - or a document path). - * the prefix to a document in this collection. - """ - parent_doc = self.parent - if parent_doc is None: - parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join( - (self._client._database_string, "documents") - ) - else: - parent_path = parent_doc._document_path - - expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id)) - return parent_path, expected_prefix - - def add(self, document_data, document_id=None): - """Create a document in the Firestore database with the provided data. - - Args: - document_data (dict): Property names and values to use for - creating the document. - document_id (Optional[str]): The document identifier within the - current collection. If not provided, an ID will be - automatically assigned by the server (the assigned ID will be - a random 20 character string composed of digits, - uppercase and lowercase letters). - - Returns: - Tuple[google.protobuf.timestamp_pb2.Timestamp, \ - ~.firestore_v1beta1.document.DocumentReference]: Pair of - - * The ``update_time`` when the document was created (or - overwritten). - * A document reference for the created document. - - Raises: - ~google.cloud.exceptions.Conflict: If ``document_id`` is provided - and the document already exists. - """ - if document_id is None: - parent_path, expected_prefix = self._parent_info() - - document_pb = document_pb2.Document() - - created_document_pb = self._client._firestore_api.create_document( - parent_path, - collection_id=self.id, - document_id=None, - document=document_pb, - mask=None, - metadata=self._client._rpc_metadata, - ) - - new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix) - document_ref = self.document(new_document_id) - set_result = document_ref.set(document_data) - return set_result.update_time, document_ref - else: - document_ref = self.document(document_id) - write_result = document_ref.create(document_data) - return write_result.update_time, document_ref - - def list_documents(self, page_size=None): - """List all subdocuments of the current collection. - - Args: - page_size (Optional[int]]): The maximum number of documents - in each page of results from this request. Non-positive values - are ignored. Defaults to a sensible value set by the API. - - Returns: - Sequence[~.firestore_v1beta1.collection.DocumentReference]: - iterator of subdocuments of the current collection. 
If the - collection does not exist at the time of `snapshot`, the - iterator will be empty - """ - parent, _ = self._parent_info() - - iterator = self._client._firestore_api.list_documents( - parent, - self.id, - page_size=page_size, - show_missing=True, - metadata=self._client._rpc_metadata, - ) - iterator.collection = self - iterator.item_to_value = _item_to_document_ref - return iterator - - def select(self, field_paths): - """Create a "select" query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.select` for - more information on this method. - - Args: - field_paths (Iterable[str, ...]): An iterable of field paths - (``.``-delimited list of field names) to use as a projection - of document fields in the query results. - - Returns: - ~.firestore_v1beta1.query.Query: A "projected" query. - """ - query = query_mod.Query(self) - return query.select(field_paths) - - def where(self, field_path, op_string, value): - """Create a "where" query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.where` for - more information on this method. - - Args: - field_path (str): A field path (``.``-delimited list of - field names) for the field to filter on. - op_string (str): A comparison operation in the form of a string. - Acceptable values are ``<``, ``<=``, ``==``, ``>=`` - and ``>``. - value (Any): The value to compare the field against in the filter. - If ``value`` is :data:`None` or a NaN, then ``==`` is the only - allowed operation. - - Returns: - ~.firestore_v1beta1.query.Query: A filtered query. - """ - query = query_mod.Query(self) - return query.where(field_path, op_string, value) - - def order_by(self, field_path, **kwargs): - """Create an "order by" query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by` for - more information on this method. - - Args: - field_path (str): A field path (``.``-delimited list of - field names) on which to order the query results. - kwargs (Dict[str, Any]): The keyword arguments to pass along - to the query. The only supported keyword is ``direction``, see - :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by` - for more information. - - Returns: - ~.firestore_v1beta1.query.Query: An "order by" query. - """ - query = query_mod.Query(self) - return query.order_by(field_path, **kwargs) - - def limit(self, count): - """Create a limited query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.limit` for - more information on this method. - - Args: - count (int): Maximum number of documents to return that match - the query. - - Returns: - ~.firestore_v1beta1.query.Query: A limited query. - """ - query = query_mod.Query(self) - return query.limit(count) - - def offset(self, num_to_skip): - """Skip to an offset in a query with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.offset` for - more information on this method. - - Args: - num_to_skip (int): The number of results to skip at the beginning - of query results. (Must be non-negative.) - - Returns: - ~.firestore_v1beta1.query.Query: An offset query. - """ - query = query_mod.Query(self) - return query.offset(num_to_skip) - - def start_at(self, document_fields): - """Start query at a cursor with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.start_at` for - more information on this method. 
- - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. - - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. - """ - query = query_mod.Query(self) - return query.start_at(document_fields) - - def start_after(self, document_fields): - """Start query after a cursor with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.start_after` for - more information on this method. - - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. - - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. - """ - query = query_mod.Query(self) - return query.start_after(document_fields) - - def end_before(self, document_fields): - """End query before a cursor with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.end_before` for - more information on this method. - - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. - - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. - """ - query = query_mod.Query(self) - return query.end_before(document_fields) - - def end_at(self, document_fields): - """End query at a cursor with this collection as parent. - - See - :meth:`~google.cloud.firestore_v1beta1.query.Query.end_at` for - more information on this method. - - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. - - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. - """ - query = query_mod.Query(self) - return query.end_at(document_fields) - - def get(self, transaction=None): - """Deprecated alias for :meth:`stream`.""" - warnings.warn( - "'Collection.get' is deprecated: please use 'Collection.stream' instead.", - DeprecationWarning, - stacklevel=2, - ) - return self.stream(transaction=transaction) - - def stream(self, transaction=None): - """Read the documents in this collection. - - This sends a ``RunQuery`` RPC and then returns an iterator which - consumes each document returned in the stream of ``RunQueryResponse`` - messages. - - .. note:: - - The underlying stream of responses will time out after - the ``max_rpc_timeout_millis`` value set in the GAPIC - client configuration for the ``RunQuery`` API. Snapshots - not consumed from the iterator before that point will be lost. - - If a ``transaction`` is used and it already has write operations - added, this method cannot be used (i.e. read-after-write is not - allowed). - - Args: - transaction (Optional[~.firestore_v1beta1.transaction.\ - Transaction]): An existing transaction that the query will - run in. - - Yields: - ~.firestore_v1beta1.document.DocumentSnapshot: The next - document that fulfills the query. 
- """ - query = query_mod.Query(self) - return query.stream(transaction=transaction) - - def on_snapshot(self, callback): - """Monitor the documents in this collection. - - This starts a watch on this collection using a background thread. The - provided callback is run on the snapshot of the documents. - - Args: - callback(~.firestore.collection.CollectionSnapshot): a callback - to run when a change occurs. - - Example: - from google.cloud import firestore_v1beta1 - - db = firestore_v1beta1.Client() - collection_ref = db.collection(u'users') - - def on_snapshot(collection_snapshot): - for doc in collection_snapshot.documents: - print(u'{} => {}'.format(doc.id, doc.to_dict())) - - # Watch this collection - collection_watch = collection_ref.on_snapshot(on_snapshot) - - # Terminate this watch - collection_watch.unsubscribe() - """ - return Watch.for_query( - query_mod.Query(self), - callback, - document.DocumentSnapshot, - document.DocumentReference, - ) - - -def _auto_id(): - """Generate a "random" automatically generated ID. - - Returns: - str: A 20 character string composed of digits, uppercase and - lowercase and letters. - """ - return "".join(random.choice(_AUTO_ID_CHARS) for _ in six.moves.xrange(20)) - - -def _item_to_document_ref(iterator, item): - """Convert Document resource to document ref. - - Args: - iterator (google.api_core.page_iterator.GRPCIterator): - iterator response - item (dict): document resource - """ - document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[-1] - return iterator.collection.document(document_id) diff --git a/firestore/google/cloud/firestore_v1beta1/document.py b/firestore/google/cloud/firestore_v1beta1/document.py deleted file mode 100644 index 8efd452556b6..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/document.py +++ /dev/null @@ -1,780 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Classes for representing documents for the Google Cloud Firestore API.""" - -import copy - -import six - -from google.api_core import exceptions -from google.cloud.firestore_v1beta1 import _helpers -from google.cloud.firestore_v1beta1 import field_path as field_path_module -from google.cloud.firestore_v1beta1.proto import common_pb2 -from google.cloud.firestore_v1beta1.watch import Watch - - -class DocumentReference(object): - """A reference to a document in a Firestore database. - - The document may already exist or can be created by this class. - - Args: - path (Tuple[str, ...]): The components in the document path. - This is a series of strings representing each collection and - sub-collection ID, as well as the document IDs for any documents - that contain a sub-collection (as well as the base document). - kwargs (dict): The keyword arguments for the constructor. The only - supported keyword is ``client`` and it must be a - :class:`~google.cloud.firestore_v1beta1.client.Client`. - It represents the client that created this document reference. 
- - Raises: - ValueError: if - - * the ``path`` is empty - * there are an even number of elements - * a collection ID in ``path`` is not a string - * a document ID in ``path`` is not a string - TypeError: If a keyword other than ``client`` is used. - """ - - _document_path_internal = None - - def __init__(self, *path, **kwargs): - _helpers.verify_path(path, is_collection=False) - self._path = path - self._client = kwargs.pop("client", None) - if kwargs: - raise TypeError( - "Received unexpected arguments", kwargs, "Only `client` is supported" - ) - - def __copy__(self): - """Shallow copy the instance. - - We leave the client "as-is" but tuple-unpack the path. - - Returns: - .DocumentReference: A copy of the current document. - """ - result = self.__class__(*self._path, client=self._client) - result._document_path_internal = self._document_path_internal - return result - - def __deepcopy__(self, unused_memo): - """Deep copy the instance. - - This isn't a true deep copy, wee leave the client "as-is" but - tuple-unpack the path. - - Returns: - .DocumentReference: A copy of the current document. - """ - return self.__copy__() - - def __eq__(self, other): - """Equality check against another instance. - - Args: - other (Any): A value to compare against. - - Returns: - Union[bool, NotImplementedType]: Indicating if the values are - equal. - """ - if isinstance(other, DocumentReference): - return self._client == other._client and self._path == other._path - else: - return NotImplemented - - def __hash__(self): - return hash(self._path) + hash(self._client) - - def __ne__(self, other): - """Inequality check against another instance. - - Args: - other (Any): A value to compare against. - - Returns: - Union[bool, NotImplementedType]: Indicating if the values are - not equal. - """ - if isinstance(other, DocumentReference): - return self._client != other._client or self._path != other._path - else: - return NotImplemented - - @property - def path(self): - """Database-relative for this document. - - Returns: - str: The document's relative path. - """ - return "/".join(self._path) - - @property - def _document_path(self): - """Create and cache the full path for this document. - - Of the form: - - ``projects/{project_id}/databases/{database_id}/... - documents/{document_path}`` - - Returns: - str: The full document path. - - Raises: - ValueError: If the current document reference has no ``client``. - """ - if self._document_path_internal is None: - if self._client is None: - raise ValueError("A document reference requires a `client`.") - self._document_path_internal = _get_document_path(self._client, self._path) - - return self._document_path_internal - - @property - def id(self): - """The document identifier (within its collection). - - Returns: - str: The last component of the path. - """ - return self._path[-1] - - @property - def parent(self): - """Collection that owns the current document. - - Returns: - ~.firestore_v1beta1.collection.CollectionReference: The - parent collection. - """ - parent_path = self._path[:-1] - return self._client.collection(*parent_path) - - def collection(self, collection_id): - """Create a sub-collection underneath the current document. - - Args: - collection_id (str): The sub-collection identifier (sometimes - referred to as the "kind"). - - Returns: - ~.firestore_v1beta1.collection.CollectionReference: The - child collection. 
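The path-related properties and ``collection`` make it easy to walk up and down the hierarchy. Continuing the hypothetical ``users/alovelace`` document:

.. code-block:: python

    from google.cloud import firestore_v1beta1

    client = firestore_v1beta1.Client()
    doc_ref = client.collection(u'users').document(u'alovelace')

    doc_ref.id       # 'alovelace' -- the last path component
    doc_ref.path     # 'users/alovelace' -- the database-relative path
    doc_ref.parent   # the 'users' CollectionReference

    # A sub-collection nested underneath the document.
    messages = doc_ref.collection(u'messages')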
- """ - child_path = self._path + (collection_id,) - return self._client.collection(*child_path) - - def create(self, document_data): - """Create the current document in the Firestore database. - - Args: - document_data (dict): Property names and values to use for - creating a document. - - Returns: - google.cloud.firestore_v1beta1.types.WriteResult: The - write result corresponding to the committed document. A write - result contains an ``update_time`` field. - - Raises: - ~google.cloud.exceptions.Conflict: If the document already exists. - """ - batch = self._client.batch() - batch.create(self, document_data) - write_results = batch.commit() - return _first_write_result(write_results) - - def set(self, document_data, merge=False): - """Replace the current document in the Firestore database. - - A write ``option`` can be specified to indicate preconditions of - the "set" operation. If no ``option`` is specified and this document - doesn't exist yet, this method will create it. - - Overwrites all content for the document with the fields in - ``document_data``. This method performs almost the same functionality - as :meth:`create`. The only difference is that this method doesn't - make any requirements on the existence of the document (unless - ``option`` is used), whereas as :meth:`create` will fail if the - document already exists. - - Args: - document_data (dict): Property names and values to use for - replacing a document. - merge (Optional[bool] or Optional[List]): - If True, apply merging instead of overwriting the state - of the document. - - Returns: - google.cloud.firestore_v1beta1.types.WriteResult: The - write result corresponding to the committed document. A write - result contains an ``update_time`` field. - """ - batch = self._client.batch() - batch.set(self, document_data, merge=merge) - write_results = batch.commit() - return _first_write_result(write_results) - - def update(self, field_updates, option=None): - """Update an existing document in the Firestore database. - - By default, this method verifies that the document exists on the - server before making updates. A write ``option`` can be specified to - override these preconditions. - - Each key in ``field_updates`` can either be a field name or a - **field path** (For more information on **field paths**, see - :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path`.) To - illustrate this, consider a document with - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'bar': 'baz', - }, - 'other': True, - } - - stored on the server. If the field name is used in the update: - - .. code-block:: python - - >>> field_updates = { - ... 'foo': { - ... 'quux': 800, - ... }, - ... } - >>> document.update(field_updates) - - then all of ``foo`` will be overwritten on the server and the new - value will be - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'quux': 800, - }, - 'other': True, - } - - On the other hand, if a ``.``-delimited **field path** is used in the - update: - - .. code-block:: python - - >>> field_updates = { - ... 'foo.quux': 800, - ... } - >>> document.update(field_updates) - - then only ``foo.quux`` will be updated on the server and the - field ``foo.bar`` will remain intact: - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'bar': 'baz', - 'quux': 800, - }, - 'other': True, - } - - .. 
warning:: - - A **field path** can only be used as a top-level key in - ``field_updates``. - - To delete / remove a field from an existing document, use the - :attr:`~google.cloud.firestore_v1beta1.transforms.DELETE_FIELD` - sentinel. So with the example above, sending - - .. code-block:: python - - >>> field_updates = { - ... 'other': firestore.DELETE_FIELD, - ... } - >>> document.update(field_updates) - - would update the value on the server to: - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'bar': 'baz', - }, - } - - To set a field to the current time on the server when the - update is received, use the - :attr:`~google.cloud.firestore_v1beta1.transforms.SERVER_TIMESTAMP` - sentinel. Sending - - .. code-block:: python - - >>> field_updates = { - ... 'foo.now': firestore.SERVER_TIMESTAMP, - ... } - >>> document.update(field_updates) - - would update the value on the server to: - - .. code-block:: python - - >>> snapshot = document.get() - >>> snapshot.to_dict() - { - 'foo': { - 'bar': 'baz', - 'now': datetime.datetime(2012, ...), - }, - 'other': True, - } - - Args: - field_updates (dict): Field names or paths to update and values - to update with. - option (Optional[~.firestore_v1beta1.client.WriteOption]): A - write option to make assertions / preconditions on the server - state of the document before applying changes. - - Returns: - google.cloud.firestore_v1beta1.types.WriteResult: The - write result corresponding to the updated document. A write - result contains an ``update_time`` field. - - Raises: - ~google.cloud.exceptions.NotFound: If the document does not exist. - """ - batch = self._client.batch() - batch.update(self, field_updates, option=option) - write_results = batch.commit() - return _first_write_result(write_results) - - def delete(self, option=None): - """Delete the current document in the Firestore database. - - Args: - option (Optional[~.firestore_v1beta1.client.WriteOption]): A - write option to make assertions / preconditions on the server - state of the document before applying changes. - - Returns: - google.protobuf.timestamp_pb2.Timestamp: The time that the delete - request was received by the server. If the document did not exist - when the delete was sent (i.e. nothing was deleted), this method - will still succeed and will still return the time that the - request was received by the server. - """ - write_pb = _helpers.pb_for_delete(self._document_path, option) - commit_response = self._client._firestore_api.commit( - self._client._database_string, - [write_pb], - transaction=None, - metadata=self._client._rpc_metadata, - ) - - return commit_response.commit_time - - def get(self, field_paths=None, transaction=None): - """Retrieve a snapshot of the current document. - - See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path` - for more information on **field paths**. - - If a ``transaction`` is used and it already has write operations - added, this method cannot be used (i.e. read-after-write is not - allowed). - - Args: - field_paths (Optional[Iterable[str, ...]]): An iterable of field - paths (``.``-delimited list of field names) to use as a - projection of document fields in the returned results. If - no value is provided, all fields will be returned. - transaction (Optional[~.firestore_v1beta1.transaction.\ - Transaction]): An existing transaction that this reference - will be retrieved in. - - Returns: - ~.firestore_v1beta1.document.DocumentSnapshot: A snapshot of - the current document. 
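Dotted field paths, ``DELETE_FIELD`` and ``SERVER_TIMESTAMP`` can be combined across ``update`` calls as described above. A hedged sketch, assuming the sentinels are re-exported at the package level as the docstring examples suggest, with hypothetical document contents:

.. code-block:: python

    from google.cloud import firestore_v1beta1 as firestore

    client = firestore.Client()
    doc_ref = client.collection(u'users').document(u'alovelace')

    doc_ref.set({u'foo': {u'bar': u'baz'}, u'other': True})

    # A dotted field path touches only the nested key; 'foo.bar' is preserved.
    doc_ref.update({u'foo.quux': 800})

    # Drop one field and stamp another with the time the server sees the write.
    doc_ref.update({
        u'other': firestore.DELETE_FIELD,
        u'foo.now': firestore.SERVER_TIMESTAMP,
    })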
If the document does not exist at - the time of `snapshot`, the snapshot `reference`, `data`, - `update_time`, and `create_time` attributes will all be - `None` and `exists` will be `False`. - """ - if isinstance(field_paths, six.string_types): - raise ValueError("'field_paths' must be a sequence of paths, not a string.") - - if field_paths is not None: - mask = common_pb2.DocumentMask(field_paths=sorted(field_paths)) - else: - mask = None - - firestore_api = self._client._firestore_api - try: - document_pb = firestore_api.get_document( - self._document_path, - mask=mask, - transaction=_helpers.get_transaction_id(transaction), - metadata=self._client._rpc_metadata, - ) - except exceptions.NotFound: - data = None - exists = False - create_time = None - update_time = None - else: - data = _helpers.decode_dict(document_pb.fields, self._client) - exists = True - create_time = document_pb.create_time - update_time = document_pb.update_time - - return DocumentSnapshot( - reference=self, - data=data, - exists=exists, - read_time=None, # No server read_time available - create_time=create_time, - update_time=update_time, - ) - - def collections(self, page_size=None): - """List subcollections of the current document. - - Args: - page_size (Optional[int]]): The maximum number of collections - in each page of results from this request. Non-positive values - are ignored. Defaults to a sensible value set by the API. - - Returns: - Sequence[~.firestore_v1beta1.collection.CollectionReference]: - iterator of subcollections of the current document. If the - document does not exist at the time of `snapshot`, the - iterator will be empty - """ - iterator = self._client._firestore_api.list_collection_ids( - self._document_path, - page_size=page_size, - metadata=self._client._rpc_metadata, - ) - iterator.document = self - iterator.item_to_value = _item_to_collection_ref - return iterator - - def on_snapshot(self, callback): - """Watch this document. - - This starts a watch on this document using a background thread. The - provided callback is run on the snapshot. - - Args: - callback(~.firestore.document.DocumentSnapshot):a callback to run - when a change occurs - - Example: - from google.cloud import firestore_v1beta1 - - db = firestore_v1beta1.Client() - collection_ref = db.collection(u'users') - - def on_snapshot(document_snapshot): - doc = document_snapshot - print(u'{} => {}'.format(doc.id, doc.to_dict())) - - doc_ref = db.collection(u'users').document( - u'alovelace' + unique_resource_id()) - - # Watch this document - doc_watch = doc_ref.on_snapshot(on_snapshot) - - # Terminate this watch - doc_watch.unsubscribe() - """ - return Watch.for_document(self, callback, DocumentSnapshot, DocumentReference) - - -class DocumentSnapshot(object): - """A snapshot of document data in a Firestore database. - - This represents data retrieved at a specific time and may not contain - all fields stored for the document (i.e. a hand-picked selection of - fields may have been retrieved). - - Instances of this class are not intended to be constructed by hand, - rather they'll be returned as responses to various methods, such as - :meth:`~google.cloud.DocumentReference.get`. - - Args: - reference (~.firestore_v1beta1.document.DocumentReference): A - document reference corresponding to the document that contains - the data in this snapshot. - data (Dict[str, Any]): The data retrieved in the snapshot. - exists (bool): Indicates if the document existed at the time the - snapshot was retrieved. 
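Because a missing document still yields a snapshot (with ``exists`` set to ``False``), callers can branch without catching exceptions. A minimal sketch, reusing the hypothetical document from the earlier examples:

.. code-block:: python

    from google.cloud import firestore_v1beta1

    client = firestore_v1beta1.Client()
    doc_ref = client.collection(u'users').document(u'alovelace')

    # Only the requested fields are fetched and decoded.
    snapshot = doc_ref.get(field_paths=[u'foo.bar', u'other'])

    if snapshot.exists:
        print(snapshot.to_dict())
    else:
        print(u'No such document: {}'.format(doc_ref.path))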
- read_time (google.protobuf.timestamp_pb2.Timestamp): The time that - this snapshot was read from the server. - create_time (google.protobuf.timestamp_pb2.Timestamp): The time that - this document was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): The time that - this document was last updated. - """ - - def __init__(self, reference, data, exists, read_time, create_time, update_time): - self._reference = reference - # We want immutable data, so callers can't modify this value - # out from under us. - self._data = copy.deepcopy(data) - self._exists = exists - self.read_time = read_time - """google.protobuf.timestamp_pb2.Timestamp: Time snapshot was read.""" - self.create_time = create_time - """google.protobuf.timestamp_pb2.Timestamp: Document's creation.""" - self.update_time = update_time - """google.protobuf.timestamp_pb2.Timestamp: Document's last update.""" - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._reference == other._reference and self._data == other._data - - def __hash__(self): - seconds = self.update_time.seconds - nanos = self.update_time.nanos - return hash(self._reference) + hash(seconds) + hash(nanos) - - @property - def _client(self): - """The client that owns the document reference for this snapshot. - - Returns: - ~.firestore_v1beta1.client.Client: The client that owns this - document. - """ - return self._reference._client - - @property - def exists(self): - """Existence flag. - - Indicates if the document existed at the time this snapshot - was retrieved. - - Returns: - bool: The existence flag. - """ - return self._exists - - @property - def id(self): - """The document identifier (within its collection). - - Returns: - str: The last component of the path of the document. - """ - return self._reference.id - - @property - def reference(self): - """Document reference corresponding to document that owns this data. - - Returns: - ~.firestore_v1beta1.document.DocumentReference: A document - reference corresponding to this document. - """ - return self._reference - - def get(self, field_path): - """Get a value from the snapshot data. - - If the data is nested, for example: - - .. code-block:: python - - >>> snapshot.to_dict() - { - 'top1': { - 'middle2': { - 'bottom3': 20, - 'bottom4': 22, - }, - 'middle5': True, - }, - 'top6': b'\x00\x01 foo', - } - - a **field path** can be used to access the nested data. For - example: - - .. code-block:: python - - >>> snapshot.get('top1') - { - 'middle2': { - 'bottom3': 20, - 'bottom4': 22, - }, - 'middle5': True, - } - >>> snapshot.get('top1.middle2') - { - 'bottom3': 20, - 'bottom4': 22, - } - >>> snapshot.get('top1.middle2.bottom3') - 20 - - See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path` - for more information on **field paths**. - - A copy is returned since the data may contain mutable values, - but the data stored in the snapshot must remain immutable. - - Args: - field_path (str): A field path (``.``-delimited list of - field names). - - Returns: - Any or None: - (A copy of) the value stored for the ``field_path`` or - None if snapshot document does not exist. - - Raises: - KeyError: If the ``field_path`` does not match nested data - in the snapshot. - """ - if not self._exists: - return None - nested_data = field_path_module.get_nested_value(field_path, self._data) - return copy.deepcopy(nested_data) - - def to_dict(self): - """Retrieve the data contained in this snapshot. 
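Both ``DocumentSnapshot.get`` and ``to_dict`` hand back deep copies, so mutating a returned value never changes the snapshot itself. A small illustrative sketch (document contents are hypothetical):

.. code-block:: python

    from google.cloud import firestore_v1beta1

    client = firestore_v1beta1.Client()
    doc_ref = client.collection(u'users').document(u'alovelace')
    doc_ref.set({u'foo': {u'bar': u'baz'}})

    snapshot = doc_ref.get()
    data = snapshot.to_dict()
    data[u'foo'][u'bar'] = u'mutated'

    # The snapshot still holds the original value; only the copy changed.
    assert snapshot.get(u'foo.bar') == u'baz'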
- - A copy is returned since the data may contain mutable values, - but the data stored in the snapshot must remain immutable. - - Returns: - Dict[str, Any] or None: - The data in the snapshot. Returns None if reference - does not exist. - """ - if not self._exists: - return None - return copy.deepcopy(self._data) - - -def _get_document_path(client, path): - """Convert a path tuple into a full path string. - - Of the form: - - ``projects/{project_id}/databases/{database_id}/... - documents/{document_path}`` - - Args: - client (~.firestore_v1beta1.client.Client): The client that holds - configuration details and a GAPIC client object. - path (Tuple[str, ...]): The components in a document path. - - Returns: - str: The fully-qualified document path. - """ - parts = (client._database_string, "documents") + path - return _helpers.DOCUMENT_PATH_DELIMITER.join(parts) - - -def _consume_single_get(response_iterator): - """Consume a gRPC stream that should contain a single response. - - The stream will correspond to a ``BatchGetDocuments`` request made - for a single document. - - Args: - response_iterator (~google.cloud.exceptions.GrpcRendezvous): A - streaming iterator returned from a ``BatchGetDocuments`` - request. - - Returns: - ~google.cloud.proto.firestore.v1beta1.\ - firestore_pb2.BatchGetDocumentsResponse: The single "get" - response in the batch. - - Raises: - ValueError: If anything other than exactly one response is returned. - """ - # Calling ``list()`` consumes the entire iterator. - all_responses = list(response_iterator) - if len(all_responses) != 1: - raise ValueError( - "Unexpected response from `BatchGetDocumentsResponse`", - all_responses, - "Expected only one result", - ) - - return all_responses[0] - - -def _first_write_result(write_results): - """Get first write result from list. - - For cases where ``len(write_results) > 1``, this assumes the writes - occurred at the same time (e.g. if an update and transform are sent - at the same time). - - Args: - write_results (List[google.cloud.proto.firestore.v1beta1.\ - write_pb2.WriteResult, ...]: The write results from a - ``CommitResponse``. - - Returns: - google.cloud.firestore_v1beta1.types.WriteResult: The - lone write result from ``write_results``. - - Raises: - ValueError: If there are zero write results. This is likely to - **never** occur, since the backend should be stable. - """ - if not write_results: - raise ValueError("Expected at least one write result") - - return write_results[0] - - -def _item_to_collection_ref(iterator, item): - """Convert collection ID to collection ref. - - Args: - iterator (google.api_core.page_iterator.GRPCIterator): - iterator response - item (str): ID of the collection - """ - return iterator.document.collection(item) diff --git a/firestore/google/cloud/firestore_v1beta1/field_path.py b/firestore/google/cloud/firestore_v1beta1/field_path.py deleted file mode 100644 index 1570aefb57a7..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/field_path.py +++ /dev/null @@ -1,386 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for managing / converting field paths to / from strings.""" - -try: - from collections import abc as collections_abc -except ImportError: # Python 2.7 - import collections as collections_abc - -import re - -import six - - -_FIELD_PATH_MISSING_TOP = "{!r} is not contained in the data" -_FIELD_PATH_MISSING_KEY = "{!r} is not contained in the data for the key {!r}" -_FIELD_PATH_WRONG_TYPE = ( - "The data at {!r} is not a dictionary, so it cannot contain the key {!r}" -) - -_FIELD_PATH_DELIMITER = "." -_BACKSLASH = "\\" -_ESCAPED_BACKSLASH = _BACKSLASH * 2 -_BACKTICK = "`" -_ESCAPED_BACKTICK = _BACKSLASH + _BACKTICK - -_SIMPLE_FIELD_NAME = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*$") -_LEADING_ALPHA_INVALID = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*[^_a-zA-Z0-9]") -PATH_ELEMENT_TOKENS = [ - ("SIMPLE", r"[_a-zA-Z][_a-zA-Z0-9]*"), # unquoted elements - ("QUOTED", r"`(?:\\`|[^`])*?`"), # quoted elements, unquoted - ("DOT", r"\."), # separator -] -TOKENS_PATTERN = "|".join("(?P<{}>{})".format(*pair) for pair in PATH_ELEMENT_TOKENS) -TOKENS_REGEX = re.compile(TOKENS_PATTERN) - - -def _tokenize_field_path(path): - """Lex a field path into tokens (including dots). - - Args: - path (str): field path to be lexed. - Returns: - List(str): tokens - """ - pos = 0 - get_token = TOKENS_REGEX.match - match = get_token(path) - while match is not None: - type_ = match.lastgroup - value = match.group(type_) - yield value - pos = match.end() - match = get_token(path, pos) - if pos != len(path): - raise ValueError("Path {} not consumed, residue: {}".format(path, path[pos:])) - - -def split_field_path(path): - """Split a field path into valid elements (without dots). - - Args: - path (str): field path to be lexed. - Returns: - List(str): tokens - Raises: - ValueError: if the path does not match the elements-interspersed- - with-dots pattern. - """ - if not path: - return [] - - elements = [] - want_dot = False - - for element in _tokenize_field_path(path): - if want_dot: - if element != ".": - raise ValueError("Invalid path: {}".format(path)) - else: - want_dot = False - else: - if element == ".": - raise ValueError("Invalid path: {}".format(path)) - elements.append(element) - want_dot = True - - if not want_dot or not elements: - raise ValueError("Invalid path: {}".format(path)) - - return elements - - -def parse_field_path(api_repr): - """Parse a **field path** from into a list of nested field names. - - See :func:`field_path` for more on **field paths**. - - Args: - api_repr (str): - The unique Firestore api representation which consists of - either simple or UTF-8 field names. It cannot exceed - 1500 bytes, and cannot be empty. Simple field names match - ``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are - escaped by surrounding them with backticks. - - Returns: - List[str, ...]: The list of field names in the field path. - """ - # code dredged back up from - # https://github.com/googleapis/google-cloud-python/pull/5109/files - field_names = [] - for field_name in split_field_path(api_repr): - # non-simple field name - if field_name[0] == "`" and field_name[-1] == "`": - field_name = field_name[1:-1] - field_name = field_name.replace(_ESCAPED_BACKTICK, _BACKTICK) - field_name = field_name.replace(_ESCAPED_BACKSLASH, _BACKSLASH) - field_names.append(field_name) - return field_names - - -def render_field_path(field_names): - """Create a **field path** from a list of nested field names. 
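``parse_field_path`` and ``render_field_path`` are inverses of one another: parsing unquotes and unescapes backticked elements, while rendering re-quotes any element that is not a simple identifier. For example:

.. code-block:: python

    from google.cloud.firestore_v1beta1 import field_path

    # A backtick-quoted element is unescaped when the path is parsed ...
    assert field_path.parse_field_path('data.`my key`') == ['data', 'my key']

    # ... and re-quoted when the element list is rendered back to a string.
    assert field_path.render_field_path(['data', 'my key']) == 'data.`my key`'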
- - A **field path** is a ``.``-delimited concatenation of the field - names. It is used to represent a nested field. For example, - in the data - - .. code-block: python - - data = { - 'aa': { - 'bb': { - 'cc': 10, - }, - }, - } - - the field path ``'aa.bb.cc'`` represents that data stored in - ``data['aa']['bb']['cc']``. - - Args: - field_names (Iterable[str, ...]): The list of field names. - - Returns: - str: The ``.``-delimited field path. - """ - result = [] - - for field_name in field_names: - match = _SIMPLE_FIELD_NAME.match(field_name) - if match and match.group(0) == field_name: - result.append(field_name) - else: - replaced = field_name.replace(_BACKSLASH, _ESCAPED_BACKSLASH).replace( - _BACKTICK, _ESCAPED_BACKTICK - ) - result.append(_BACKTICK + replaced + _BACKTICK) - - return _FIELD_PATH_DELIMITER.join(result) - - -get_field_path = render_field_path # backward-compatibility - - -def get_nested_value(field_path, data): - """Get a (potentially nested) value from a dictionary. - - If the data is nested, for example: - - .. code-block:: python - - >>> data - { - 'top1': { - 'middle2': { - 'bottom3': 20, - 'bottom4': 22, - }, - 'middle5': True, - }, - 'top6': b'\x00\x01 foo', - } - - a **field path** can be used to access the nested data. For - example: - - .. code-block:: python - - >>> get_nested_value('top1', data) - { - 'middle2': { - 'bottom3': 20, - 'bottom4': 22, - }, - 'middle5': True, - } - >>> get_nested_value('top1.middle2', data) - { - 'bottom3': 20, - 'bottom4': 22, - } - >>> get_nested_value('top1.middle2.bottom3', data) - 20 - - See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path` for - more information on **field paths**. - - Args: - field_path (str): A field path (``.``-delimited list of - field names). - data (Dict[str, Any]): The (possibly nested) data. - - Returns: - Any: (A copy of) the value stored for the ``field_path``. - - Raises: - KeyError: If the ``field_path`` does not match nested data. - """ - field_names = parse_field_path(field_path) - - nested_data = data - for index, field_name in enumerate(field_names): - if isinstance(nested_data, collections_abc.Mapping): - if field_name in nested_data: - nested_data = nested_data[field_name] - else: - if index == 0: - msg = _FIELD_PATH_MISSING_TOP.format(field_name) - raise KeyError(msg) - else: - partial = render_field_path(field_names[:index]) - msg = _FIELD_PATH_MISSING_KEY.format(field_name, partial) - raise KeyError(msg) - else: - partial = render_field_path(field_names[:index]) - msg = _FIELD_PATH_WRONG_TYPE.format(partial, field_name) - raise KeyError(msg) - - return nested_data - - -class FieldPath(object): - """Field Path object for client use. - - A field path is a sequence of element keys, separated by periods. - Each element key can be either a simple identifier, or a full unicode - string. - - In the string representation of a field path, non-identifier elements - must be quoted using backticks, with internal backticks and backslashes - escaped with a backslash. - - Args: - parts: (one or more strings) - Indicating path of the key to be used. - """ - - def __init__(self, *parts): - for part in parts: - if not isinstance(part, six.string_types) or not part: - error = "One or more components is not a string or is empty." - raise ValueError(error) - self.parts = tuple(parts) - - @classmethod - def from_api_repr(cls, api_repr): - """Factory: create a FieldPath from the string formatted per the API. 
- - Args: - api_repr (str): a string path, with non-identifier elements quoted - It cannot exceed 1500 characters, and cannot be empty. - Returns: - (:class:`FieldPath`) An instance parsed from ``api_repr``. - Raises: - ValueError if the parsing fails - """ - api_repr = api_repr.strip() - if not api_repr: - raise ValueError("Field path API representation cannot be empty.") - return cls(*parse_field_path(api_repr)) - - @classmethod - def from_string(cls, path_string): - """Factory: create a FieldPath from a unicode string representation. - - This method splits on the character `.` and disallows the - characters `~*/[]`. To create a FieldPath whose components have - those characters, call the constructor. - - Args: - path_string (str): A unicode string which cannot contain - `~*/[]` characters, cannot exceed 1500 bytes, and cannot be empty. - - Returns: - (:class:`FieldPath`) An instance parsed from ``path_string``. - """ - try: - return cls.from_api_repr(path_string) - except ValueError: - elements = path_string.split(".") - for element in elements: - if not element: - raise ValueError("Empty element") - if _LEADING_ALPHA_INVALID.match(element): - raise ValueError( - "Non-alphanum char in element with leading alpha: {}".format( - element - ) - ) - return FieldPath(*elements) - - def __repr__(self): - paths = "" - for part in self.parts: - paths += "'" + part + "'," - paths = paths[:-1] - return "FieldPath({})".format(paths) - - def __hash__(self): - return hash(self.to_api_repr()) - - def __eq__(self, other): - if isinstance(other, FieldPath): - return self.parts == other.parts - return NotImplemented - - def __lt__(self, other): - if isinstance(other, FieldPath): - return self.parts < other.parts - return NotImplemented - - def __add__(self, other): - """Adds `other` field path to end of this field path. - - Args: - other (~google.cloud.firestore_v1beta1._helpers.FieldPath, str): - The field path to add to the end of this `FieldPath`. - """ - if isinstance(other, FieldPath): - parts = self.parts + other.parts - return FieldPath(*parts) - elif isinstance(other, six.string_types): - parts = self.parts + FieldPath.from_string(other).parts - return FieldPath(*parts) - else: - return NotImplemented - - def to_api_repr(self): - """Render a quoted string representation of the FieldPath - - Returns: - (str) Quoted string representation of the path stored - within this FieldPath. - """ - return render_field_path(self.parts) - - def eq_or_parent(self, other): - """Check whether ``other`` is an ancestor. - - Returns: - (bool) True IFF ``other`` is an ancestor or equal to ``self``, - else False. - """ - return self.parts[: len(other.parts)] == other.parts[: len(self.parts)] - - def lineage(self): - """Return field paths for all parents. 
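``FieldPath`` wraps the same quoting rules in an object that supports comparison, concatenation, and ancestry checks. A short sketch:

.. code-block:: python

    from google.cloud.firestore_v1beta1.field_path import FieldPath

    path = FieldPath(u'data', u'my key')

    # Non-identifier elements are backtick-quoted in the API representation.
    assert path.to_api_repr() == u'data.`my key`'

    # Concatenation accepts another FieldPath or a plain string.
    longer = path + u'nested'
    assert longer.parts == (u'data', u'my key', u'nested')

    # lineage() holds every proper ancestor of the path.
    assert FieldPath(u'data') in longer.lineage()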
- - Returns: Set[:class:`FieldPath`] - """ - indexes = six.moves.range(1, len(self.parts)) - return {FieldPath(*self.parts[:index]) for index in indexes} diff --git a/firestore/google/cloud/firestore_v1beta1/gapic/__init__.py b/firestore/google/cloud/firestore_v1beta1/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_v1beta1/gapic/enums.py b/firestore/google/cloud/firestore_v1beta1/gapic/enums.py deleted file mode 100644 index ee7a9ec6f589..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/gapic/enums.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class NullValue(enum.IntEnum): - """ - ``NullValue`` is a singleton enumeration to represent the null value for - the ``Value`` type union. - - The JSON representation for ``NullValue`` is JSON ``null``. - - Attributes: - NULL_VALUE (int): Null value. - """ - - NULL_VALUE = 0 - - -class DocumentTransform(object): - class FieldTransform(object): - class ServerValue(enum.IntEnum): - """ - A value that is calculated by the server. - - Attributes: - SERVER_VALUE_UNSPECIFIED (int): Unspecified. This value must not be used. - REQUEST_TIME (int): The time at which the server processed the request, with millisecond - precision. - """ - - SERVER_VALUE_UNSPECIFIED = 0 - REQUEST_TIME = 1 - - -class StructuredQuery(object): - class Direction(enum.IntEnum): - """ - A sort direction. - - Attributes: - DIRECTION_UNSPECIFIED (int): Unspecified. - ASCENDING (int): Ascending. - DESCENDING (int): Descending. - """ - - DIRECTION_UNSPECIFIED = 0 - ASCENDING = 1 - DESCENDING = 2 - - class CompositeFilter(object): - class Operator(enum.IntEnum): - """ - A composite filter operator. - - Attributes: - OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be used. - AND (int): The results are required to satisfy each of the combined filters. - """ - - OPERATOR_UNSPECIFIED = 0 - AND = 1 - - class FieldFilter(object): - class Operator(enum.IntEnum): - """ - A field filter operator. - - Attributes: - OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be used. - LESS_THAN (int): Less than. Requires that the field come first in ``order_by``. - LESS_THAN_OR_EQUAL (int): Less than or equal. Requires that the field come first in ``order_by``. - GREATER_THAN (int): Greater than. Requires that the field come first in ``order_by``. - GREATER_THAN_OR_EQUAL (int): Greater than or equal. Requires that the field come first in - ``order_by``. - EQUAL (int): Equal. - ARRAY_CONTAINS (int): Contains. Requires that the field is an array. - IN (int): In. Requires that ``value`` is a non-empty ArrayValue with at most 10 - values. - ARRAY_CONTAINS_ANY (int): Contains any. Requires that the field is an array and ``value`` is a - non-empty ArrayValue with at most 10 values. 
- """ - - OPERATOR_UNSPECIFIED = 0 - LESS_THAN = 1 - LESS_THAN_OR_EQUAL = 2 - GREATER_THAN = 3 - GREATER_THAN_OR_EQUAL = 4 - EQUAL = 5 - ARRAY_CONTAINS = 7 - IN = 8 - ARRAY_CONTAINS_ANY = 9 - - class UnaryFilter(object): - class Operator(enum.IntEnum): - """ - A unary operator. - - Attributes: - OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be used. - IS_NAN (int): Test if a field is equal to NaN. - IS_NULL (int): Test if an expression evaluates to Null. - """ - - OPERATOR_UNSPECIFIED = 0 - IS_NAN = 2 - IS_NULL = 3 - - -class TargetChange(object): - class TargetChangeType(enum.IntEnum): - """ - The type of change. - - Attributes: - NO_CHANGE (int): No change has occurred. Used only to send an updated ``resume_token``. - ADD (int): The targets have been added. - REMOVE (int): The targets have been removed. - CURRENT (int): The targets reflect all changes committed before the targets were added - to the stream. - - This will be sent after or with a ``read_time`` that is greater than or - equal to the time at which the targets were added. - - Listeners can wait for this change if read-after-write semantics are - desired. - RESET (int): The targets have been reset, and a new initial state for the targets - will be returned in subsequent changes. - - After the initial state is complete, ``CURRENT`` will be returned even - if the target was previously indicated to be ``CURRENT``. - """ - - NO_CHANGE = 0 - ADD = 1 - REMOVE = 2 - CURRENT = 3 - RESET = 4 diff --git a/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py b/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py deleted file mode 100644 index 659094164eaa..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py +++ /dev/null @@ -1,1461 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.firestore.v1beta1 Firestore API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.firestore_v1beta1.gapic import enums -from google.cloud.firestore_v1beta1.gapic import firestore_client_config -from google.cloud.firestore_v1beta1.gapic.transports import firestore_grpc_transport -from google.cloud.firestore_v1beta1.proto import common_pb2 -from google.cloud.firestore_v1beta1.proto import document_pb2 -from google.cloud.firestore_v1beta1.proto import firestore_pb2 -from google.cloud.firestore_v1beta1.proto import firestore_pb2_grpc -from google.cloud.firestore_v1beta1.proto import query_pb2 -from google.cloud.firestore_v1beta1.proto import write_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import timestamp_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-firestore" -).version - - -class FirestoreClient(object): - """ - The Cloud Firestore service. - - This service exposes several types of comparable timestamps: - - - ``create_time`` - The time at which a document was created. Changes - only when a document is deleted, then re-created. Increases in a - strict monotonic fashion. - - ``update_time`` - The time at which a document was last updated. - Changes every time a document is modified. Does not change when a - write results in no modifications. Increases in a strict monotonic - fashion. - - ``read_time`` - The time at which a particular state was observed. - Used to denote a consistent snapshot of the database or the time at - which a Document was observed to not exist. - - ``commit_time`` - The time at which the writes in a transaction were - committed. Any read with an equal or greater ``read_time`` is - guaranteed to see the effects of the transaction. - """ - - SERVICE_ADDRESS = "firestore.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.firestore.v1beta1.Firestore" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FirestoreClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def any_path_path(cls, project, database, document, any_path): - """Return a fully-qualified any_path string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/documents/{document}/{any_path=**}", - project=project, - database=database, - document=document, - any_path=any_path, - ) - - @classmethod - def database_root_path(cls, project, database): - """Return a fully-qualified database_root string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}", - project=project, - database=database, - ) - - @classmethod - def document_path_path(cls, project, database, document_path): - """Return a fully-qualified document_path string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/documents/{document_path=**}", - project=project, - database=database, - document_path=document_path, - ) - - @classmethod - def document_root_path(cls, project, database): - """Return a fully-qualified document_root string.""" - return google.api_core.path_template.expand( - "projects/{project}/databases/{database}/documents", - project=project, - database=database, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.FirestoreGrpcTransport, - Callable[[~.Credentials, type], ~.FirestoreGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = firestore_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=firestore_grpc_transport.FirestoreGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = firestore_grpc_transport.FirestoreGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def get_document( - self, - name, - mask=None, - transaction=None, - read_time=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a single document. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> response = client.get_document(name) - - Args: - name (str): Required. The resource name of the Document to get. In the format: - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If the document has a field that is not present in this mask, that field - will not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` - transaction (bytes): Reads the document in a transaction. - read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads the version of the document at the given time. - This may not be older than 60 seconds. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1beta1.types.Document` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_document" not in self._inner_api_calls: - self._inner_api_calls[ - "get_document" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_document, - default_retry=self._method_configs["GetDocument"].retry, - default_timeout=self._method_configs["GetDocument"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - transaction=transaction, read_time=read_time - ) - - request = firestore_pb2.GetDocumentRequest( - name=name, mask=mask, transaction=transaction, read_time=read_time - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_document"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_documents( - self, - parent, - collection_id, - page_size=None, - order_by=None, - mask=None, - transaction=None, - read_time=None, - show_missing=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists documents. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> # TODO: Initialize `collection_id`: - >>> collection_id = '' - >>> - >>> # Iterate over all results - >>> for element in client.list_documents(parent, collection_id): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_documents(parent, collection_id).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The parent resource name. In the format: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - For example: ``projects/my-project/databases/my-database/documents`` or - ``projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`` - collection_id (str): Required. The collection ID, relative to ``parent``, to list. 
For - example: ``chatrooms`` or ``messages``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - order_by (str): The order to sort results by. For example: ``priority desc, name``. - mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If a document has a field that is not present in this mask, that field - will not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` - transaction (bytes): Reads documents in a transaction. - read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. - This may not be older than 60 seconds. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` - show_missing (bool): If the list should show missing documents. A missing document is a - document that does not exist but has sub-documents. These documents will - be returned with a key but will not have fields, - ``Document.create_time``, or ``Document.update_time`` set. - - Requests with ``show_missing`` may not specify ``where`` or - ``order_by``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.firestore_v1beta1.types.Document` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_documents" not in self._inner_api_calls: - self._inner_api_calls[ - "list_documents" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_documents, - default_retry=self._method_configs["ListDocuments"].retry, - default_timeout=self._method_configs["ListDocuments"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. 
- google.api_core.protobuf_helpers.check_oneof( - transaction=transaction, read_time=read_time - ) - - request = firestore_pb2.ListDocumentsRequest( - parent=parent, - collection_id=collection_id, - page_size=page_size, - order_by=order_by, - mask=mask, - transaction=transaction, - read_time=read_time, - show_missing=show_missing, - ) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_documents"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="documents", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def create_document( - self, - parent, - collection_id, - document_id, - document, - mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new document. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> # TODO: Initialize `collection_id`: - >>> collection_id = '' - >>> - >>> # TODO: Initialize `document_id`: - >>> document_id = '' - >>> - >>> # TODO: Initialize `document`: - >>> document = {} - >>> - >>> response = client.create_document(parent, collection_id, document_id, document) - - Args: - parent (str): Required. The parent resource. For example: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}`` - collection_id (str): Required. The collection ID, relative to ``parent``, to list. For - example: ``chatrooms``. - document_id (str): The client-assigned document ID to use for this document. - - Optional. If not specified, an ID will be assigned by the service. - document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]): Required. The document to create. ``name`` must not be set. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Document` - mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If the document has a field that is not present in this mask, that field - will not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1beta1.types.Document` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_document" not in self._inner_api_calls: - self._inner_api_calls[ - "create_document" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_document, - default_retry=self._method_configs["CreateDocument"].retry, - default_timeout=self._method_configs["CreateDocument"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.CreateDocumentRequest( - parent=parent, - collection_id=collection_id, - document_id=document_id, - document=document, - mask=mask, - ) - return self._inner_api_calls["create_document"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_document( - self, - document, - update_mask, - mask=None, - current_document=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates or inserts a document. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> # TODO: Initialize `document`: - >>> document = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_document(document, update_mask) - - Args: - document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]): Required. The updated document. - Creates the document if it does not already exist. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Document` - update_mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to update. - None of the field paths in the mask may contain a reserved name. - - If the document exists on the server and has fields not referenced in the - mask, they are left unchanged. - Fields referenced in the mask, but not present in the input document, are - deleted from the document on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` - mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If the document has a field that is not present in this mask, that field - will not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` - current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. - The request will fail if this is set and not met by the target document. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Precondition` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1beta1.types.Document` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_document" not in self._inner_api_calls: - self._inner_api_calls[ - "update_document" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_document, - default_retry=self._method_configs["UpdateDocument"].retry, - default_timeout=self._method_configs["UpdateDocument"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.UpdateDocumentRequest( - document=document, - update_mask=update_mask, - mask=mask, - current_document=current_document, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("document.name", document.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_document"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_document( - self, - name, - current_document=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a document. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> client.delete_document(name) - - Args: - name (str): Required. The resource name of the Document to delete. In the format: - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. - The request will fail if this is set and not met by the target document. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Precondition` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
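The routing-header pattern used by update_document above (and repeated in the methods below) turns a list of key/value pairs into a single gRPC metadata entry so the backend can route the request. A minimal sketch with a hypothetical document name:

    from google.api_core.gapic_v1 import routing_header

    name = "projects/my-project/databases/my-database/documents/chatrooms/my-chatroom"
    entry = routing_header.to_grpc_metadata([("document.name", name)])
    # ``entry`` is an ('x-goog-request-params', ...) tuple that gets appended
    # to the call metadata.
    print(entry)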
- if "delete_document" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_document" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_document, - default_retry=self._method_configs["DeleteDocument"].retry, - default_timeout=self._method_configs["DeleteDocument"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.DeleteDocumentRequest( - name=name, current_document=current_document - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_document"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def batch_get_documents( - self, - database, - documents, - mask=None, - transaction=None, - new_transaction=None, - read_time=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets multiple documents. - - Documents returned by this method are not guaranteed to be returned in the - same order that they were requested. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> - >>> # TODO: Initialize `documents`: - >>> documents = [] - >>> - >>> for element in client.batch_get_documents(database, documents): - ... # process element - ... pass - - Args: - database (str): Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - documents (list[str]): The names of the documents to retrieve. In the format: - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - The request will fail if any of the document is not a child resource of - the given ``database``. Duplicate names will be elided. - mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. - - If a document has a field that is not present in this mask, that field will - not be returned in the response. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` - transaction (bytes): Reads documents in a transaction. - new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. - Defaults to a read-only transaction. - The new transaction ID will be returned as the first response in the - stream. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` - read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. - This may not be older than 60 seconds. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "batch_get_documents" not in self._inner_api_calls: - self._inner_api_calls[ - "batch_get_documents" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.batch_get_documents, - default_retry=self._method_configs["BatchGetDocuments"].retry, - default_timeout=self._method_configs["BatchGetDocuments"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - transaction=transaction, - new_transaction=new_transaction, - read_time=read_time, - ) - - request = firestore_pb2.BatchGetDocumentsRequest( - database=database, - documents=documents, - mask=mask, - transaction=transaction, - new_transaction=new_transaction, - read_time=read_time, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["batch_get_documents"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def begin_transaction( - self, - database, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts a new transaction. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> - >>> response = client.begin_transaction(database) - - Args: - database (str): Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): The options for the transaction. - Defaults to a read-write transaction. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. 
- """ - # Wrap the transport method to add retry and timeout logic. - if "begin_transaction" not in self._inner_api_calls: - self._inner_api_calls[ - "begin_transaction" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.begin_transaction, - default_retry=self._method_configs["BeginTransaction"].retry, - default_timeout=self._method_configs["BeginTransaction"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.BeginTransactionRequest( - database=database, options=options_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["begin_transaction"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def commit( - self, - database, - writes, - transaction=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Commits a transaction, while optionally updating documents. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> - >>> # TODO: Initialize `writes`: - >>> writes = [] - >>> - >>> response = client.commit(database, writes) - - Args: - database (str): Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - writes (list[Union[dict, ~google.cloud.firestore_v1beta1.types.Write]]): The writes to apply. - - Always executed atomically and in order. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Write` - transaction (bytes): If set, applies all writes in this transaction, and commits it. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.firestore_v1beta1.types.CommitResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "commit" not in self._inner_api_calls: - self._inner_api_calls[ - "commit" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.commit, - default_retry=self._method_configs["Commit"].retry, - default_timeout=self._method_configs["Commit"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.CommitRequest( - database=database, writes=writes, transaction=transaction - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["commit"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def rollback( - self, - database, - transaction, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Rolls back a transaction. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> - >>> # TODO: Initialize `transaction`: - >>> transaction = b'' - >>> - >>> client.rollback(database, transaction) - - Args: - database (str): Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - transaction (bytes): Required. The transaction to roll back. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "rollback" not in self._inner_api_calls: - self._inner_api_calls[ - "rollback" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.rollback, - default_retry=self._method_configs["Rollback"].retry, - default_timeout=self._method_configs["Rollback"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.RollbackRequest( - database=database, transaction=transaction - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["rollback"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def run_query( - self, - parent, - structured_query=None, - transaction=None, - new_transaction=None, - read_time=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Runs a query. 
- - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> for element in client.run_query(parent): - ... # process element - ... pass - - Args: - parent (str): Required. The parent resource name. In the format: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - For example: ``projects/my-project/databases/my-database/documents`` or - ``projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`` - structured_query (Union[dict, ~google.cloud.firestore_v1beta1.types.StructuredQuery]): A structured query. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.StructuredQuery` - transaction (bytes): Reads documents in a transaction. - new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. - Defaults to a read-only transaction. - The new transaction ID will be returned as the first response in the - stream. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` - read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. - This may not be older than 60 seconds. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.firestore_v1beta1.types.RunQueryResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "run_query" not in self._inner_api_calls: - self._inner_api_calls[ - "run_query" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.run_query, - default_retry=self._method_configs["RunQuery"].retry, - default_timeout=self._method_configs["RunQuery"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof(structured_query=structured_query) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. 
- google.api_core.protobuf_helpers.check_oneof( - transaction=transaction, - new_transaction=new_transaction, - read_time=read_time, - ) - - request = firestore_pb2.RunQueryRequest( - parent=parent, - structured_query=structured_query, - transaction=transaction, - new_transaction=new_transaction, - read_time=read_time, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["run_query"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def write( - self, - requests, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Streams batches of document updates and deletes, in order. - - EXPERIMENTAL: This method interface might change in the future. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> request = {'database': database} - >>> - >>> requests = [request] - >>> for element in client.write(requests): - ... # process element - ... pass - - Args: - requests (iterator[dict|google.cloud.firestore_v1beta1.proto.firestore_pb2.WriteRequest]): The input objects. If a dict is provided, it must be of the - same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.WriteRequest` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.firestore_v1beta1.types.WriteResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "write" not in self._inner_api_calls: - self._inner_api_calls[ - "write" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.write, - default_retry=self._method_configs["Write"].retry, - default_timeout=self._method_configs["Write"].timeout, - client_info=self._client_info, - ) - - return self._inner_api_calls["write"]( - requests, retry=retry, timeout=timeout, metadata=metadata - ) - - def listen( - self, - requests, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Listens to changes. - - EXPERIMENTAL: This method interface might change in the future. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') - >>> request = {'database': database} - >>> - >>> requests = [request] - >>> for element in client.listen(requests): - ... # process element - ... 
pass - - Args: - requests (iterator[dict|google.cloud.firestore_v1beta1.proto.firestore_pb2.ListenRequest]): The input objects. If a dict is provided, it must be of the - same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.ListenRequest` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.firestore_v1beta1.types.ListenResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "listen" not in self._inner_api_calls: - self._inner_api_calls[ - "listen" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.listen, - default_retry=self._method_configs["Listen"].retry, - default_timeout=self._method_configs["Listen"].timeout, - client_info=self._client_info, - ) - - return self._inner_api_calls["listen"]( - requests, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_collection_ids( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all the collection IDs underneath a document. - - Example: - >>> from google.cloud import firestore_v1beta1 - >>> - >>> client = firestore_v1beta1.FirestoreClient() - >>> - >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') - >>> - >>> # Iterate over all results - >>> for element in client.list_collection_ids(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_collection_ids(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The parent document. In the format: - ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. - For example: - ``projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`` - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`str` instances. 
- You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_collection_ids" not in self._inner_api_calls: - self._inner_api_calls[ - "list_collection_ids" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_collection_ids, - default_retry=self._method_configs["ListCollectionIds"].retry, - default_timeout=self._method_configs["ListCollectionIds"].timeout, - client_info=self._client_info, - ) - - request = firestore_pb2.ListCollectionIdsRequest( - parent=parent, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_collection_ids"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="collection_ids", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client_config.py b/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client_config.py deleted file mode 100644 index dd458fe97643..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client_config.py +++ /dev/null @@ -1,97 +0,0 @@ -config = { - "interfaces": { - "google.firestore.v1beta1.Firestore": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - }, - "streaming": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "GetDocument": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListDocuments": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateDocument": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateDocument": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteDocument": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "BatchGetDocuments": { - "timeout_millis": 300000, - "retry_codes_name": "idempotent", - "retry_params_name": "streaming", - }, - "BeginTransaction": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "Commit": { - "timeout_millis": 60000, - "retry_codes_name": 
"non_idempotent", - "retry_params_name": "default", - }, - "Rollback": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "RunQuery": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "streaming", - }, - "Write": { - "timeout_millis": 86400000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "streaming", - }, - "Listen": { - "timeout_millis": 86400000, - "retry_codes_name": "idempotent", - "retry_params_name": "streaming", - }, - "ListCollectionIds": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/firestore/google/cloud/firestore_v1beta1/gapic/transports/__init__.py b/firestore/google/cloud/firestore_v1beta1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_v1beta1/gapic/transports/firestore_grpc_transport.py b/firestore/google/cloud/firestore_v1beta1/gapic/transports/firestore_grpc_transport.py deleted file mode 100644 index 9f26080c82c3..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/gapic/transports/firestore_grpc_transport.py +++ /dev/null @@ -1,281 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.firestore_v1beta1.proto import firestore_pb2_grpc - - -class FirestoreGrpcTransport(object): - """gRPC transport class providing stubs for - google.firestore.v1beta1 Firestore API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/datastore", - ) - - def __init__( - self, channel=None, credentials=None, address="firestore.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = {"firestore_stub": firestore_pb2_grpc.FirestoreStub(channel)} - - @classmethod - def create_channel( - cls, address="firestore.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def get_document(self): - """Return the gRPC stub for :meth:`FirestoreClient.get_document`. - - Gets a single document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].GetDocument - - @property - def list_documents(self): - """Return the gRPC stub for :meth:`FirestoreClient.list_documents`. - - Lists documents. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].ListDocuments - - @property - def create_document(self): - """Return the gRPC stub for :meth:`FirestoreClient.create_document`. - - Creates a new document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].CreateDocument - - @property - def update_document(self): - """Return the gRPC stub for :meth:`FirestoreClient.update_document`. - - Updates or inserts a document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].UpdateDocument - - @property - def delete_document(self): - """Return the gRPC stub for :meth:`FirestoreClient.delete_document`. - - Deletes a document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].DeleteDocument - - @property - def batch_get_documents(self): - """Return the gRPC stub for :meth:`FirestoreClient.batch_get_documents`. - - Gets multiple documents. - - Documents returned by this method are not guaranteed to be returned in the - same order that they were requested. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["firestore_stub"].BatchGetDocuments - - @property - def begin_transaction(self): - """Return the gRPC stub for :meth:`FirestoreClient.begin_transaction`. - - Starts a new transaction. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].BeginTransaction - - @property - def commit(self): - """Return the gRPC stub for :meth:`FirestoreClient.commit`. - - Commits a transaction, while optionally updating documents. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].Commit - - @property - def rollback(self): - """Return the gRPC stub for :meth:`FirestoreClient.rollback`. - - Rolls back a transaction. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].Rollback - - @property - def run_query(self): - """Return the gRPC stub for :meth:`FirestoreClient.run_query`. - - Runs a query. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].RunQuery - - @property - def write(self): - """Return the gRPC stub for :meth:`FirestoreClient.write`. - - Streams batches of document updates and deletes, in order. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].Write - - @property - def listen(self): - """Return the gRPC stub for :meth:`FirestoreClient.listen`. - - Listens to changes. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].Listen - - @property - def list_collection_ids(self): - """Return the gRPC stub for :meth:`FirestoreClient.list_collection_ids`. - - Lists all the collection IDs underneath a document. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["firestore_stub"].ListCollectionIds diff --git a/firestore/google/cloud/firestore_v1beta1/order.py b/firestore/google/cloud/firestore_v1beta1/order.py deleted file mode 100644 index 79207f530c42..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/order.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from enum import Enum -from google.cloud.firestore_v1beta1._helpers import decode_value -import math - - -class TypeOrder(Enum): - # NOTE: This order is defined by the backend and cannot be changed. 
- NULL = 0 - BOOLEAN = 1 - NUMBER = 2 - TIMESTAMP = 3 - STRING = 4 - BLOB = 5 - REF = 6 - GEO_POINT = 7 - ARRAY = 8 - OBJECT = 9 - - @staticmethod - def from_value(value): - v = value.WhichOneof("value_type") - - lut = { - "null_value": TypeOrder.NULL, - "boolean_value": TypeOrder.BOOLEAN, - "integer_value": TypeOrder.NUMBER, - "double_value": TypeOrder.NUMBER, - "timestamp_value": TypeOrder.TIMESTAMP, - "string_value": TypeOrder.STRING, - "bytes_value": TypeOrder.BLOB, - "reference_value": TypeOrder.REF, - "geo_point_value": TypeOrder.GEO_POINT, - "array_value": TypeOrder.ARRAY, - "map_value": TypeOrder.OBJECT, - } - - if v not in lut: - raise ValueError("Could not detect value type for " + v) - return lut[v] - - -class Order(object): - """ - Order implements the ordering semantics of the backend. - """ - - @classmethod - def compare(cls, left, right): - """ - Main comparison function for all Firestore types. - @return -1 is left < right, 0 if left == right, otherwise 1 - """ - # First compare the types. - leftType = TypeOrder.from_value(left).value - rightType = TypeOrder.from_value(right).value - - if leftType != rightType: - if leftType < rightType: - return -1 - return 1 - - value_type = left.WhichOneof("value_type") - - if value_type == "null_value": - return 0 # nulls are all equal - elif value_type == "boolean_value": - return cls._compare_to(left.boolean_value, right.boolean_value) - elif value_type == "integer_value": - return cls.compare_numbers(left, right) - elif value_type == "double_value": - return cls.compare_numbers(left, right) - elif value_type == "timestamp_value": - return cls.compare_timestamps(left, right) - elif value_type == "string_value": - return cls._compare_to(left.string_value, right.string_value) - elif value_type == "bytes_value": - return cls.compare_blobs(left, right) - elif value_type == "reference_value": - return cls.compare_resource_paths(left, right) - elif value_type == "geo_point_value": - return cls.compare_geo_points(left, right) - elif value_type == "array_value": - return cls.compare_arrays(left, right) - elif value_type == "map_value": - return cls.compare_objects(left, right) - else: - raise ValueError("Unknown ``value_type``", str(value_type)) - - @staticmethod - def compare_blobs(left, right): - left_bytes = left.bytes_value - right_bytes = right.bytes_value - - return Order._compare_to(left_bytes, right_bytes) - - @staticmethod - def compare_timestamps(left, right): - left = left.timestamp_value - right = right.timestamp_value - - seconds = Order._compare_to(left.seconds or 0, right.seconds or 0) - if seconds != 0: - return seconds - - return Order._compare_to(left.nanos or 0, right.nanos or 0) - - @staticmethod - def compare_geo_points(left, right): - left_value = decode_value(left, None) - right_value = decode_value(right, None) - cmp = (left_value.latitude > right_value.latitude) - ( - left_value.latitude < right_value.latitude - ) - - if cmp != 0: - return cmp - return (left_value.longitude > right_value.longitude) - ( - left_value.longitude < right_value.longitude - ) - - @staticmethod - def compare_resource_paths(left, right): - left = left.reference_value - right = right.reference_value - - left_segments = left.split("/") - right_segments = right.split("/") - shorter = min(len(left_segments), len(right_segments)) - # compare segments - for i in range(shorter): - if left_segments[i] < right_segments[i]: - return -1 - if left_segments[i] > right_segments[i]: - return 1 - - left_length = len(left) - right_length = len(right) - return 
(left_length > right_length) - (left_length < right_length) - - @staticmethod - def compare_arrays(left, right): - l_values = left.array_value.values - r_values = right.array_value.values - - length = min(len(l_values), len(r_values)) - for i in range(length): - cmp = Order.compare(l_values[i], r_values[i]) - if cmp != 0: - return cmp - - return Order._compare_to(len(l_values), len(r_values)) - - @staticmethod - def compare_objects(left, right): - left_fields = left.map_value.fields - right_fields = right.map_value.fields - - for left_key, right_key in zip(sorted(left_fields), sorted(right_fields)): - keyCompare = Order._compare_to(left_key, right_key) - if keyCompare != 0: - return keyCompare - - value_compare = Order.compare( - left_fields[left_key], right_fields[right_key] - ) - if value_compare != 0: - return value_compare - - return Order._compare_to(len(left_fields), len(right_fields)) - - @staticmethod - def compare_numbers(left, right): - left_value = decode_value(left, None) - right_value = decode_value(right, None) - return Order.compare_doubles(left_value, right_value) - - @staticmethod - def compare_doubles(left, right): - if math.isnan(left): - if math.isnan(right): - return 0 - return -1 - if math.isnan(right): - return 1 - - return Order._compare_to(left, right) - - @staticmethod - def _compare_to(left, right): - # We can't just use cmp(left, right) because cmp doesn't exist - # in Python 3, so this is an equivalent suggested by - # https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons - return (left > right) - (left < right) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/__init__.py b/firestore/google/cloud/firestore_v1beta1/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_v1beta1/proto/admin/__init__.py b/firestore/google/cloud/firestore_v1beta1/proto/admin/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/firestore/google/cloud/firestore_v1beta1/proto/admin/firestore_admin_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/admin/firestore_admin_pb2.py deleted file mode 100644 index 9bb7f6553b04..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/admin/firestore_admin_pb2.py +++ /dev/null @@ -1,1343 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
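The Order class above mirrors the backend's ordering rules: values are first ranked by TypeOrder and only then compared within a type, so integers and doubles fall into the same NUMBER bucket. A minimal sketch comparing two numeric values:

    from google.cloud.firestore_v1beta1.order import Order
    from google.cloud.firestore_v1beta1.proto import document_pb2

    left = document_pb2.Value(integer_value=1)
    right = document_pb2.Value(double_value=2.5)

    print(Order.compare(left, right))   # -1, since 1 < 2.5
    print(Order.compare(right, left))   # 1
    print(Order.compare(left, left))    # 0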
-# source: google/cloud/firestore_v1beta1/proto/admin/firestore_admin.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.firestore_v1beta1.proto.admin import ( - index_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1beta1/proto/admin/firestore_admin.proto", - package="google.firestore.admin.v1beta1", - syntax="proto3", - serialized_pb=_b( - '\n@google/cloud/firestore_v1beta1/proto/admin/firestore_admin.proto\x12\x1egoogle.firestore.admin.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x36google/cloud/firestore_v1beta1/proto/admin/index.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x80\x03\n\x16IndexOperationMetadata\x12.\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05index\x18\x03 \x01(\t\x12\\\n\x0eoperation_type\x18\x04 \x01(\x0e\x32\x44.google.firestore.admin.v1beta1.IndexOperationMetadata.OperationType\x12\x11\n\tcancelled\x18\x05 \x01(\x08\x12\x43\n\x11\x64ocument_progress\x18\x06 \x01(\x0b\x32(.google.firestore.admin.v1beta1.Progress"C\n\rOperationType\x12\x1e\n\x1aOPERATION_TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0e\x43REATING_INDEX\x10\x01":\n\x08Progress\x12\x16\n\x0ework_completed\x18\x01 \x01(\x03\x12\x16\n\x0ework_estimated\x18\x02 \x01(\x03"Z\n\x12\x43reateIndexRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x34\n\x05index\x18\x02 \x01(\x0b\x32%.google.firestore.admin.v1beta1.Index"\x1f\n\x0fGetIndexRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"[\n\x12ListIndexesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t""\n\x12\x44\x65leteIndexRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"f\n\x13ListIndexesResponse\x12\x36\n\x07indexes\x18\x01 \x03(\x0b\x32%.google.firestore.admin.v1beta1.Index\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\x9c\x05\n\x0e\x46irestoreAdmin\x12\xa1\x01\n\x0b\x43reateIndex\x12\x32.google.firestore.admin.v1beta1.CreateIndexRequest\x1a\x1d.google.longrunning.Operation"?\x82\xd3\xe4\x93\x02\x39"0/v1beta1/{parent=projects/*/databases/*}/indexes:\x05index\x12\xb0\x01\n\x0bListIndexes\x12\x32.google.firestore.admin.v1beta1.ListIndexesRequest\x1a\x33.google.firestore.admin.v1beta1.ListIndexesResponse"8\x82\xd3\xe4\x93\x02\x32\x12\x30/v1beta1/{parent=projects/*/databases/*}/indexes\x12\x9c\x01\n\x08GetIndex\x12/.google.firestore.admin.v1beta1.GetIndexRequest\x1a%.google.firestore.admin.v1beta1.Index"8\x82\xd3\xe4\x93\x02\x32\x12\x30/v1beta1/{name=projects/*/databases/*/indexes/*}\x12\x93\x01\n\x0b\x44\x65leteIndex\x12\x32.google.firestore.admin.v1beta1.DeleteIndexRequest\x1a\x16.google.protobuf.Empty"8\x82\xd3\xe4\x93\x02\x32*0/v1beta1/{name=projects/*/databases/*/indexes/*}B\xae\x01\n"com.google.firestore.admin.v1beta1B\x13\x46irestoreAdminProtoP\x01ZCgoogle.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin\xa2\x02\x04GCFS\xaa\x02$Google.Cloud.Firestore.Admin.V1Beta1b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_INDEXOPERATIONMETADATA_OPERATIONTYPE = _descriptor.EnumDescriptor( - name="OperationType", - full_name="google.firestore.admin.v1beta1.IndexOperationMetadata.OperationType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="OPERATION_TYPE_UNSPECIFIED", - index=0, - number=0, - options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="CREATING_INDEX", index=1, number=1, options=None, type=None - ), - ], - containing_type=None, - options=None, - serialized_start=603, - serialized_end=670, -) -_sym_db.RegisterEnumDescriptor(_INDEXOPERATIONMETADATA_OPERATIONTYPE) - - -_INDEXOPERATIONMETADATA = _descriptor.Descriptor( - name="IndexOperationMetadata", - full_name="google.firestore.admin.v1beta1.IndexOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.firestore.admin.v1beta1.IndexOperationMetadata.start_time", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.firestore.admin.v1beta1.IndexOperationMetadata.end_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="index", - full_name="google.firestore.admin.v1beta1.IndexOperationMetadata.index", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="operation_type", - full_name="google.firestore.admin.v1beta1.IndexOperationMetadata.operation_type", - index=3, - 
number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="cancelled", - full_name="google.firestore.admin.v1beta1.IndexOperationMetadata.cancelled", - index=4, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="document_progress", - full_name="google.firestore.admin.v1beta1.IndexOperationMetadata.document_progress", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_INDEXOPERATIONMETADATA_OPERATIONTYPE], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=286, - serialized_end=670, -) - - -_PROGRESS = _descriptor.Descriptor( - name="Progress", - full_name="google.firestore.admin.v1beta1.Progress", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="work_completed", - full_name="google.firestore.admin.v1beta1.Progress.work_completed", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="work_estimated", - full_name="google.firestore.admin.v1beta1.Progress.work_estimated", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=672, - serialized_end=730, -) - - -_CREATEINDEXREQUEST = _descriptor.Descriptor( - name="CreateIndexRequest", - full_name="google.firestore.admin.v1beta1.CreateIndexRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.admin.v1beta1.CreateIndexRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="index", - full_name="google.firestore.admin.v1beta1.CreateIndexRequest.index", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=732, - serialized_end=822, -) - - -_GETINDEXREQUEST = _descriptor.Descriptor( - name="GetIndexRequest", - 
full_name="google.firestore.admin.v1beta1.GetIndexRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1beta1.GetIndexRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=824, - serialized_end=855, -) - - -_LISTINDEXESREQUEST = _descriptor.Descriptor( - name="ListIndexesRequest", - full_name="google.firestore.admin.v1beta1.ListIndexesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.admin.v1beta1.ListIndexesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.firestore.admin.v1beta1.ListIndexesRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.firestore.admin.v1beta1.ListIndexesRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.firestore.admin.v1beta1.ListIndexesRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=857, - serialized_end=948, -) - - -_DELETEINDEXREQUEST = _descriptor.Descriptor( - name="DeleteIndexRequest", - full_name="google.firestore.admin.v1beta1.DeleteIndexRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1beta1.DeleteIndexRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=950, - serialized_end=984, -) - - -_LISTINDEXESRESPONSE = _descriptor.Descriptor( - name="ListIndexesResponse", - full_name="google.firestore.admin.v1beta1.ListIndexesResponse", - 
filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="indexes", - full_name="google.firestore.admin.v1beta1.ListIndexesResponse.indexes", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.firestore.admin.v1beta1.ListIndexesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=986, - serialized_end=1088, -) - -_INDEXOPERATIONMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_INDEXOPERATIONMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_INDEXOPERATIONMETADATA.fields_by_name[ - "operation_type" -].enum_type = _INDEXOPERATIONMETADATA_OPERATIONTYPE -_INDEXOPERATIONMETADATA.fields_by_name["document_progress"].message_type = _PROGRESS -_INDEXOPERATIONMETADATA_OPERATIONTYPE.containing_type = _INDEXOPERATIONMETADATA -_CREATEINDEXREQUEST.fields_by_name[ - "index" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2._INDEX -) -_LISTINDEXESRESPONSE.fields_by_name[ - "indexes" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2._INDEX -) -DESCRIPTOR.message_types_by_name["IndexOperationMetadata"] = _INDEXOPERATIONMETADATA -DESCRIPTOR.message_types_by_name["Progress"] = _PROGRESS -DESCRIPTOR.message_types_by_name["CreateIndexRequest"] = _CREATEINDEXREQUEST -DESCRIPTOR.message_types_by_name["GetIndexRequest"] = _GETINDEXREQUEST -DESCRIPTOR.message_types_by_name["ListIndexesRequest"] = _LISTINDEXESREQUEST -DESCRIPTOR.message_types_by_name["DeleteIndexRequest"] = _DELETEINDEXREQUEST -DESCRIPTOR.message_types_by_name["ListIndexesResponse"] = _LISTINDEXESRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -IndexOperationMetadata = _reflection.GeneratedProtocolMessageType( - "IndexOperationMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_INDEXOPERATIONMETADATA, - __module__="google.cloud.firestore_v1beta1.proto.admin.firestore_admin_pb2", - __doc__="""Metadata for index operations. This metadata populates the metadata - field of [google.longrunning.Operation][google.longrunning.Operation]. - - - Attributes: - start_time: - The time that work began on the operation. - end_time: - The time the operation ended, either successfully or - otherwise. Unset if the operation is still active. - index: - The index resource that this operation is acting on. For - example: ``projects/{project_id}/databases/{database_id}/index - es/{index_id}`` - operation_type: - The type of index operation. - cancelled: - True if the [google.longrunning.Operation] was cancelled. If - the cancellation is in progress, cancelled will be true but [g - oogle.longrunning.Operation.done][google.longrunning.Operation - .done] will be false. 
- document_progress: - Progress of the existing operation, measured in number of - documents. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.IndexOperationMetadata) - ), -) -_sym_db.RegisterMessage(IndexOperationMetadata) - -Progress = _reflection.GeneratedProtocolMessageType( - "Progress", - (_message.Message,), - dict( - DESCRIPTOR=_PROGRESS, - __module__="google.cloud.firestore_v1beta1.proto.admin.firestore_admin_pb2", - __doc__="""Measures the progress of a particular metric. - - - Attributes: - work_completed: - An estimate of how much work has been completed. Note that - this may be greater than ``work_estimated``. - work_estimated: - An estimate of how much work needs to be performed. Zero if - the work estimate is unavailable. May change as work - progresses. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.Progress) - ), -) -_sym_db.RegisterMessage(Progress) - -CreateIndexRequest = _reflection.GeneratedProtocolMessageType( - "CreateIndexRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEINDEXREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.admin.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. - - - Attributes: - parent: - The name of the database this index will apply to. For - example: ``projects/{project_id}/databases/{database_id}`` - index: - The index to create. The name and state should not be - specified. Certain single field indexes cannot be created or - deleted. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.CreateIndexRequest) - ), -) -_sym_db.RegisterMessage(CreateIndexRequest) - -GetIndexRequest = _reflection.GeneratedProtocolMessageType( - "GetIndexRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETINDEXREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.admin.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.GetIndex][google.firestore.admin.v1beta1.FirestoreAdmin.GetIndex]. - - - Attributes: - name: - The name of the index. For example: ``projects/{project_id}/da - tabases/{database_id}/indexes/{index_id}`` - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.GetIndexRequest) - ), -) -_sym_db.RegisterMessage(GetIndexRequest) - -ListIndexesRequest = _reflection.GeneratedProtocolMessageType( - "ListIndexesRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINDEXESREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.admin.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes]. - - - Attributes: - parent: - The database name. For example: - ``projects/{project_id}/databases/{database_id}`` - page_size: - The standard List page size. - page_token: - The standard List page token. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.ListIndexesRequest) - ), -) -_sym_db.RegisterMessage(ListIndexesRequest) - -DeleteIndexRequest = _reflection.GeneratedProtocolMessageType( - "DeleteIndexRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETEINDEXREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.admin.firestore_admin_pb2", - __doc__="""The request for - [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex]. - - - Attributes: - name: - The index name. 
For example: ``projects/{project_id}/databases - /{database_id}/indexes/{index_id}`` - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.DeleteIndexRequest) - ), -) -_sym_db.RegisterMessage(DeleteIndexRequest) - -ListIndexesResponse = _reflection.GeneratedProtocolMessageType( - "ListIndexesResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINDEXESRESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.admin.firestore_admin_pb2", - __doc__="""The response for - [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes]. - - - Attributes: - indexes: - The indexes. - next_page_token: - The standard List next-page token. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.ListIndexesResponse) - ), -) -_sym_db.RegisterMessage(ListIndexesResponse) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - '\n"com.google.firestore.admin.v1beta1B\023FirestoreAdminProtoP\001ZCgoogle.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin\242\002\004GCFS\252\002$Google.Cloud.Firestore.Admin.V1Beta1' - ), -) - -_FIRESTOREADMIN = _descriptor.ServiceDescriptor( - name="FirestoreAdmin", - full_name="google.firestore.admin.v1beta1.FirestoreAdmin", - file=DESCRIPTOR, - index=0, - options=None, - serialized_start=1091, - serialized_end=1759, - methods=[ - _descriptor.MethodDescriptor( - name="CreateIndex", - full_name="google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex", - index=0, - containing_service=None, - input_type=_CREATEINDEXREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\0029"0/v1beta1/{parent=projects/*/databases/*}/indexes:\005index' - ), - ), - ), - _descriptor.MethodDescriptor( - name="ListIndexes", - full_name="google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes", - index=1, - containing_service=None, - input_type=_LISTINDEXESREQUEST, - output_type=_LISTINDEXESRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\0022\0220/v1beta1/{parent=projects/*/databases/*}/indexes" - ), - ), - ), - _descriptor.MethodDescriptor( - name="GetIndex", - full_name="google.firestore.admin.v1beta1.FirestoreAdmin.GetIndex", - index=2, - containing_service=None, - input_type=_GETINDEXREQUEST, - output_type=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2._INDEX, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\0022\0220/v1beta1/{name=projects/*/databases/*/indexes/*}" - ), - ), - ), - _descriptor.MethodDescriptor( - name="DeleteIndex", - full_name="google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex", - index=3, - containing_service=None, - input_type=_DELETEINDEXREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\0022*0/v1beta1/{name=projects/*/databases/*/indexes/*}" - ), - ), - ), - ], -) -_sym_db.RegisterServiceDescriptor(_FIRESTOREADMIN) - -DESCRIPTOR.services_by_name["FirestoreAdmin"] = _FIRESTOREADMIN - -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
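Illustrative sketch (not part of this change): how the request classes generated by the descriptor code above were typically constructed, assuming the pre-removal module path google.cloud.firestore_v1beta1.proto.admin is still importable; the project, database, and collection names are placeholders.

# Minimal sketch (assumption: pre-removal package layout still importable).
# It only exercises the message classes defined by the deleted descriptor code.
from google.cloud.firestore_v1beta1.proto.admin import firestore_admin_pb2

request = firestore_admin_pb2.CreateIndexRequest(
    parent="projects/my-project/databases/(default)"
)
# The nested Index message (defined in the deleted index_pb2 module) is
# reachable through the generated field, so no separate import is needed here.
request.index.collection_id = "users"

# Generated messages support the usual protobuf round trip.
payload = request.SerializeToString()
parsed = firestore_admin_pb2.CreateIndexRequest.FromString(payload)
assert parsed.parent == "projects/my-project/databases/(default)"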
- import grpc - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - - class FirestoreAdminStub(object): - """The Cloud Firestore Admin API. - - This API provides several administrative services for Cloud Firestore. - - # Concepts - - Project, Database, Namespace, Collection, and Document are used as defined in - the Google Cloud Firestore API. - - Operation: An Operation represents work being performed in the background. - - - # Services - - ## Index - - The index service manages Cloud Firestore indexes. - - Index creation is performed asynchronously. - An Operation resource is created for each such asynchronous operation. - The state of the operation (including any errors encountered) - may be queried via the Operation resource. - - ## Metadata - - Provides metadata and statistical information about data in Cloud Firestore. - The data provided as part of this API may be stale. - - ## Operation - - The Operations collection provides a record of actions performed for the - specified Project (including any Operations in progress). Operations are not - created directly but through calls on other collections or resources. - - An Operation that is not yet done may be cancelled. The request to cancel is - asynchronous and the Operation may continue to run for some time after the - request to cancel is made. - - An Operation that is done may be deleted so that it is no longer listed as - part of the Operation collection. - - Operations are created by service `FirestoreAdmin`, but are accessed via - service `google.longrunning.Operations`. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.CreateIndex = channel.unary_unary( - "/google.firestore.admin.v1beta1.FirestoreAdmin/CreateIndex", - request_serializer=CreateIndexRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ListIndexes = channel.unary_unary( - "/google.firestore.admin.v1beta1.FirestoreAdmin/ListIndexes", - request_serializer=ListIndexesRequest.SerializeToString, - response_deserializer=ListIndexesResponse.FromString, - ) - self.GetIndex = channel.unary_unary( - "/google.firestore.admin.v1beta1.FirestoreAdmin/GetIndex", - request_serializer=GetIndexRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.Index.FromString, - ) - self.DeleteIndex = channel.unary_unary( - "/google.firestore.admin.v1beta1.FirestoreAdmin/DeleteIndex", - request_serializer=DeleteIndexRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - class FirestoreAdminServicer(object): - """The Cloud Firestore Admin API. - - This API provides several administrative services for Cloud Firestore. - - # Concepts - - Project, Database, Namespace, Collection, and Document are used as defined in - the Google Cloud Firestore API. - - Operation: An Operation represents work being performed in the background. - - - # Services - - ## Index - - The index service manages Cloud Firestore indexes. - - Index creation is performed asynchronously. - An Operation resource is created for each such asynchronous operation. - The state of the operation (including any errors encountered) - may be queried via the Operation resource. 
- - ## Metadata - - Provides metadata and statistical information about data in Cloud Firestore. - The data provided as part of this API may be stale. - - ## Operation - - The Operations collection provides a record of actions performed for the - specified Project (including any Operations in progress). Operations are not - created directly but through calls on other collections or resources. - - An Operation that is not yet done may be cancelled. The request to cancel is - asynchronous and the Operation may continue to run for some time after the - request to cancel is made. - - An Operation that is done may be deleted so that it is no longer listed as - part of the Operation collection. - - Operations are created by service `FirestoreAdmin`, but are accessed via - service `google.longrunning.Operations`. - """ - - def CreateIndex(self, request, context): - """Creates the specified index. - A newly created index's initial state is `CREATING`. On completion of the - returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`. - If the index already exists, the call will return an `ALREADY_EXISTS` - status. - - During creation, the process could result in an error, in which case the - index will move to the `ERROR` state. The process can be recovered by - fixing the data that caused the error, removing the index with - [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with - [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. - - Indexes with a single field cannot be created. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListIndexes(self, request, context): - """Lists the indexes that match the specified filters. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIndex(self, request, context): - """Gets an index. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteIndex(self, request, context): - """Deletes an index. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def add_FirestoreAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateIndex": grpc.unary_unary_rpc_method_handler( - servicer.CreateIndex, - request_deserializer=CreateIndexRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListIndexes": grpc.unary_unary_rpc_method_handler( - servicer.ListIndexes, - request_deserializer=ListIndexesRequest.FromString, - response_serializer=ListIndexesResponse.SerializeToString, - ), - "GetIndex": grpc.unary_unary_rpc_method_handler( - servicer.GetIndex, - request_deserializer=GetIndexRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.Index.SerializeToString, - ), - "DeleteIndex": grpc.unary_unary_rpc_method_handler( - servicer.DeleteIndex, - request_deserializer=DeleteIndexRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.firestore.admin.v1beta1.FirestoreAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - class BetaFirestoreAdminServicer(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - - """The Cloud Firestore Admin API. - - This API provides several administrative services for Cloud Firestore. - - # Concepts - - Project, Database, Namespace, Collection, and Document are used as defined in - the Google Cloud Firestore API. - - Operation: An Operation represents work being performed in the background. - - - # Services - - ## Index - - The index service manages Cloud Firestore indexes. - - Index creation is performed asynchronously. - An Operation resource is created for each such asynchronous operation. - The state of the operation (including any errors encountered) - may be queried via the Operation resource. - - ## Metadata - - Provides metadata and statistical information about data in Cloud Firestore. - The data provided as part of this API may be stale. - - ## Operation - - The Operations collection provides a record of actions performed for the - specified Project (including any Operations in progress). Operations are not - created directly but through calls on other collections or resources. - - An Operation that is not yet done may be cancelled. The request to cancel is - asynchronous and the Operation may continue to run for some time after the - request to cancel is made. - - An Operation that is done may be deleted so that it is no longer listed as - part of the Operation collection. - - Operations are created by service `FirestoreAdmin`, but are accessed via - service `google.longrunning.Operations`. - """ - - def CreateIndex(self, request, context): - """Creates the specified index. - A newly created index's initial state is `CREATING`. On completion of the - returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`. - If the index already exists, the call will return an `ALREADY_EXISTS` - status. 
- - During creation, the process could result in an error, in which case the - index will move to the `ERROR` state. The process can be recovered by - fixing the data that caused the error, removing the index with - [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with - [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. - - Indexes with a single field cannot be created. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - def ListIndexes(self, request, context): - """Lists the indexes that match the specified filters. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - def GetIndex(self, request, context): - """Gets an index. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - def DeleteIndex(self, request, context): - """Deletes an index. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - class BetaFirestoreAdminStub(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - - """The Cloud Firestore Admin API. - - This API provides several administrative services for Cloud Firestore. - - # Concepts - - Project, Database, Namespace, Collection, and Document are used as defined in - the Google Cloud Firestore API. - - Operation: An Operation represents work being performed in the background. - - - # Services - - ## Index - - The index service manages Cloud Firestore indexes. - - Index creation is performed asynchronously. - An Operation resource is created for each such asynchronous operation. - The state of the operation (including any errors encountered) - may be queried via the Operation resource. - - ## Metadata - - Provides metadata and statistical information about data in Cloud Firestore. - The data provided as part of this API may be stale. - - ## Operation - - The Operations collection provides a record of actions performed for the - specified Project (including any Operations in progress). Operations are not - created directly but through calls on other collections or resources. - - An Operation that is not yet done may be cancelled. The request to cancel is - asynchronous and the Operation may continue to run for some time after the - request to cancel is made. - - An Operation that is done may be deleted so that it is no longer listed as - part of the Operation collection. - - Operations are created by service `FirestoreAdmin`, but are accessed via - service `google.longrunning.Operations`. - """ - - def CreateIndex( - self, - request, - timeout, - metadata=None, - with_call=False, - protocol_options=None, - ): - """Creates the specified index. - A newly created index's initial state is `CREATING`. On completion of the - returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`. - If the index already exists, the call will return an `ALREADY_EXISTS` - status. - - During creation, the process could result in an error, in which case the - index will move to the `ERROR` state. The process can be recovered by - fixing the data that caused the error, removing the index with - [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with - [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. - - Indexes with a single field cannot be created. 
- """ - raise NotImplementedError() - - CreateIndex.future = None - - def ListIndexes( - self, - request, - timeout, - metadata=None, - with_call=False, - protocol_options=None, - ): - """Lists the indexes that match the specified filters. - """ - raise NotImplementedError() - - ListIndexes.future = None - - def GetIndex( - self, - request, - timeout, - metadata=None, - with_call=False, - protocol_options=None, - ): - """Gets an index. - """ - raise NotImplementedError() - - GetIndex.future = None - - def DeleteIndex( - self, - request, - timeout, - metadata=None, - with_call=False, - protocol_options=None, - ): - """Deletes an index. - """ - raise NotImplementedError() - - DeleteIndex.future = None - - def beta_create_FirestoreAdmin_server( - servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None - ): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_deserializers = { - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "CreateIndex", - ): CreateIndexRequest.FromString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "DeleteIndex", - ): DeleteIndexRequest.FromString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "GetIndex", - ): GetIndexRequest.FromString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "ListIndexes", - ): ListIndexesRequest.FromString, - } - response_serializers = { - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "CreateIndex", - ): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "DeleteIndex", - ): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "GetIndex", - ): google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.Index.SerializeToString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "ListIndexes", - ): ListIndexesResponse.SerializeToString, - } - method_implementations = { - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "CreateIndex", - ): face_utilities.unary_unary_inline(servicer.CreateIndex), - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "DeleteIndex", - ): face_utilities.unary_unary_inline(servicer.DeleteIndex), - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "GetIndex", - ): face_utilities.unary_unary_inline(servicer.GetIndex), - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "ListIndexes", - ): face_utilities.unary_unary_inline(servicer.ListIndexes), - } - server_options = beta_implementations.server_options( - request_deserializers=request_deserializers, - response_serializers=response_serializers, - thread_pool=pool, - thread_pool_size=pool_size, - default_timeout=default_timeout, - maximum_timeout=maximum_timeout, - ) - return beta_implementations.server( - method_implementations, options=server_options - ) - - def beta_create_FirestoreAdmin_stub( - channel, host=None, metadata_transformer=None, pool=None, pool_size=None - ): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_serializers = { - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "CreateIndex", - ): CreateIndexRequest.SerializeToString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "DeleteIndex", - ): DeleteIndexRequest.SerializeToString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "GetIndex", - ): GetIndexRequest.SerializeToString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "ListIndexes", - ): ListIndexesRequest.SerializeToString, - } - response_deserializers = { - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "CreateIndex", - ): google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "DeleteIndex", - ): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "GetIndex", - ): google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.Index.FromString, - ( - "google.firestore.admin.v1beta1.FirestoreAdmin", - "ListIndexes", - ): ListIndexesResponse.FromString, - } - cardinalities = { - "CreateIndex": cardinality.Cardinality.UNARY_UNARY, - "DeleteIndex": cardinality.Cardinality.UNARY_UNARY, - "GetIndex": cardinality.Cardinality.UNARY_UNARY, - "ListIndexes": cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options( - host=host, - metadata_transformer=metadata_transformer, - request_serializers=request_serializers, - response_deserializers=response_deserializers, - thread_pool=pool, - thread_pool_size=pool_size, - ) - return beta_implementations.dynamic_stub( - channel, - "google.firestore.admin.v1beta1.FirestoreAdmin", - cardinalities, - options=stub_options, - ) - - -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/admin/firestore_admin_pb2_grpc.py b/firestore/google/cloud/firestore_v1beta1/proto/admin/firestore_admin_pb2_grpc.py deleted file mode 100644 index 81eaad7ad17e..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/admin/firestore_admin_pb2_grpc.py +++ /dev/null @@ -1,203 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.firestore_v1beta1.proto.admin import ( - firestore_admin_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2, -) -from google.cloud.firestore_v1beta1.proto.admin import ( - index_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class FirestoreAdminStub(object): - """The Cloud Firestore Admin API. - - This API provides several administrative services for Cloud Firestore. - - # Concepts - - Project, Database, Namespace, Collection, and Document are used as defined in - the Google Cloud Firestore API. - - Operation: An Operation represents work being performed in the background. - - - # Services - - ## Index - - The index service manages Cloud Firestore indexes. - - Index creation is performed asynchronously. - An Operation resource is created for each such asynchronous operation. - The state of the operation (including any errors encountered) - may be queried via the Operation resource. 
- - ## Metadata - - Provides metadata and statistical information about data in Cloud Firestore. - The data provided as part of this API may be stale. - - ## Operation - - The Operations collection provides a record of actions performed for the - specified Project (including any Operations in progress). Operations are not - created directly but through calls on other collections or resources. - - An Operation that is not yet done may be cancelled. The request to cancel is - asynchronous and the Operation may continue to run for some time after the - request to cancel is made. - - An Operation that is done may be deleted so that it is no longer listed as - part of the Operation collection. - - Operations are created by service `FirestoreAdmin`, but are accessed via - service `google.longrunning.Operations`. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.CreateIndex = channel.unary_unary( - "/google.firestore.admin.v1beta1.FirestoreAdmin/CreateIndex", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.CreateIndexRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ListIndexes = channel.unary_unary( - "/google.firestore.admin.v1beta1.FirestoreAdmin/ListIndexes", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.ListIndexesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.ListIndexesResponse.FromString, - ) - self.GetIndex = channel.unary_unary( - "/google.firestore.admin.v1beta1.FirestoreAdmin/GetIndex", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.GetIndexRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.Index.FromString, - ) - self.DeleteIndex = channel.unary_unary( - "/google.firestore.admin.v1beta1.FirestoreAdmin/DeleteIndex", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.DeleteIndexRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class FirestoreAdminServicer(object): - """The Cloud Firestore Admin API. - - This API provides several administrative services for Cloud Firestore. - - # Concepts - - Project, Database, Namespace, Collection, and Document are used as defined in - the Google Cloud Firestore API. - - Operation: An Operation represents work being performed in the background. - - - # Services - - ## Index - - The index service manages Cloud Firestore indexes. - - Index creation is performed asynchronously. - An Operation resource is created for each such asynchronous operation. - The state of the operation (including any errors encountered) - may be queried via the Operation resource. - - ## Metadata - - Provides metadata and statistical information about data in Cloud Firestore. - The data provided as part of this API may be stale. - - ## Operation - - The Operations collection provides a record of actions performed for the - specified Project (including any Operations in progress). Operations are not - created directly but through calls on other collections or resources. - - An Operation that is not yet done may be cancelled. 
The request to cancel is - asynchronous and the Operation may continue to run for some time after the - request to cancel is made. - - An Operation that is done may be deleted so that it is no longer listed as - part of the Operation collection. - - Operations are created by service `FirestoreAdmin`, but are accessed via - service `google.longrunning.Operations`. - """ - - def CreateIndex(self, request, context): - """Creates the specified index. - A newly created index's initial state is `CREATING`. On completion of the - returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`. - If the index already exists, the call will return an `ALREADY_EXISTS` - status. - - During creation, the process could result in an error, in which case the - index will move to the `ERROR` state. The process can be recovered by - fixing the data that caused the error, removing the index with - [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with - [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. - - Indexes with a single field cannot be created. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListIndexes(self, request, context): - """Lists the indexes that match the specified filters. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIndex(self, request, context): - """Gets an index. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteIndex(self, request, context): - """Deletes an index. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_FirestoreAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateIndex": grpc.unary_unary_rpc_method_handler( - servicer.CreateIndex, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.CreateIndexRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListIndexes": grpc.unary_unary_rpc_method_handler( - servicer.ListIndexes, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.ListIndexesRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.ListIndexesResponse.SerializeToString, - ), - "GetIndex": grpc.unary_unary_rpc_method_handler( - servicer.GetIndex, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.GetIndexRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.Index.SerializeToString, - ), - "DeleteIndex": grpc.unary_unary_rpc_method_handler( - servicer.DeleteIndex, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.DeleteIndexRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.firestore.admin.v1beta1.FirestoreAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/admin/index_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/admin/index_pb2.py deleted file mode 100644 index de43ee88e44c..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/admin/index_pb2.py +++ /dev/null @@ -1,300 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/firestore_v1beta1/proto/admin/index.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1beta1/proto/admin/index.proto", - package="google.firestore.admin.v1beta1", - syntax="proto3", - serialized_pb=_b( - '\n6google/cloud/firestore_v1beta1/proto/admin/index.proto\x12\x1egoogle.firestore.admin.v1beta1\x1a\x1cgoogle/api/annotations.proto"\x9c\x01\n\nIndexField\x12\x12\n\nfield_path\x18\x01 \x01(\t\x12=\n\x04mode\x18\x02 \x01(\x0e\x32/.google.firestore.admin.v1beta1.IndexField.Mode";\n\x04Mode\x12\x14\n\x10MODE_UNSPECIFIED\x10\x00\x12\r\n\tASCENDING\x10\x02\x12\x0e\n\nDESCENDING\x10\x03"\xe8\x01\n\x05Index\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\rcollection_id\x18\x02 \x01(\t\x12:\n\x06\x66ields\x18\x03 \x03(\x0b\x32*.google.firestore.admin.v1beta1.IndexField\x12:\n\x05state\x18\x06 \x01(\x0e\x32+.google.firestore.admin.v1beta1.Index.State"B\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x03\x12\t\n\x05READY\x10\x02\x12\t\n\x05\x45RROR\x10\x05\x42\xa5\x01\n"com.google.firestore.admin.v1beta1B\nIndexProtoP\x01ZCgoogle.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin\xa2\x02\x04GCFS\xaa\x02$Google.Cloud.Firestore.Admin.V1Beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], -) - - -_INDEXFIELD_MODE = _descriptor.EnumDescriptor( - name="Mode", - full_name="google.firestore.admin.v1beta1.IndexField.Mode", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="MODE_UNSPECIFIED", index=0, number=0, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ASCENDING", index=1, number=2, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="DESCENDING", index=2, number=3, options=None, type=None - ), - ], - containing_type=None, - options=None, - serialized_start=218, - serialized_end=277, -) -_sym_db.RegisterEnumDescriptor(_INDEXFIELD_MODE) - -_INDEX_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.firestore.admin.v1beta1.Index.State", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", index=0, number=0, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="CREATING", index=1, number=3, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="READY", index=2, number=2, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ERROR", index=3, number=5, options=None, type=None - ), - ], - containing_type=None, - options=None, - serialized_start=446, - serialized_end=512, -) -_sym_db.RegisterEnumDescriptor(_INDEX_STATE) - - -_INDEXFIELD = _descriptor.Descriptor( - name="IndexField", - full_name="google.firestore.admin.v1beta1.IndexField", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field_path", - full_name="google.firestore.admin.v1beta1.IndexField.field_path", - index=0, - number=1, - 
type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="mode", - full_name="google.firestore.admin.v1beta1.IndexField.mode", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_INDEXFIELD_MODE], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=121, - serialized_end=277, -) - - -_INDEX = _descriptor.Descriptor( - name="Index", - full_name="google.firestore.admin.v1beta1.Index", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.admin.v1beta1.Index.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="collection_id", - full_name="google.firestore.admin.v1beta1.Index.collection_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="fields", - full_name="google.firestore.admin.v1beta1.Index.fields", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.firestore.admin.v1beta1.Index.state", - index=3, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_INDEX_STATE], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=280, - serialized_end=512, -) - -_INDEXFIELD.fields_by_name["mode"].enum_type = _INDEXFIELD_MODE -_INDEXFIELD_MODE.containing_type = _INDEXFIELD -_INDEX.fields_by_name["fields"].message_type = _INDEXFIELD -_INDEX.fields_by_name["state"].enum_type = _INDEX_STATE -_INDEX_STATE.containing_type = _INDEX -DESCRIPTOR.message_types_by_name["IndexField"] = _INDEXFIELD -DESCRIPTOR.message_types_by_name["Index"] = _INDEX -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -IndexField = _reflection.GeneratedProtocolMessageType( - "IndexField", - (_message.Message,), - dict( - DESCRIPTOR=_INDEXFIELD, - __module__="google.cloud.firestore_v1beta1.proto.admin.index_pb2", - __doc__="""A field of an index. - - - Attributes: - field_path: - The path of the field. Must match the field path specification - described by - [google.firestore.v1beta1.Document.fields][fields]. Special - field path ``__name__`` may be used by itself or at the end of - a path. 
``__type__`` may be used only at the end of path. - mode: - The field's mode. - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.IndexField) - ), -) -_sym_db.RegisterMessage(IndexField) - -Index = _reflection.GeneratedProtocolMessageType( - "Index", - (_message.Message,), - dict( - DESCRIPTOR=_INDEX, - __module__="google.cloud.firestore_v1beta1.proto.admin.index_pb2", - __doc__="""An index definition. - - - Attributes: - name: - The resource name of the index. - collection_id: - The collection ID to which this index applies. Required. - fields: - The fields to index. - state: - The state of the index. The state is read-only. @OutputOnly - """, - # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.Index) - ), -) -_sym_db.RegisterMessage(Index) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - '\n"com.google.firestore.admin.v1beta1B\nIndexProtoP\001ZCgoogle.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin\242\002\004GCFS\252\002$Google.Cloud.Firestore.Admin.V1Beta1' - ), -) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/admin/index_pb2_grpc.py b/firestore/google/cloud/firestore_v1beta1/proto/admin/index_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/admin/index_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_v1beta1/proto/common.proto b/firestore/google/cloud/firestore_v1beta1/proto/common.proto deleted file mode 100644 index 2eaa183470d7..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/common.proto +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.v1beta1; - -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.firestore.v1beta1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1beta1"; - -// A set of field paths on a document. -// Used to restrict a get or update operation on a document to a subset of its -// fields. 
-// This is different from standard field masks, as this is always scoped to a -// [Document][google.firestore.v1beta1.Document], and takes in account the dynamic nature of [Value][google.firestore.v1beta1.Value]. -message DocumentMask { - // The list of field paths in the mask. See [Document.fields][google.firestore.v1beta1.Document.fields] for a field - // path syntax reference. - repeated string field_paths = 1; -} - -// A precondition on a document, used for conditional operations. -message Precondition { - // The type of precondition. - oneof condition_type { - // When set to `true`, the target document must exist. - // When set to `false`, the target document must not exist. - bool exists = 1; - - // When set, the target document must exist and have been last updated at - // that time. - google.protobuf.Timestamp update_time = 2; - } -} - -// Options for creating a new transaction. -message TransactionOptions { - // Options for a transaction that can be used to read and write documents. - message ReadWrite { - // An optional transaction to retry. - bytes retry_transaction = 1; - } - - // Options for a transaction that can only be used to read documents. - message ReadOnly { - // The consistency mode for this transaction. If not set, defaults to strong - // consistency. - oneof consistency_selector { - // Reads documents at the given time. - // This may not be older than 60 seconds. - google.protobuf.Timestamp read_time = 2; - } - } - - // The mode of the transaction. - oneof mode { - // The transaction can only be used for read operations. - ReadOnly read_only = 2; - - // The transaction can be used for both read and write operations. - ReadWrite read_write = 3; - } -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/common_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/common_pb2.py deleted file mode 100644 index 8469940a4c1b..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/common_pb2.py +++ /dev/null @@ -1,454 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
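Illustrative sketch (not part of this change): the conditional-write and transaction messages defined in the common.proto shown above, built through the removed common_pb2 module; the timestamp value is an arbitrary placeholder.

# Hedged sketch, assuming the pre-removal module path
# google.cloud.firestore_v1beta1.proto.common_pb2 is still importable.
from google.protobuf import timestamp_pb2

from google.cloud.firestore_v1beta1.proto import common_pb2

# "Document must exist and must not have changed since this time."
last_seen = timestamp_pb2.Timestamp(seconds=1546300800)
precondition = common_pb2.Precondition(update_time=last_seen)
assert precondition.WhichOneof("condition_type") == "update_time"

# Setting `exists` replaces `update_time`, since both live in the same oneof.
precondition.exists = True
assert precondition.WhichOneof("condition_type") == "exists"

# Read-only transaction pinned to a snapshot time (at most 60 seconds old,
# per the proto comment above).
options = common_pb2.TransactionOptions(
    read_only=common_pb2.TransactionOptions.ReadOnly(read_time=last_seen)
)
assert options.WhichOneof("mode") == "read_only"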
-# source: google/cloud/firestore_v1beta1/proto/common.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1beta1/proto/common.proto", - package="google.firestore.v1beta1", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.firestore.v1beta1B\013CommonProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\242\002\004GCFS\252\002\036Google.Cloud.Firestore.V1Beta1\312\002\036Google\\Cloud\\Firestore\\V1beta1" - ), - serialized_pb=_b( - '\n1google/cloud/firestore_v1beta1/proto/common.proto\x12\x18google.firestore.v1beta1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"#\n\x0c\x44ocumentMask\x12\x13\n\x0b\x66ield_paths\x18\x01 \x03(\t"e\n\x0cPrecondition\x12\x10\n\x06\x65xists\x18\x01 \x01(\x08H\x00\x12\x31\n\x0bupdate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x10\n\x0e\x63ondition_type"\xb3\x02\n\x12TransactionOptions\x12J\n\tread_only\x18\x02 \x01(\x0b\x32\x35.google.firestore.v1beta1.TransactionOptions.ReadOnlyH\x00\x12L\n\nread_write\x18\x03 \x01(\x0b\x32\x36.google.firestore.v1beta1.TransactionOptions.ReadWriteH\x00\x1a&\n\tReadWrite\x12\x19\n\x11retry_transaction\x18\x01 \x01(\x0c\x1aS\n\x08ReadOnly\x12/\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x16\n\x14\x63onsistency_selectorB\x06\n\x04modeB\xb9\x01\n\x1c\x63om.google.firestore.v1beta1B\x0b\x43ommonProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xa2\x02\x04GCFS\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1\xca\x02\x1eGoogle\\Cloud\\Firestore\\V1beta1b\x06proto3' - ), - dependencies=[ - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_DOCUMENTMASK = _descriptor.Descriptor( - name="DocumentMask", - full_name="google.firestore.v1beta1.DocumentMask", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field_paths", - full_name="google.firestore.v1beta1.DocumentMask.field_paths", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=142, - serialized_end=177, -) - - -_PRECONDITION = _descriptor.Descriptor( - name="Precondition", - full_name="google.firestore.v1beta1.Precondition", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="exists", - full_name="google.firestore.v1beta1.Precondition.exists", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_time", - full_name="google.firestore.v1beta1.Precondition.update_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="condition_type", - full_name="google.firestore.v1beta1.Precondition.condition_type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=179, - serialized_end=280, -) - - -_TRANSACTIONOPTIONS_READWRITE = _descriptor.Descriptor( - name="ReadWrite", - full_name="google.firestore.v1beta1.TransactionOptions.ReadWrite", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="retry_transaction", - full_name="google.firestore.v1beta1.TransactionOptions.ReadWrite.retry_transaction", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=459, - serialized_end=497, -) - -_TRANSACTIONOPTIONS_READONLY = _descriptor.Descriptor( - name="ReadOnly", - full_name="google.firestore.v1beta1.TransactionOptions.ReadOnly", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.TransactionOptions.ReadOnly.read_time", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="consistency_selector", - full_name="google.firestore.v1beta1.TransactionOptions.ReadOnly.consistency_selector", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=499, - serialized_end=582, -) - -_TRANSACTIONOPTIONS = _descriptor.Descriptor( - name="TransactionOptions", - full_name="google.firestore.v1beta1.TransactionOptions", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="read_only", - full_name="google.firestore.v1beta1.TransactionOptions.read_only", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_write", - full_name="google.firestore.v1beta1.TransactionOptions.read_write", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, 
- has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_TRANSACTIONOPTIONS_READWRITE, _TRANSACTIONOPTIONS_READONLY], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mode", - full_name="google.firestore.v1beta1.TransactionOptions.mode", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=283, - serialized_end=590, -) - -_PRECONDITION.fields_by_name[ - "update_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_PRECONDITION.oneofs_by_name["condition_type"].fields.append( - _PRECONDITION.fields_by_name["exists"] -) -_PRECONDITION.fields_by_name["exists"].containing_oneof = _PRECONDITION.oneofs_by_name[ - "condition_type" -] -_PRECONDITION.oneofs_by_name["condition_type"].fields.append( - _PRECONDITION.fields_by_name["update_time"] -) -_PRECONDITION.fields_by_name[ - "update_time" -].containing_oneof = _PRECONDITION.oneofs_by_name["condition_type"] -_TRANSACTIONOPTIONS_READWRITE.containing_type = _TRANSACTIONOPTIONS -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TRANSACTIONOPTIONS_READONLY.containing_type = _TRANSACTIONOPTIONS -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["consistency_selector"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["read_time"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "read_time" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["consistency_selector"] -_TRANSACTIONOPTIONS.fields_by_name[ - "read_only" -].message_type = _TRANSACTIONOPTIONS_READONLY -_TRANSACTIONOPTIONS.fields_by_name[ - "read_write" -].message_type = _TRANSACTIONOPTIONS_READWRITE -_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append( - _TRANSACTIONOPTIONS.fields_by_name["read_only"] -) -_TRANSACTIONOPTIONS.fields_by_name[ - "read_only" -].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"] -_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append( - _TRANSACTIONOPTIONS.fields_by_name["read_write"] -) -_TRANSACTIONOPTIONS.fields_by_name[ - "read_write" -].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"] -DESCRIPTOR.message_types_by_name["DocumentMask"] = _DOCUMENTMASK -DESCRIPTOR.message_types_by_name["Precondition"] = _PRECONDITION -DESCRIPTOR.message_types_by_name["TransactionOptions"] = _TRANSACTIONOPTIONS -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -DocumentMask = _reflection.GeneratedProtocolMessageType( - "DocumentMask", - (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTMASK, - __module__="google.cloud.firestore_v1beta1.proto.common_pb2", - __doc__="""A set of field paths on a document. Used to restrict a get - or update operation on a document to a subset of its fields. This is - different from standard field masks, as this is always scoped to a - [Document][google.firestore.v1beta1.Document], and takes in account the - dynamic nature of [Value][google.firestore.v1beta1.Value]. - - - Attributes: - field_paths: - The list of field paths in the mask. See - [Document.fields][google.firestore.v1beta1.Document.fields] - for a field path syntax reference. 
- """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentMask) - ), -) -_sym_db.RegisterMessage(DocumentMask) - -Precondition = _reflection.GeneratedProtocolMessageType( - "Precondition", - (_message.Message,), - dict( - DESCRIPTOR=_PRECONDITION, - __module__="google.cloud.firestore_v1beta1.proto.common_pb2", - __doc__="""A precondition on a document, used for conditional - operations. - - - Attributes: - condition_type: - The type of precondition. - exists: - When set to ``true``, the target document must exist. When set - to ``false``, the target document must not exist. - update_time: - When set, the target document must exist and have been last - updated at that time. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Precondition) - ), -) -_sym_db.RegisterMessage(Precondition) - -TransactionOptions = _reflection.GeneratedProtocolMessageType( - "TransactionOptions", - (_message.Message,), - dict( - ReadWrite=_reflection.GeneratedProtocolMessageType( - "ReadWrite", - (_message.Message,), - dict( - DESCRIPTOR=_TRANSACTIONOPTIONS_READWRITE, - __module__="google.cloud.firestore_v1beta1.proto.common_pb2", - __doc__="""Options for a transaction that can be used to read and - write documents. - - - Attributes: - retry_transaction: - An optional transaction to retry. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.TransactionOptions.ReadWrite) - ), - ), - ReadOnly=_reflection.GeneratedProtocolMessageType( - "ReadOnly", - (_message.Message,), - dict( - DESCRIPTOR=_TRANSACTIONOPTIONS_READONLY, - __module__="google.cloud.firestore_v1beta1.proto.common_pb2", - __doc__="""Options for a transaction that can only be used to read - documents. - - - Attributes: - consistency_selector: - The consistency mode for this transaction. If not set, - defaults to strong consistency. - read_time: - Reads documents at the given time. This may not be older than - 60 seconds. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.TransactionOptions.ReadOnly) - ), - ), - DESCRIPTOR=_TRANSACTIONOPTIONS, - __module__="google.cloud.firestore_v1beta1.proto.common_pb2", - __doc__="""Options for creating a new transaction. - - - Attributes: - mode: - The mode of the transaction. - read_only: - The transaction can only be used for read operations. - read_write: - The transaction can be used for both read and write - operations. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.TransactionOptions) - ), -) -_sym_db.RegisterMessage(TransactionOptions) -_sym_db.RegisterMessage(TransactionOptions.ReadWrite) -_sym_db.RegisterMessage(TransactionOptions.ReadOnly) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/common_pb2_grpc.py b/firestore/google/cloud/firestore_v1beta1/proto/common_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/common_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_v1beta1/proto/document.proto b/firestore/google/cloud/firestore_v1beta1/proto/document.proto deleted file mode 100644 index 7caae4688a5f..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/document.proto +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2019 Google LLC. 
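Aside on the common.proto types removed above: DocumentMask, Precondition, and TransactionOptions are the wire-level counterparts of read masks, conditional writes, and transaction options in the handwritten client. A minimal sketch of how they are typically exercised through the high-level google-cloud-firestore client follows; the collection path, data, and credentials are illustrative assumptions and are not part of this change.

    # Sketch only: assumes default credentials and an existing "cities/tokyo" document.
    from google.cloud import firestore

    client = firestore.Client()
    doc_ref = client.collection("cities").document("tokyo")

    # DocumentMask: request a subset of field paths instead of the whole document.
    snapshot = doc_ref.get(field_paths=["name", "population"])

    # Precondition.update_time: apply the write only if the document is unchanged
    # since the snapshot was taken; otherwise the RPC fails.
    option = client.write_option(last_update_time=snapshot.update_time)
    doc_ref.update({"population": 14000000}, option=option)

    # TransactionOptions: the client defaults to ReadWrite transactions with retries;
    # the transactional decorator re-runs the callable on contention.
    @firestore.transactional
    def bump_population(transaction, ref):
        snap = ref.get(transaction=transaction)
        transaction.update(ref, {"population": snap.get("population") + 1})

    bump_population(client.transaction(), doc_ref)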
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.v1beta1; - -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/type/latlng.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "DocumentProto"; -option java_package = "com.google.firestore.v1beta1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1beta1"; - -// A Firestore document. -// -// Must not exceed 1 MiB - 4 bytes. -message Document { - // The resource name of the document, for example - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string name = 1; - - // The document's fields. - // - // The map keys represent field names. - // - // A simple field name contains only characters `a` to `z`, `A` to `Z`, - // `0` to `9`, or `_`, and must not start with `0` to `9`. For example, - // `foo_bar_17`. - // - // Field names matching the regular expression `__.*__` are reserved. Reserved - // field names are forbidden except in certain documented contexts. The map - // keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be - // empty. - // - // Field paths may be used in other contexts to refer to structured fields - // defined here. For `map_value`, the field path is represented by the simple - // or quoted field names of the containing fields, delimited by `.`. For - // example, the structured field - // `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be - // represented by the field path `foo.x&y`. - // - // Within a field path, a quoted field name starts and ends with `` ` `` and - // may contain any character. Some characters, including `` ` ``, must be - // escaped using a `\`. For example, `` `x&y` `` represents `x&y` and - // `` `bak\`tik` `` represents `` bak`tik ``. - map<string, Value> fields = 2; - - // Output only. The time at which the document was created. - // - // This value increases monotonically when a document is deleted then - // recreated. It can also be compared to values from other documents and - // the `read_time` of a query. - google.protobuf.Timestamp create_time = 3; - - // Output only. The time at which the document was last changed. - // - // This value is initially set to the `create_time` then increases - // monotonically with each change to the document. It can also be - // compared to values from other documents and the `read_time` of a query. - google.protobuf.Timestamp update_time = 4; -} - -// A message that can hold any of the supported value types. -message Value { - // Must have a value set. - oneof value_type { - // A null value. - google.protobuf.NullValue null_value = 11; - - // A boolean value. - bool boolean_value = 1; - - // An integer value. 
- int64 integer_value = 2; - - // A double value. - double double_value = 3; - - // A timestamp value. - // - // Precise only to microseconds. When stored, any additional precision is - // rounded down. - google.protobuf.Timestamp timestamp_value = 10; - - // A string value. - // - // The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes. - // Only the first 1,500 bytes of the UTF-8 representation are considered by - // queries. - string string_value = 17; - - // A bytes value. - // - // Must not exceed 1 MiB - 89 bytes. - // Only the first 1,500 bytes are considered by queries. - bytes bytes_value = 18; - - // A reference to a document. For example: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string reference_value = 5; - - // A geo point value representing a point on the surface of Earth. - google.type.LatLng geo_point_value = 8; - - // An array value. - // - // Cannot directly contain another array value, though can contain an - // map which contains another array. - ArrayValue array_value = 9; - - // A map value. - MapValue map_value = 6; - } -} - -// An array value. -message ArrayValue { - // Values in the array. - repeated Value values = 1; -} - -// A map value. -message MapValue { - // The map's fields. - // - // The map keys represent field names. Field names matching the regular - // expression `__.*__` are reserved. Reserved field names are forbidden except - // in certain documented contexts. The map keys, represented as UTF-8, must - // not exceed 1,500 bytes and cannot be empty. - map<string, Value> fields = 1; -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/document_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/document_pb2.py deleted file mode 100644 index 4ca1f65ed709..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/document_pb2.py +++ /dev/null @@ -1,798 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
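Aside on the Value oneof in the document.proto file deleted above: it is what the Python client populates when it converts native values on writes. A rough sketch of that mapping using the high-level google-cloud-firestore client (collection path and data are illustrative assumptions; bytes, datetime, and DocumentReference values map to bytes_value, timestamp_value, and reference_value in the same way):

    # Sketch only: each entry is stored as one case of google.firestore.v1beta1.Value.
    from google.cloud import firestore

    client = firestore.Client()
    client.collection("cities").document("tokyo").set(
        {
            "name": "Tokyo",                                # string_value
            "population": 13929286,                         # integer_value
            "density": 6349.0,                              # double_value
            "capital": True,                                # boolean_value
            "location": firestore.GeoPoint(35.68, 139.69),  # geo_point_value
            "tags": ["capital", "asia"],                    # array_value (no directly nested arrays)
            "stats": {"wards": 23},                         # map_value
            "motto": None,                                  # null_value
        }
    )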
-# source: google/cloud/firestore_v1beta1/proto/document.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1beta1/proto/document.proto", - package="google.firestore.v1beta1", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.firestore.v1beta1B\rDocumentProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\242\002\004GCFS\252\002\036Google.Cloud.Firestore.V1Beta1\312\002\036Google\\Cloud\\Firestore\\V1beta1" - ), - serialized_pb=_b( - '\n3google/cloud/firestore_v1beta1/proto/document.proto\x12\x18google.firestore.v1beta1\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x18google/type/latlng.proto\x1a\x1cgoogle/api/annotations.proto"\x8a\x02\n\x08\x44ocument\x12\x0c\n\x04name\x18\x01 \x01(\t\x12>\n\x06\x66ields\x18\x02 \x03(\x0b\x32..google.firestore.v1beta1.Document.FieldsEntry\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1aN\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.google.firestore.v1beta1.Value:\x02\x38\x01"\xb8\x03\n\x05Value\x12\x30\n\nnull_value\x18\x0b \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x17\n\rboolean_value\x18\x01 \x01(\x08H\x00\x12\x17\n\rinteger_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03 \x01(\x01H\x00\x12\x35\n\x0ftimestamp_value\x18\n \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x16\n\x0cstring_value\x18\x11 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x12 \x01(\x0cH\x00\x12\x19\n\x0freference_value\x18\x05 \x01(\tH\x00\x12.\n\x0fgeo_point_value\x18\x08 \x01(\x0b\x32\x13.google.type.LatLngH\x00\x12;\n\x0b\x61rray_value\x18\t \x01(\x0b\x32$.google.firestore.v1beta1.ArrayValueH\x00\x12\x37\n\tmap_value\x18\x06 \x01(\x0b\x32".google.firestore.v1beta1.MapValueH\x00\x42\x0c\n\nvalue_type"=\n\nArrayValue\x12/\n\x06values\x18\x01 \x03(\x0b\x32\x1f.google.firestore.v1beta1.Value"\x9a\x01\n\x08MapValue\x12>\n\x06\x66ields\x18\x01 \x03(\x0b\x32..google.firestore.v1beta1.MapValue.FieldsEntry\x1aN\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.google.firestore.v1beta1.Value:\x02\x38\x01\x42\xbb\x01\n\x1c\x63om.google.firestore.v1beta1B\rDocumentProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xa2\x02\x04GCFS\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1\xca\x02\x1eGoogle\\Cloud\\Firestore\\V1beta1b\x06proto3' - ), - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_type_dot_latlng__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_DOCUMENT_FIELDSENTRY = _descriptor.Descriptor( - name="FieldsEntry", - 
full_name="google.firestore.v1beta1.Document.FieldsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.firestore.v1beta1.Document.FieldsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.firestore.v1beta1.Document.FieldsEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=389, - serialized_end=467, -) - -_DOCUMENT = _descriptor.Descriptor( - name="Document", - full_name="google.firestore.v1beta1.Document", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.v1beta1.Document.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="fields", - full_name="google.firestore.v1beta1.Document.fields", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.firestore.v1beta1.Document.create_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_time", - full_name="google.firestore.v1beta1.Document.update_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_DOCUMENT_FIELDSENTRY], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=201, - serialized_end=467, -) - - -_VALUE = _descriptor.Descriptor( - name="Value", - full_name="google.firestore.v1beta1.Value", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="null_value", - full_name="google.firestore.v1beta1.Value.null_value", - index=0, - number=11, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, 
- serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="boolean_value", - full_name="google.firestore.v1beta1.Value.boolean_value", - index=1, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="integer_value", - full_name="google.firestore.v1beta1.Value.integer_value", - index=2, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="double_value", - full_name="google.firestore.v1beta1.Value.double_value", - index=3, - number=3, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="timestamp_value", - full_name="google.firestore.v1beta1.Value.timestamp_value", - index=4, - number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="string_value", - full_name="google.firestore.v1beta1.Value.string_value", - index=5, - number=17, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="bytes_value", - full_name="google.firestore.v1beta1.Value.bytes_value", - index=6, - number=18, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="reference_value", - full_name="google.firestore.v1beta1.Value.reference_value", - index=7, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="geo_point_value", - full_name="google.firestore.v1beta1.Value.geo_point_value", - index=8, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="array_value", - full_name="google.firestore.v1beta1.Value.array_value", - index=9, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="map_value", - full_name="google.firestore.v1beta1.Value.map_value", - index=10, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="value_type", - full_name="google.firestore.v1beta1.Value.value_type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=470, - serialized_end=910, -) - - -_ARRAYVALUE = _descriptor.Descriptor( - name="ArrayValue", - full_name="google.firestore.v1beta1.ArrayValue", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="values", - full_name="google.firestore.v1beta1.ArrayValue.values", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=912, - serialized_end=973, -) - - -_MAPVALUE_FIELDSENTRY = _descriptor.Descriptor( - name="FieldsEntry", - full_name="google.firestore.v1beta1.MapValue.FieldsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.firestore.v1beta1.MapValue.FieldsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.firestore.v1beta1.MapValue.FieldsEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=389, - serialized_end=467, -) - -_MAPVALUE = _descriptor.Descriptor( - name="MapValue", - full_name="google.firestore.v1beta1.MapValue", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="google.firestore.v1beta1.MapValue.fields", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[_MAPVALUE_FIELDSENTRY], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=976, - serialized_end=1130, -) - 
-_DOCUMENT_FIELDSENTRY.fields_by_name["value"].message_type = _VALUE -_DOCUMENT_FIELDSENTRY.containing_type = _DOCUMENT -_DOCUMENT.fields_by_name["fields"].message_type = _DOCUMENT_FIELDSENTRY -_DOCUMENT.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_DOCUMENT.fields_by_name[ - "update_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_VALUE.fields_by_name[ - "null_value" -].enum_type = google_dot_protobuf_dot_struct__pb2._NULLVALUE -_VALUE.fields_by_name[ - "timestamp_value" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_VALUE.fields_by_name[ - "geo_point_value" -].message_type = google_dot_type_dot_latlng__pb2._LATLNG -_VALUE.fields_by_name["array_value"].message_type = _ARRAYVALUE -_VALUE.fields_by_name["map_value"].message_type = _MAPVALUE -_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["null_value"]) -_VALUE.fields_by_name["null_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append( - _VALUE.fields_by_name["boolean_value"] -) -_VALUE.fields_by_name["boolean_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append( - _VALUE.fields_by_name["integer_value"] -) -_VALUE.fields_by_name["integer_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["double_value"]) -_VALUE.fields_by_name["double_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append( - _VALUE.fields_by_name["timestamp_value"] -) -_VALUE.fields_by_name["timestamp_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["string_value"]) -_VALUE.fields_by_name["string_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["bytes_value"]) -_VALUE.fields_by_name["bytes_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append( - _VALUE.fields_by_name["reference_value"] -) -_VALUE.fields_by_name["reference_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append( - _VALUE.fields_by_name["geo_point_value"] -) -_VALUE.fields_by_name["geo_point_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["array_value"]) -_VALUE.fields_by_name["array_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["map_value"]) -_VALUE.fields_by_name["map_value"].containing_oneof = _VALUE.oneofs_by_name[ - "value_type" -] -_ARRAYVALUE.fields_by_name["values"].message_type = _VALUE -_MAPVALUE_FIELDSENTRY.fields_by_name["value"].message_type = _VALUE -_MAPVALUE_FIELDSENTRY.containing_type = _MAPVALUE -_MAPVALUE.fields_by_name["fields"].message_type = _MAPVALUE_FIELDSENTRY -DESCRIPTOR.message_types_by_name["Document"] = _DOCUMENT -DESCRIPTOR.message_types_by_name["Value"] = _VALUE -DESCRIPTOR.message_types_by_name["ArrayValue"] = _ARRAYVALUE -DESCRIPTOR.message_types_by_name["MapValue"] = _MAPVALUE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Document = _reflection.GeneratedProtocolMessageType( - 
"Document", - (_message.Message,), - dict( - FieldsEntry=_reflection.GeneratedProtocolMessageType( - "FieldsEntry", - (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENT_FIELDSENTRY, - __module__="google.cloud.firestore_v1beta1.proto.document_pb2" - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Document.FieldsEntry) - ), - ), - DESCRIPTOR=_DOCUMENT, - __module__="google.cloud.firestore_v1beta1.proto.document_pb2", - __doc__="""A Firestore document. - - Must not exceed 1 MiB - 4 bytes. - - - Attributes: - name: - The resource name of the document, for example ``projects/{pro - ject_id}/databases/{database_id}/documents/{document_path}``. - fields: - The document's fields. The map keys represent field names. A - simple field name contains only characters ``a`` to ``z``, - ``A`` to ``Z``, ``0`` to ``9``, or ``_``, and must not start - with ``0`` to ``9``. For example, ``foo_bar_17``. Field names - matching the regular expression ``__.*__`` are reserved. - Reserved field names are forbidden except in certain - documented contexts. The map keys, represented as UTF-8, must - not exceed 1,500 bytes and cannot be empty. Field paths may - be used in other contexts to refer to structured fields - defined here. For ``map_value``, the field path is represented - by the simple or quoted field names of the containing fields, - delimited by ``.``. For example, the structured field ``"foo" - : { map_value: { "x&y" : { string_value: "hello" }}}`` would - be represented by the field path ``foo.x&y``. Within a field - path, a quoted field name starts and ends with ````` and may - contain any character. Some characters, including `````, must - be escaped using a ``\``. For example, ```x&y``` represents - ``x&y`` and ```bak\`tik``` represents ``bak`tik``. - create_time: - Output only. The time at which the document was created. This - value increases monotonically when a document is deleted then - recreated. It can also be compared to values from other - documents and the ``read_time`` of a query. - update_time: - Output only. The time at which the document was last changed. - This value is initially set to the ``create_time`` then - increases monotonically with each change to the document. It - can also be compared to values from other documents and the - ``read_time`` of a query. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Document) - ), -) -_sym_db.RegisterMessage(Document) -_sym_db.RegisterMessage(Document.FieldsEntry) - -Value = _reflection.GeneratedProtocolMessageType( - "Value", - (_message.Message,), - dict( - DESCRIPTOR=_VALUE, - __module__="google.cloud.firestore_v1beta1.proto.document_pb2", - __doc__="""A message that can hold any of the supported value types. - - - Attributes: - value_type: - Must have a value set. - null_value: - A null value. - boolean_value: - A boolean value. - integer_value: - An integer value. - double_value: - A double value. - timestamp_value: - A timestamp value. Precise only to microseconds. When stored, - any additional precision is rounded down. - string_value: - A string value. The string, represented as UTF-8, must not - exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the - UTF-8 representation are considered by queries. - bytes_value: - A bytes value. Must not exceed 1 MiB - 89 bytes. Only the - first 1,500 bytes are considered by queries. - reference_value: - A reference to a document. For example: ``projects/{project_id - }/databases/{database_id}/documents/{document_path}``. 
- geo_point_value: - A geo point value representing a point on the surface of - Earth. - array_value: - An array value. Cannot directly contain another array value, - though can contain an map which contains another array. - map_value: - A map value. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Value) - ), -) -_sym_db.RegisterMessage(Value) - -ArrayValue = _reflection.GeneratedProtocolMessageType( - "ArrayValue", - (_message.Message,), - dict( - DESCRIPTOR=_ARRAYVALUE, - __module__="google.cloud.firestore_v1beta1.proto.document_pb2", - __doc__="""An array value. - - - Attributes: - values: - Values in the array. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ArrayValue) - ), -) -_sym_db.RegisterMessage(ArrayValue) - -MapValue = _reflection.GeneratedProtocolMessageType( - "MapValue", - (_message.Message,), - dict( - FieldsEntry=_reflection.GeneratedProtocolMessageType( - "FieldsEntry", - (_message.Message,), - dict( - DESCRIPTOR=_MAPVALUE_FIELDSENTRY, - __module__="google.cloud.firestore_v1beta1.proto.document_pb2" - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.MapValue.FieldsEntry) - ), - ), - DESCRIPTOR=_MAPVALUE, - __module__="google.cloud.firestore_v1beta1.proto.document_pb2", - __doc__="""A map value. - - - Attributes: - fields: - The map's fields. The map keys represent field names. Field - names matching the regular expression ``__.*__`` are reserved. - Reserved field names are forbidden except in certain - documented contexts. The map keys, represented as UTF-8, must - not exceed 1,500 bytes and cannot be empty. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.MapValue) - ), -) -_sym_db.RegisterMessage(MapValue) -_sym_db.RegisterMessage(MapValue.FieldsEntry) - - -DESCRIPTOR._options = None -_DOCUMENT_FIELDSENTRY._options = None -_MAPVALUE_FIELDSENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/document_pb2_grpc.py b/firestore/google/cloud/firestore_v1beta1/proto/document_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/document_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2.py deleted file mode 100644 index 957acef2695c..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2.py +++ /dev/null @@ -1,62 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
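Aside on the deleted document_pb2.py module above: the classes it registers are ordinary protobuf messages, so the same Document/Value shapes described by its descriptors can be built directly. A brief sketch against the pre-split module path removed in this diff (resource names are illustrative assumptions):

    # Sketch only: constructs messages described by the deleted descriptors.
    from google.cloud.firestore_v1beta1.proto import document_pb2

    doc = document_pb2.Document(
        name="projects/my-project/databases/(default)/documents/cities/tokyo",
        fields={
            "name": document_pb2.Value(string_value="Tokyo"),
            "tags": document_pb2.Value(
                array_value=document_pb2.ArrayValue(
                    values=[document_pb2.Value(string_value="capital")]
                )
            ),
        },
    )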
-# source: google/cloud/firestore_v1beta1/proto/event_flow_document_change.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.firestore_v1beta1.proto import ( - common_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1beta1/proto/event_flow_document_change.proto", - package="google.firestore.v1beta1", - syntax="proto3", - serialized_pb=_b( - "\nEgoogle/cloud/firestore_v1beta1/proto/event_flow_document_change.proto\x12\x18google.firestore.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/firestore_v1beta1/proto/common.proto\x1a\x33google/cloud/firestore_v1beta1/proto/document.protoB\xa2\x01\n\x1c\x63om.google.firestore.v1beta1B\x1c\x45ventFlowDocumentChangeProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1b\x06proto3" - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR, - ], -) - - -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - "\n\034com.google.firestore.v1beta1B\034EventFlowDocumentChangeProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\252\002\036Google.Cloud.Firestore.V1Beta1" - ), -) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2_grpc.py b/firestore/google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_v1beta1/proto/field.proto b/firestore/google/cloud/firestore_v1beta1/proto/field.proto deleted file mode 100644 index 9d1534eb1f63..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/field.proto +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta2; - -import "google/api/annotations.proto"; -import "google/firestore/admin/v1beta2/index.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; -option java_multiple_files = true; -option java_outer_classname = "FieldProto"; -option java_package = "com.google.firestore.admin.v1beta2"; -option objc_class_prefix = "GCFS"; - - -// Represents a single field in the database. -// -// Fields are grouped by their "Collection Group", which represent all -// collections in the database with the same id. -message Field { - // The index configuration for this field. - message IndexConfig { - // The indexes supported for this field. - repeated Index indexes = 1; - - // Output only. - // When true, the `Field`'s index configuration is set from the - // configuration specified by the `ancestor_field`. - // When false, the `Field`'s index configuration is defined explicitly. - bool uses_ancestor_config = 2; - - // Output only. - // Specifies the resource name of the `Field` from which this field's - // index configuration is set (when `uses_ancestor_config` is true), - // or from which it *would* be set if this field had no index configuration - // (when `uses_ancestor_config` is false). - string ancestor_field = 3; - - // Output only - // When true, the `Field`'s index configuration is in the process of being - // reverted. Once complete, the index config will transition to the same - // state as the field specified by `ancestor_field`, at which point - // `uses_ancestor_config` will be `true` and `reverting` will be `false`. - bool reverting = 4; - } - - // A field name of the form - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` - // - // A field path may be a simple field name, e.g. `address` or a path to fields - // within map_value , e.g. `address.city`, - // or a special field path. The only valid special field is `*`, which - // represents any field. - // - // Field paths may be quoted using ` (backtick). The only character that needs - // to be escaped within a quoted field path is the backtick character itself, - // escaped using a backslash. Special characters in field paths that - // must be quoted include: `*`, `.`, - // ``` (backtick), `[`, `]`, as well as any ascii symbolic characters. - // - // Examples: - // (Note: Comments here are written in markdown syntax, so there is an - // additional layer of backticks to represent a code block) - // `\`address.city\`` represents a field named `address.city`, not the map key - // `city` in the field `address`. - // `\`*\`` represents a field named `*`, not any field. - // - // A special `Field` contains the default indexing settings for all fields. 
- // This field's resource name is: - // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*` - // Indexes defined on this `Field` will be applied to all fields which do not - // have their own `Field` index configuration. - string name = 1; - - // The index configuration for this field. If unset, field indexing will - // revert to the configuration defined by the `ancestor_field`. To - // explicitly remove all indexes for this field, specify an index config - // with an empty list of indexes. - IndexConfig index_config = 2; -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/firestore.proto b/firestore/google/cloud/firestore_v1beta1/proto/firestore.proto deleted file mode 100644 index c2b15b04870e..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/firestore.proto +++ /dev/null @@ -1,765 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.v1beta1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/firestore/v1beta1/common.proto"; -import "google/firestore/v1beta1/document.proto"; -import "google/firestore/v1beta1/query.proto"; -import "google/firestore/v1beta1/write.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "FirestoreProto"; -option java_package = "com.google.firestore.v1beta1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1beta1"; - -// Specification of the Firestore API. - -// The Cloud Firestore service. -// -// This service exposes several types of comparable timestamps: -// -// * `create_time` - The time at which a document was created. Changes only -// when a document is deleted, then re-created. Increases in a strict -// monotonic fashion. -// * `update_time` - The time at which a document was last updated. Changes -// every time a document is modified. Does not change when a write results -// in no modifications. Increases in a strict monotonic fashion. -// * `read_time` - The time at which a particular state was observed. Used -// to denote a consistent snapshot of the database or the time at which a -// Document was observed to not exist. -// * `commit_time` - The time at which the writes in a transaction were -// committed. Any read with an equal or greater `read_time` is guaranteed -// to see the effects of the transaction. -service Firestore { - option (google.api.default_host) = "firestore.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/datastore"; - - // Gets a single document. 
- rpc GetDocument(GetDocumentRequest) returns (Document) { - option (google.api.http) = { - get: "/v1beta1/{name=projects/*/databases/*/documents/*/**}" - }; - } - - // Lists documents. - rpc ListDocuments(ListDocumentsRequest) returns (ListDocumentsResponse) { - option (google.api.http) = { - get: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}" - }; - } - - // Creates a new document. - rpc CreateDocument(CreateDocumentRequest) returns (Document) { - option (google.api.http) = { - post: "/v1beta1/{parent=projects/*/databases/*/documents/**}/{collection_id}" - body: "document" - }; - } - - // Updates or inserts a document. - rpc UpdateDocument(UpdateDocumentRequest) returns (Document) { - option (google.api.http) = { - patch: "/v1beta1/{document.name=projects/*/databases/*/documents/*/**}" - body: "document" - }; - option (google.api.method_signature) = "document,update_mask"; - } - - // Deletes a document. - rpc DeleteDocument(DeleteDocumentRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1beta1/{name=projects/*/databases/*/documents/*/**}" - }; - option (google.api.method_signature) = "name"; - } - - // Gets multiple documents. - // - // Documents returned by this method are not guaranteed to be returned in the - // same order that they were requested. - rpc BatchGetDocuments(BatchGetDocumentsRequest) returns (stream BatchGetDocumentsResponse) { - option (google.api.http) = { - post: "/v1beta1/{database=projects/*/databases/*}/documents:batchGet" - body: "*" - }; - } - - // Starts a new transaction. - rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { - option (google.api.http) = { - post: "/v1beta1/{database=projects/*/databases/*}/documents:beginTransaction" - body: "*" - }; - option (google.api.method_signature) = "database"; - } - - // Commits a transaction, while optionally updating documents. - rpc Commit(CommitRequest) returns (CommitResponse) { - option (google.api.http) = { - post: "/v1beta1/{database=projects/*/databases/*}/documents:commit" - body: "*" - }; - option (google.api.method_signature) = "database,writes"; - } - - // Rolls back a transaction. - rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1beta1/{database=projects/*/databases/*}/documents:rollback" - body: "*" - }; - option (google.api.method_signature) = "database,transaction"; - } - - // Runs a query. - rpc RunQuery(RunQueryRequest) returns (stream RunQueryResponse) { - option (google.api.http) = { - post: "/v1beta1/{parent=projects/*/databases/*/documents}:runQuery" - body: "*" - additional_bindings { - post: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}:runQuery" - body: "*" - } - }; - } - - // Streams batches of document updates and deletes, in order. - rpc Write(stream WriteRequest) returns (stream WriteResponse) { - option (google.api.http) = { - post: "/v1beta1/{database=projects/*/databases/*}/documents:write" - body: "*" - }; - } - - // Listens to changes. - rpc Listen(stream ListenRequest) returns (stream ListenResponse) { - option (google.api.http) = { - post: "/v1beta1/{database=projects/*/databases/*}/documents:listen" - body: "*" - }; - } - - // Lists all the collection IDs underneath a document. 
- rpc ListCollectionIds(ListCollectionIdsRequest) returns (ListCollectionIdsResponse) { - option (google.api.http) = { - post: "/v1beta1/{parent=projects/*/databases/*/documents}:listCollectionIds" - body: "*" - additional_bindings { - post: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds" - body: "*" - } - }; - option (google.api.method_signature) = "parent"; - } -} - -// The request for [Firestore.GetDocument][google.firestore.v1beta1.Firestore.GetDocument]. -message GetDocumentRequest { - // Required. The resource name of the Document to get. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // The fields to return. If not set, returns all fields. - // - // If the document has a field that is not present in this mask, that field - // will not be returned in the response. - DocumentMask mask = 2; - - // The consistency mode for this transaction. - // If not set, defaults to strong consistency. - oneof consistency_selector { - // Reads the document in a transaction. - bytes transaction = 3; - - // Reads the version of the document at the given time. - // This may not be older than 60 seconds. - google.protobuf.Timestamp read_time = 5; - } -} - -// The request for [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments]. -message ListDocumentsRequest { - // Required. The parent resource name. In the format: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents` or - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The collection ID, relative to `parent`, to list. For example: `chatrooms` - // or `messages`. - string collection_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // The maximum number of documents to return. - int32 page_size = 3; - - // The `next_page_token` value returned from a previous List request, if any. - string page_token = 4; - - // The order to sort results by. For example: `priority desc, name`. - string order_by = 6; - - // The fields to return. If not set, returns all fields. - // - // If a document has a field that is not present in this mask, that field - // will not be returned in the response. - DocumentMask mask = 7; - - // The consistency mode for this transaction. - // If not set, defaults to strong consistency. - oneof consistency_selector { - // Reads documents in a transaction. - bytes transaction = 8; - - // Reads documents as they were at the given time. - // This may not be older than 60 seconds. - google.protobuf.Timestamp read_time = 10; - } - - // If the list should show missing documents. A missing document is a - // document that does not exist but has sub-documents. These documents will - // be returned with a key but will not have fields, [Document.create_time][google.firestore.v1beta1.Document.create_time], - // or [Document.update_time][google.firestore.v1beta1.Document.update_time] set. - // - // Requests with `show_missing` may not specify `where` or - // `order_by`. - bool show_missing = 12; -} - -// The response for [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments]. -message ListDocumentsResponse { - // The Documents found. - repeated Document documents = 1; - - // The next page token. 
- string next_page_token = 2; -} - -// The request for [Firestore.CreateDocument][google.firestore.v1beta1.Firestore.CreateDocument]. -message CreateDocumentRequest { - // Required. The parent resource. For example: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The collection ID, relative to `parent`, to list. For example: `chatrooms`. - string collection_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // The client-assigned document ID to use for this document. - // - // Optional. If not specified, an ID will be assigned by the service. - string document_id = 3; - - // Required. The document to create. `name` must not be set. - Document document = 4 [(google.api.field_behavior) = REQUIRED]; - - // The fields to return. If not set, returns all fields. - // - // If the document has a field that is not present in this mask, that field - // will not be returned in the response. - DocumentMask mask = 5; -} - -// The request for [Firestore.UpdateDocument][google.firestore.v1beta1.Firestore.UpdateDocument]. -message UpdateDocumentRequest { - // Required. The updated document. - // Creates the document if it does not already exist. - Document document = 1 [(google.api.field_behavior) = REQUIRED]; - - // The fields to update. - // None of the field paths in the mask may contain a reserved name. - // - // If the document exists on the server and has fields not referenced in the - // mask, they are left unchanged. - // Fields referenced in the mask, but not present in the input document, are - // deleted from the document on the server. - DocumentMask update_mask = 2; - - // The fields to return. If not set, returns all fields. - // - // If the document has a field that is not present in this mask, that field - // will not be returned in the response. - DocumentMask mask = 3; - - // An optional precondition on the document. - // The request will fail if this is set and not met by the target document. - Precondition current_document = 4; -} - -// The request for [Firestore.DeleteDocument][google.firestore.v1beta1.Firestore.DeleteDocument]. -message DeleteDocumentRequest { - // Required. The resource name of the Document to delete. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // An optional precondition on the document. - // The request will fail if this is set and not met by the target document. - Precondition current_document = 2; -} - -// The request for [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments]. -message BatchGetDocumentsRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The names of the documents to retrieve. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // The request will fail if any of the documents is not a child resource of the - // given `database`. Duplicate names will be elided. - repeated string documents = 2; - - // The fields to return. If not set, returns all fields. - // - // If a document has a field that is not present in this mask, that field will - // not be returned in the response. - DocumentMask mask = 3; - - // The consistency mode for this transaction.
- // If not set, defaults to strong consistency. - oneof consistency_selector { - // Reads documents in a transaction. - bytes transaction = 4; - - // Starts a new transaction and reads the documents. - // Defaults to a read-only transaction. - // The new transaction ID will be returned as the first response in the - // stream. - TransactionOptions new_transaction = 5; - - // Reads documents as they were at the given time. - // This may not be older than 60 seconds. - google.protobuf.Timestamp read_time = 7; - } -} - -// The streamed response for [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments]. -message BatchGetDocumentsResponse { - // A single result. - // This can be empty if the server is just returning a transaction. - oneof result { - // A document that was requested. - Document found = 1; - - // A document name that was requested but does not exist. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string missing = 2; - } - - // The transaction that was started as part of this request. - // Will only be set in the first response, and only if - // [BatchGetDocumentsRequest.new_transaction][google.firestore.v1beta1.BatchGetDocumentsRequest.new_transaction] was set in the request. - bytes transaction = 3; - - // The time at which the document was read. - // This may be monotonically increasing; in this case, the previous documents in - // the result stream are guaranteed not to have changed between their - // read_time and this one. - google.protobuf.Timestamp read_time = 4; -} - -// The request for [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction]. -message BeginTransactionRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The options for the transaction. - // Defaults to a read-write transaction. - TransactionOptions options = 2; -} - -// The response for [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction]. -message BeginTransactionResponse { - // The transaction that was started. - bytes transaction = 1; -} - -// The request for [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit]. -message CommitRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The writes to apply. - // - // Always executed atomically and in order. - repeated Write writes = 2; - - // If set, applies all writes in this transaction, and commits it. - bytes transaction = 3; -} - -// The response for [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit]. -message CommitResponse { - // The result of applying the writes. - // - // The i-th write result corresponds to the i-th write in the - // request. - repeated WriteResult write_results = 1; - - // The time at which the commit occurred. - google.protobuf.Timestamp commit_time = 2; -} - -// The request for [Firestore.Rollback][google.firestore.v1beta1.Firestore.Rollback]. -message RollbackRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The transaction to roll back.
- bytes transaction = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery]. -message RunQueryRequest { - // Required. The parent resource name. In the format: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents` or - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // The query to run. - oneof query_type { - // A structured query. - StructuredQuery structured_query = 2; - } - - // The consistency mode for this transaction. - // If not set, defaults to strong consistency. - oneof consistency_selector { - // Reads documents in a transaction. - bytes transaction = 5; - - // Starts a new transaction and reads the documents. - // Defaults to a read-only transaction. - // The new transaction ID will be returned as the first response in the - // stream. - TransactionOptions new_transaction = 6; - - // Reads documents as they were at the given time. - // This may not be older than 60 seconds. - google.protobuf.Timestamp read_time = 7; - } -} - -// The response for [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery]. -message RunQueryResponse { - // The transaction that was started as part of this request. - // Can only be set in the first response, and only if - // [RunQueryRequest.new_transaction][google.firestore.v1beta1.RunQueryRequest.new_transaction] was set in the request. - // If set, no other fields will be set in this response. - bytes transaction = 2; - - // A query result. - // Not set when reporting partial progress. - Document document = 1; - - // The time at which the document was read. This may be monotonically - // increasing; in this case, the previous documents in the result stream are - // guaranteed not to have changed between their `read_time` and this one. - // - // If the query returns no results, a response with `read_time` and no - // `document` will be sent, and this represents the time at which the query - // was run. - google.protobuf.Timestamp read_time = 3; - - // The number of results that have been skipped due to an offset between - // the last response and the current response. - int32 skipped_results = 4; -} - -// The request for [Firestore.Write][google.firestore.v1beta1.Firestore.Write]. -// -// The first request creates a stream, or resumes an existing one from a token. -// -// When creating a new stream, the server replies with a response containing -// only an ID and a token, to use in the next request. -// -// When resuming a stream, the server first streams any responses later than the -// given token, then a response containing only an up-to-date token, to use in -// the next request. -message WriteRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - // This is only required in the first message. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The ID of the write stream to resume. - // This may only be set in the first message. When left empty, a new write - // stream will be created. - string stream_id = 2; - - // The writes to apply. - // - // Always executed atomically and in order. - // This must be empty on the first request. - // This may be empty on the last request. 
- // This must not be empty on all other requests. - repeated Write writes = 3; - - // A stream token that was previously sent by the server. - // - // The client should set this field to the token from the most recent - // [WriteResponse][google.firestore.v1beta1.WriteResponse] it has received. This acknowledges that the client has - // received responses up to this token. After sending this token, earlier - // tokens may not be used anymore. - // - // The server may close the stream if there are too many unacknowledged - // responses. - // - // Leave this field unset when creating a new stream. To resume a stream at - // a specific point, set this field and the `stream_id` field. - bytes stream_token = 4; - - // Labels associated with this write request. - map<string, string> labels = 5; -} - -// The response for [Firestore.Write][google.firestore.v1beta1.Firestore.Write]. -message WriteResponse { - // The ID of the stream. - // Only set on the first message, when a new stream was created. - string stream_id = 1; - - // A token that represents the position of this response in the stream. - // This can be used by a client to resume the stream at this point. - // - // This field is always set. - bytes stream_token = 2; - - // The result of applying the writes. - // - // The i-th write result corresponds to the i-th write in the - // request. - repeated WriteResult write_results = 3; - - // The time at which the commit occurred. - google.protobuf.Timestamp commit_time = 4; -} - -// A request for [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen] -message ListenRequest { - // Required. The database name. In the format: - // `projects/{project_id}/databases/{database_id}`. - string database = 1 [(google.api.field_behavior) = REQUIRED]; - - // The supported target changes. - oneof target_change { - // A target to add to this stream. - Target add_target = 2; - - // The ID of a target to remove from this stream. - int32 remove_target = 3; - } - - // Labels associated with this target change. - map<string, string> labels = 4; -} - -// The response for [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen]. -message ListenResponse { - // The supported responses. - oneof response_type { - // Targets have changed. - TargetChange target_change = 2; - - // A [Document][google.firestore.v1beta1.Document] has changed. - DocumentChange document_change = 3; - - // A [Document][google.firestore.v1beta1.Document] has been deleted. - DocumentDelete document_delete = 4; - - // A [Document][google.firestore.v1beta1.Document] has been removed from a target (because it is no longer - // relevant to that target). - DocumentRemove document_remove = 6; - - // A filter to apply to the set of documents previously returned for the - // given target. - // - // Returned when documents may have been removed from the given target, but - // the exact documents are unknown. - ExistenceFilter filter = 5; - } -} - -// A specification of a set of documents to listen to. -message Target { - // A target specified by a set of document names. - message DocumentsTarget { - // The names of the documents to retrieve. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // The request will fail if any of the documents is not a child resource of - // the given `database`. Duplicate names will be elided. - repeated string documents = 2; - } - - // A target specified by a query. - message QueryTarget { - // The parent resource name.
In the format: - // `projects/{project_id}/databases/{database_id}/documents` or - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents` or - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1; - - // The query to run. - oneof query_type { - // A structured query. - StructuredQuery structured_query = 2; - } - } - - // The type of target to listen to. - oneof target_type { - // A target specified by a query. - QueryTarget query = 2; - - // A target specified by a set of document names. - DocumentsTarget documents = 3; - } - - // When to start listening. - // - // If not specified, all matching Documents are returned before any - // subsequent changes. - oneof resume_type { - // A resume token from a prior [TargetChange][google.firestore.v1beta1.TargetChange] for an identical target. - // - // Using a resume token with a different target is unsupported and may fail. - bytes resume_token = 4; - - // Start listening after a specific `read_time`. - // - // The client must know the state of matching documents at this time. - google.protobuf.Timestamp read_time = 11; - } - - // The target ID that identifies the target on the stream. Must be a positive - // number and non-zero. - int32 target_id = 5; - - // If the target should be removed once it is current and consistent. - bool once = 6; -} - -// Targets being watched have changed. -message TargetChange { - // The type of change. - enum TargetChangeType { - // No change has occurred. Used only to send an updated `resume_token`. - NO_CHANGE = 0; - - // The targets have been added. - ADD = 1; - - // The targets have been removed. - REMOVE = 2; - - // The targets reflect all changes committed before the targets were added - // to the stream. - // - // This will be sent after or with a `read_time` that is greater than or - // equal to the time at which the targets were added. - // - // Listeners can wait for this change if read-after-write semantics - // are desired. - CURRENT = 3; - - // The targets have been reset, and a new initial state for the targets - // will be returned in subsequent changes. - // - // After the initial state is complete, `CURRENT` will be returned even - // if the target was previously indicated to be `CURRENT`. - RESET = 4; - } - - // The type of change that occurred. - TargetChangeType target_change_type = 1; - - // The target IDs of targets that have changed. - // - // If empty, the change applies to all targets. - // - // The order of the target IDs is not defined. - repeated int32 target_ids = 2; - - // The error that resulted in this change, if applicable. - google.rpc.Status cause = 3; - - // A token that can be used to resume the stream for the given `target_ids`, - // or all targets if `target_ids` is empty. - // - // Not set on every target change. - bytes resume_token = 4; - - // The consistent `read_time` for the given `target_ids` (omitted when the - // target_ids are not at a consistent snapshot). - // - // The stream is guaranteed to send a `read_time` with `target_ids` empty - // whenever the entire stream reaches a new consistent snapshot. ADD, - // CURRENT, and RESET messages are guaranteed to (eventually) result in a - // new consistent snapshot (while NO_CHANGE and REMOVE messages are not). - // - // For a given stream, `read_time` is guaranteed to be monotonically - // increasing. 
- google.protobuf.Timestamp read_time = 6; -} - -// The request for [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds]. -message ListCollectionIdsRequest { - // Required. The parent document. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - // For example: - // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom` - string parent = 1 [(google.api.field_behavior) = REQUIRED]; - - // The maximum number of results to return. - int32 page_size = 2; - - // A page token. Must be a value from - // [ListCollectionIdsResponse][google.firestore.v1beta1.ListCollectionIdsResponse]. - string page_token = 3; -} - -// The response from [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds]. -message ListCollectionIdsResponse { - // The collection ids. - repeated string collection_ids = 1; - - // A page token that may be used to continue the list. - string next_page_token = 2; -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/firestore_admin.proto b/firestore/google/cloud/firestore_v1beta1/proto/firestore_admin.proto deleted file mode 100644 index 15ce94da6b68..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/firestore_admin.proto +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta1; - -import "google/api/annotations.proto"; -import "google/firestore/admin/v1beta1/index.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; -option java_multiple_files = true; -option java_outer_classname = "FirestoreAdminProto"; -option java_package = "com.google.firestore.admin.v1beta1"; -option objc_class_prefix = "GCFS"; - - -// The Cloud Firestore Admin API. -// -// This API provides several administrative services for Cloud Firestore. -// -// # Concepts -// -// Project, Database, Namespace, Collection, and Document are used as defined in -// the Google Cloud Firestore API. -// -// Operation: An Operation represents work being performed in the background. -// -// -// # Services -// -// ## Index -// -// The index service manages Cloud Firestore indexes. -// -// Index creation is performed asynchronously. -// An Operation resource is created for each such asynchronous operation. -// The state of the operation (including any errors encountered) -// may be queried via the Operation resource. -// -// ## Metadata -// -// Provides metadata and statistical information about data in Cloud Firestore. -// The data provided as part of this API may be stale. 
-// -// ## Operation -// -// The Operations collection provides a record of actions performed for the -// specified Project (including any Operations in progress). Operations are not -// created directly but through calls on other collections or resources. -// -// An Operation that is not yet done may be cancelled. The request to cancel is -// asynchronous and the Operation may continue to run for some time after the -// request to cancel is made. -// -// An Operation that is done may be deleted so that it is no longer listed as -// part of the Operation collection. -// -// Operations are created by service `FirestoreAdmin`, but are accessed via -// service `google.longrunning.Operations`. -service FirestoreAdmin { - // Creates the specified index. - // A newly created index's initial state is `CREATING`. On completion of the - // returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`. - // If the index already exists, the call will return an `ALREADY_EXISTS` - // status. - // - // During creation, the process could result in an error, in which case the - // index will move to the `ERROR` state. The process can be recovered by - // fixing the data that caused the error, removing the index with - // [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with - // [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. - // - // Indexes with a single field cannot be created. - rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta1/{parent=projects/*/databases/*}/indexes" - body: "index" - }; - } - - // Lists the indexes that match the specified filters. - rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) { - option (google.api.http) = { - get: "/v1beta1/{parent=projects/*/databases/*}/indexes" - }; - } - - // Gets an index. - rpc GetIndex(GetIndexRequest) returns (Index) { - option (google.api.http) = { - get: "/v1beta1/{name=projects/*/databases/*/indexes/*}" - }; - } - - // Deletes an index. - rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1beta1/{name=projects/*/databases/*/indexes/*}" - }; - } - - // Exports a copy of all or a subset of documents from Google Cloud Firestore - // to another storage system, such as Google Cloud Storage. Recent updates to - // documents may not be reflected in the export. The export occurs in the - // background and its progress can be monitored and managed via the - // Operation resource that is created. The output of an export may only be - // used once the associated operation is done. If an export operation is - // cancelled before completion it may leave partial data behind in Google - // Cloud Storage. - rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta1/{name=projects/*/databases/*}:exportDocuments" - body: "*" - }; - } - - // Imports documents into Google Cloud Firestore. Existing documents with the - // same name are overwritten. The import occurs in the background and its - // progress can be monitored and managed via the Operation resource that is - // created. If an ImportDocuments operation is cancelled, it is possible - // that a subset of the data has already been imported to Cloud Firestore. 
- rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1beta1/{name=projects/*/databases/*}:importDocuments" - body: "*" - }; - } -} - -// Metadata for index operations. This metadata populates -// the metadata field of [google.longrunning.Operation][google.longrunning.Operation]. -message IndexOperationMetadata { - // The type of index operation. - enum OperationType { - // Unspecified. Never set by server. - OPERATION_TYPE_UNSPECIFIED = 0; - - // The operation is creating the index. Initiated by a `CreateIndex` call. - CREATING_INDEX = 1; - } - - // The time that work began on the operation. - google.protobuf.Timestamp start_time = 1; - - // The time the operation ended, either successfully or otherwise. Unset if - // the operation is still active. - google.protobuf.Timestamp end_time = 2; - - // The index resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` - string index = 3; - - // The type of index operation. - OperationType operation_type = 4; - - // True if the [google.longrunning.Operation] was cancelled. If the - // cancellation is in progress, cancelled will be true but - // [google.longrunning.Operation.done][google.longrunning.Operation.done] will be false. - bool cancelled = 5; - - // Progress of the existing operation, measured in number of documents. - Progress document_progress = 6; -} - -// Measures the progress of a particular metric. -message Progress { - // An estimate of how much work has been completed. Note that this may be - // greater than `work_estimated`. - int64 work_completed = 1; - - // An estimate of how much work needs to be performed. Zero if the - // work estimate is unavailable. May change as work progresses. - int64 work_estimated = 2; -} - -// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex]. -message CreateIndexRequest { - // The name of the database this index will apply to. For example: - // `projects/{project_id}/databases/{database_id}` - string parent = 1; - - // The index to create. The name and state fields are output only and will be - // ignored. Certain single field indexes cannot be created or deleted. - Index index = 2; -} - -// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1beta1.FirestoreAdmin.GetIndex]. -message GetIndexRequest { - // The name of the index. For example: - // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` - string name = 1; -} - -// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes]. -message ListIndexesRequest { - // The database name. For example: - // `projects/{project_id}/databases/{database_id}` - string parent = 1; - - string filter = 2; - - // The standard List page size. - int32 page_size = 3; - - // The standard List page token. - string page_token = 4; -} - -// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex]. -message DeleteIndexRequest { - // The index name. For example: - // `projects/{project_id}/databases/{database_id}/indexes/{index_id}` - string name = 1; -} - -// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes]. -message ListIndexesResponse { - // The indexes. - repeated Index indexes = 1; - - // The standard List next-page token. 
- string next_page_token = 2; -} - -// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta1.FirestoreAdmin.ExportDocuments]. -message ExportDocumentsRequest { - // Database to export. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1; - - // Which collection ids to export. Unspecified means all collections. - repeated string collection_ids = 3; - - // The output URI. Currently only supports Google Cloud Storage URIs of the - // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name - // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional - // Google Cloud Storage namespace path. When - // choosing a name, be sure to consider Google Cloud Storage naming - // guidelines: https://cloud.google.com/storage/docs/naming. - // If the URI is a bucket (without a namespace path), a prefix will be - // generated based on the start time. - string output_uri_prefix = 4; -} - -// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta1.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsRequest { - // Database to import into. Should be of the form: - // `projects/{project_id}/databases/{database_id}`. - string name = 1; - - // Which collection ids to import. Unspecified means all collections included - // in the import. - repeated string collection_ids = 3; - - // Location of the exported files. - // This must match the output_uri_prefix of an ExportDocumentsResponse from - // an export that has completed successfully. - // See: - // [google.firestore.admin.v1beta1.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1beta1.ExportDocumentsResponse.output_uri_prefix]. - string input_uri_prefix = 4; -} - -// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. -message ExportDocumentsResponse { - // Location of the output files. This can be used to begin an import - // into Cloud Firestore (this project or another project) after the operation - // completes successfully. - string output_uri_prefix = 1; -} - -// Metadata for ExportDocuments operations. -message ExportDocumentsMetadata { - // The time that work began on the operation. - google.protobuf.Timestamp start_time = 1; - - // The time the operation ended, either successfully or otherwise. Unset if - // the operation is still active. - google.protobuf.Timestamp end_time = 2; - - // The state of the export operation. - OperationState operation_state = 3; - - // An estimate of the number of documents processed. - Progress progress_documents = 4; - - // An estimate of the number of bytes processed. - Progress progress_bytes = 5; - - // Which collection ids are being exported. - repeated string collection_ids = 6; - - // Where the entities are being exported to. - string output_uri_prefix = 7; -} - -// Metadata for ImportDocuments operations. -message ImportDocumentsMetadata { - // The time that work began on the operation. - google.protobuf.Timestamp start_time = 1; - - // The time the operation ended, either successfully or otherwise. Unset if - // the operation is still active. - google.protobuf.Timestamp end_time = 2; - - // The state of the import operation. - OperationState operation_state = 3; - - // An estimate of the number of documents processed. - Progress progress_documents = 4; - - // An estimate of the number of bytes processed. - Progress progress_bytes = 5; - - // Which collection ids are being imported. 
- repeated string collection_ids = 6; - - // The location of the documents being imported. - string input_uri_prefix = 7; -} - -// The various possible states for an ongoing Operation. -enum OperationState { - // Unspecified. - STATE_UNSPECIFIED = 0; - - // Request is being prepared for processing. - INITIALIZING = 1; - - // Request is actively being processed. - PROCESSING = 2; - - // Request is in the process of being cancelled after user called - // google.longrunning.Operations.CancelOperation on the operation. - CANCELLING = 3; - - // Request has been processed and is in its finalization stage. - FINALIZING = 4; - - // Request has completed successfully. - SUCCESSFUL = 5; - - // Request has finished being processed, but encountered an error. - FAILED = 6; - - // Request has finished being cancelled after user called - // google.longrunning.Operations.CancelOperation. - CANCELLED = 7; -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/firestore_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/firestore_pb2.py deleted file mode 100644 index 7d29eb882c51..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/firestore_pb2.py +++ /dev/null @@ -1,3803 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/firestore_v1beta1/proto/firestore.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.cloud.firestore_v1beta1.proto import ( - common_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - query_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_query__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - write_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1beta1/proto/firestore.proto", - package="google.firestore.v1beta1", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.firestore.v1beta1B\016FirestoreProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\242\002\004GCFS\252\002\036Google.Cloud.Firestore.V1Beta1\312\002\036Google\\Cloud\\Firestore\\V1beta1" - ), - serialized_pb=_b( - 
'\n4google/cloud/firestore_v1beta1/proto/firestore.proto\x12\x18google.firestore.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x31google/cloud/firestore_v1beta1/proto/common.proto\x1a\x33google/cloud/firestore_v1beta1/proto/document.proto\x1a\x30google/cloud/firestore_v1beta1/proto/query.proto\x1a\x30google/cloud/firestore_v1beta1/proto/write.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xbd\x01\n\x12GetDocumentRequest\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x34\n\x04mask\x18\x02 \x01(\x0b\x32&.google.firestore.v1beta1.DocumentMask\x12\x15\n\x0btransaction\x18\x03 \x01(\x0cH\x00\x12/\n\tread_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x16\n\x14\x63onsistency_selector"\xac\x02\n\x14ListDocumentsRequest\x12\x13\n\x06parent\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1a\n\rcollection_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\x12\x10\n\x08order_by\x18\x06 \x01(\t\x12\x34\n\x04mask\x18\x07 \x01(\x0b\x32&.google.firestore.v1beta1.DocumentMask\x12\x15\n\x0btransaction\x18\x08 \x01(\x0cH\x00\x12/\n\tread_time\x18\n \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x14\n\x0cshow_missing\x18\x0c \x01(\x08\x42\x16\n\x14\x63onsistency_selector"g\n\x15ListDocumentsResponse\x12\x35\n\tdocuments\x18\x01 \x03(\x0b\x32".google.firestore.v1beta1.Document\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\xce\x01\n\x15\x43reateDocumentRequest\x12\x13\n\x06parent\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x1a\n\rcollection_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x0b\x64ocument_id\x18\x03 \x01(\t\x12\x39\n\x08\x64ocument\x18\x04 \x01(\x0b\x32".google.firestore.v1beta1.DocumentB\x03\xe0\x41\x02\x12\x34\n\x04mask\x18\x05 \x01(\x0b\x32&.google.firestore.v1beta1.DocumentMask"\x87\x02\n\x15UpdateDocumentRequest\x12\x39\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.firestore.v1beta1.DocumentB\x03\xe0\x41\x02\x12;\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32&.google.firestore.v1beta1.DocumentMask\x12\x34\n\x04mask\x18\x03 \x01(\x0b\x32&.google.firestore.v1beta1.DocumentMask\x12@\n\x10\x63urrent_document\x18\x04 \x01(\x0b\x32&.google.firestore.v1beta1.Precondition"l\n\x15\x44\x65leteDocumentRequest\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12@\n\x10\x63urrent_document\x18\x02 \x01(\x0b\x32&.google.firestore.v1beta1.Precondition"\xa3\x02\n\x18\x42\x61tchGetDocumentsRequest\x12\x15\n\x08\x64\x61tabase\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tdocuments\x18\x02 \x03(\t\x12\x34\n\x04mask\x18\x03 \x01(\x0b\x32&.google.firestore.v1beta1.DocumentMask\x12\x15\n\x0btransaction\x18\x04 \x01(\x0cH\x00\x12G\n\x0fnew_transaction\x18\x05 \x01(\x0b\x32,.google.firestore.v1beta1.TransactionOptionsH\x00\x12/\n\tread_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x16\n\x14\x63onsistency_selector"\xb1\x01\n\x19\x42\x61tchGetDocumentsResponse\x12\x33\n\x05\x66ound\x18\x01 \x01(\x0b\x32".google.firestore.v1beta1.DocumentH\x00\x12\x11\n\x07missing\x18\x02 \x01(\tH\x00\x12\x13\n\x0btransaction\x18\x03 \x01(\x0c\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x08\n\x06result"o\n\x17\x42\x65ginTransactionRequest\x12\x15\n\x08\x64\x61tabase\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12=\n\x07options\x18\x02 \x01(\x0b\x32,.google.firestore.v1beta1.TransactionOptions"/\n\x18\x42\x65ginTransactionResponse\x12\x13\n\x0btransaction\x18\x01 
\x01(\x0c"l\n\rCommitRequest\x12\x15\n\x08\x64\x61tabase\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12/\n\x06writes\x18\x02 \x03(\x0b\x32\x1f.google.firestore.v1beta1.Write\x12\x13\n\x0btransaction\x18\x03 \x01(\x0c"\x7f\n\x0e\x43ommitResponse\x12<\n\rwrite_results\x18\x01 \x03(\x0b\x32%.google.firestore.v1beta1.WriteResult\x12/\n\x0b\x63ommit_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"B\n\x0fRollbackRequest\x12\x15\n\x08\x64\x61tabase\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x0btransaction\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02"\xa4\x02\n\x0fRunQueryRequest\x12\x13\n\x06parent\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x45\n\x10structured_query\x18\x02 \x01(\x0b\x32).google.firestore.v1beta1.StructuredQueryH\x00\x12\x15\n\x0btransaction\x18\x05 \x01(\x0cH\x01\x12G\n\x0fnew_transaction\x18\x06 \x01(\x0b\x32,.google.firestore.v1beta1.TransactionOptionsH\x01\x12/\n\tread_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x01\x42\x0c\n\nquery_typeB\x16\n\x14\x63onsistency_selector"\xa5\x01\n\x10RunQueryResponse\x12\x13\n\x0btransaction\x18\x02 \x01(\x0c\x12\x34\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.firestore.v1beta1.Document\x12-\n\tread_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fskipped_results\x18\x04 \x01(\x05"\xf2\x01\n\x0cWriteRequest\x12\x15\n\x08\x64\x61tabase\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tstream_id\x18\x02 \x01(\t\x12/\n\x06writes\x18\x03 \x03(\x0b\x32\x1f.google.firestore.v1beta1.Write\x12\x14\n\x0cstream_token\x18\x04 \x01(\x0c\x12\x42\n\x06labels\x18\x05 \x03(\x0b\x32\x32.google.firestore.v1beta1.WriteRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xa7\x01\n\rWriteResponse\x12\x11\n\tstream_id\x18\x01 \x01(\t\x12\x14\n\x0cstream_token\x18\x02 \x01(\x0c\x12<\n\rwrite_results\x18\x03 \x03(\x0b\x32%.google.firestore.v1beta1.WriteResult\x12/\n\x0b\x63ommit_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xfc\x01\n\rListenRequest\x12\x15\n\x08\x64\x61tabase\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x36\n\nadd_target\x18\x02 \x01(\x0b\x32 .google.firestore.v1beta1.TargetH\x00\x12\x17\n\rremove_target\x18\x03 \x01(\x05H\x00\x12\x43\n\x06labels\x18\x04 \x03(\x0b\x32\x33.google.firestore.v1beta1.ListenRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0f\n\rtarget_change"\xee\x02\n\x0eListenResponse\x12?\n\rtarget_change\x18\x02 \x01(\x0b\x32&.google.firestore.v1beta1.TargetChangeH\x00\x12\x43\n\x0f\x64ocument_change\x18\x03 \x01(\x0b\x32(.google.firestore.v1beta1.DocumentChangeH\x00\x12\x43\n\x0f\x64ocument_delete\x18\x04 \x01(\x0b\x32(.google.firestore.v1beta1.DocumentDeleteH\x00\x12\x43\n\x0f\x64ocument_remove\x18\x06 \x01(\x0b\x32(.google.firestore.v1beta1.DocumentRemoveH\x00\x12;\n\x06\x66ilter\x18\x05 \x01(\x0b\x32).google.firestore.v1beta1.ExistenceFilterH\x00\x42\x0f\n\rresponse_type"\xb0\x03\n\x06Target\x12=\n\x05query\x18\x02 \x01(\x0b\x32,.google.firestore.v1beta1.Target.QueryTargetH\x00\x12\x45\n\tdocuments\x18\x03 \x01(\x0b\x32\x30.google.firestore.v1beta1.Target.DocumentsTargetH\x00\x12\x16\n\x0cresume_token\x18\x04 \x01(\x0cH\x01\x12/\n\tread_time\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x01\x12\x11\n\ttarget_id\x18\x05 \x01(\x05\x12\x0c\n\x04once\x18\x06 \x01(\x08\x1a$\n\x0f\x44ocumentsTarget\x12\x11\n\tdocuments\x18\x02 \x03(\t\x1ar\n\x0bQueryTarget\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x45\n\x10structured_query\x18\x02 
\x01(\x0b\x32).google.firestore.v1beta1.StructuredQueryH\x00\x42\x0c\n\nquery_typeB\r\n\x0btarget_typeB\r\n\x0bresume_type"\xaf\x02\n\x0cTargetChange\x12S\n\x12target_change_type\x18\x01 \x01(\x0e\x32\x37.google.firestore.v1beta1.TargetChange.TargetChangeType\x12\x12\n\ntarget_ids\x18\x02 \x03(\x05\x12!\n\x05\x63\x61use\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12\x14\n\x0cresume_token\x18\x04 \x01(\x0c\x12-\n\tread_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"N\n\x10TargetChangeType\x12\r\n\tNO_CHANGE\x10\x00\x12\x07\n\x03\x41\x44\x44\x10\x01\x12\n\n\x06REMOVE\x10\x02\x12\x0b\n\x07\x43URRENT\x10\x03\x12\t\n\x05RESET\x10\x04"V\n\x18ListCollectionIdsRequest\x12\x13\n\x06parent\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"L\n\x19ListCollectionIdsResponse\x12\x16\n\x0e\x63ollection_ids\x18\x01 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\x9b\x15\n\tFirestore\x12\x9e\x01\n\x0bGetDocument\x12,.google.firestore.v1beta1.GetDocumentRequest\x1a".google.firestore.v1beta1.Document"=\x82\xd3\xe4\x93\x02\x37\x12\x35/v1beta1/{name=projects/*/databases/*/documents/*/**}\x12\xc1\x01\n\rListDocuments\x12..google.firestore.v1beta1.ListDocumentsRequest\x1a/.google.firestore.v1beta1.ListDocumentsResponse"O\x82\xd3\xe4\x93\x02I\x12G/v1beta1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}\x12\xbe\x01\n\x0e\x43reateDocument\x12/.google.firestore.v1beta1.CreateDocumentRequest\x1a".google.firestore.v1beta1.Document"W\x82\xd3\xe4\x93\x02Q"E/v1beta1/{parent=projects/*/databases/*/documents/**}/{collection_id}:\x08\x64ocument\x12\xce\x01\n\x0eUpdateDocument\x12/.google.firestore.v1beta1.UpdateDocumentRequest\x1a".google.firestore.v1beta1.Document"g\x82\xd3\xe4\x93\x02J2>/v1beta1/{document.name=projects/*/databases/*/documents/*/**}:\x08\x64ocument\xda\x41\x14\x64ocument,update_mask\x12\x9f\x01\n\x0e\x44\x65leteDocument\x12/.google.firestore.v1beta1.DeleteDocumentRequest\x1a\x16.google.protobuf.Empty"D\x82\xd3\xe4\x93\x02\x37*5/v1beta1/{name=projects/*/databases/*/documents/*/**}\xda\x41\x04name\x12\xc8\x01\n\x11\x42\x61tchGetDocuments\x12\x32.google.firestore.v1beta1.BatchGetDocumentsRequest\x1a\x33.google.firestore.v1beta1.BatchGetDocumentsResponse"H\x82\xd3\xe4\x93\x02\x42"=/v1beta1/{database=projects/*/databases/*}/documents:batchGet:\x01*0\x01\x12\xd6\x01\n\x10\x42\x65ginTransaction\x12\x31.google.firestore.v1beta1.BeginTransactionRequest\x1a\x32.google.firestore.v1beta1.BeginTransactionResponse"[\x82\xd3\xe4\x93\x02J"E/v1beta1/{database=projects/*/databases/*}/documents:beginTransaction:\x01*\xda\x41\x08\x64\x61tabase\x12\xb5\x01\n\x06\x43ommit\x12\'.google.firestore.v1beta1.CommitRequest\x1a(.google.firestore.v1beta1.CommitResponse"X\x82\xd3\xe4\x93\x02@";/v1beta1/{database=projects/*/databases/*}/documents:commit:\x01*\xda\x41\x0f\x64\x61tabase,writes\x12\xae\x01\n\x08Rollback\x12).google.firestore.v1beta1.RollbackRequest\x1a\x16.google.protobuf.Empty"_\x82\xd3\xe4\x93\x02\x42"=/v1beta1/{database=projects/*/databases/*}/documents:rollback:\x01*\xda\x41\x14\x64\x61tabase,transaction\x12\xf4\x01\n\x08RunQuery\x12).google.firestore.v1beta1.RunQueryRequest\x1a*.google.firestore.v1beta1.RunQueryResponse"\x8e\x01\x82\xd3\xe4\x93\x02\x87\x01";/v1beta1/{parent=projects/*/databases/*/documents}:runQuery:\x01*ZE"@/v1beta1/{parent=projects/*/databases/*/documents/*/**}:runQuery:\x01*0\x01\x12\xa3\x01\n\x05Write\x12&.google.firestore.v1beta1.WriteRequest\x1a\'.google.firestore.v1beta1.WriteResponse"E\x82\xd3\xe4\x93\x02?
":/v1beta1/{database=projects/*/databases/*}/documents:write:\x01*(\x01\x30\x01\x12\xa7\x01\n\x06Listen\x12\'.google.firestore.v1beta1.ListenRequest\x1a(.google.firestore.v1beta1.ListenResponse"F\x82\xd3\xe4\x93\x02@";/v1beta1/{database=projects/*/databases/*}/documents:listen:\x01*(\x01\x30\x01\x12\xa8\x02\n\x11ListCollectionIds\x12\x32.google.firestore.v1beta1.ListCollectionIdsRequest\x1a\x33.google.firestore.v1beta1.ListCollectionIdsResponse"\xa9\x01\x82\xd3\xe4\x93\x02\x99\x01"D/v1beta1/{parent=projects/*/databases/*/documents}:listCollectionIds:\x01*ZN"I/v1beta1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds:\x01*\xda\x41\x06parent\x1av\xca\x41\x18\x66irestore.googleapis.com\xd2\x41Xhttps://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastoreB\xbc\x01\n\x1c\x63om.google.firestore.v1beta1B\x0e\x46irestoreProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xa2\x02\x04GCFS\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1\xca\x02\x1eGoogle\\Cloud\\Firestore\\V1beta1b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_query__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - ], -) - - -_TARGETCHANGE_TARGETCHANGETYPE = _descriptor.EnumDescriptor( - name="TargetChangeType", - full_name="google.firestore.v1beta1.TargetChange.TargetChangeType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="NO_CHANGE", index=0, number=0, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ADD", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REMOVE", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="CURRENT", index=3, number=3, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="RESET", index=4, number=4, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4752, - serialized_end=4830, -) -_sym_db.RegisterEnumDescriptor(_TARGETCHANGE_TARGETCHANGETYPE) - - -_GETDOCUMENTREQUEST = _descriptor.Descriptor( - name="GetDocumentRequest", - full_name="google.firestore.v1beta1.GetDocumentRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.v1beta1.GetDocumentRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mask", - full_name="google.firestore.v1beta1.GetDocumentRequest.mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.GetDocumentRequest.transaction", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.GetDocumentRequest.read_time", - index=3, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="consistency_selector", - full_name="google.firestore.v1beta1.GetDocumentRequest.consistency_selector", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=462, - serialized_end=651, -) - - -_LISTDOCUMENTSREQUEST = _descriptor.Descriptor( - name="ListDocumentsRequest", - full_name="google.firestore.v1beta1.ListDocumentsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.v1beta1.ListDocumentsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="collection_id", - full_name="google.firestore.v1beta1.ListDocumentsRequest.collection_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.firestore.v1beta1.ListDocumentsRequest.page_size", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.firestore.v1beta1.ListDocumentsRequest.page_token", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.firestore.v1beta1.ListDocumentsRequest.order_by", - index=4, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="mask", - full_name="google.firestore.v1beta1.ListDocumentsRequest.mask", - index=5, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.ListDocumentsRequest.transaction", - index=6, - number=8, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.ListDocumentsRequest.read_time", - index=7, - number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="show_missing", - full_name="google.firestore.v1beta1.ListDocumentsRequest.show_missing", - index=8, - number=12, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="consistency_selector", - full_name="google.firestore.v1beta1.ListDocumentsRequest.consistency_selector", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=654, - serialized_end=954, -) - - -_LISTDOCUMENTSRESPONSE = _descriptor.Descriptor( - name="ListDocumentsResponse", - full_name="google.firestore.v1beta1.ListDocumentsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="documents", - full_name="google.firestore.v1beta1.ListDocumentsResponse.documents", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.firestore.v1beta1.ListDocumentsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=956, - serialized_end=1059, -) - - -_CREATEDOCUMENTREQUEST = _descriptor.Descriptor( - name="CreateDocumentRequest", - full_name="google.firestore.v1beta1.CreateDocumentRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.v1beta1.CreateDocumentRequest.parent", - 
index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="collection_id", - full_name="google.firestore.v1beta1.CreateDocumentRequest.collection_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="document_id", - full_name="google.firestore.v1beta1.CreateDocumentRequest.document_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="document", - full_name="google.firestore.v1beta1.CreateDocumentRequest.document", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mask", - full_name="google.firestore.v1beta1.CreateDocumentRequest.mask", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1062, - serialized_end=1268, -) - - -_UPDATEDOCUMENTREQUEST = _descriptor.Descriptor( - name="UpdateDocumentRequest", - full_name="google.firestore.v1beta1.UpdateDocumentRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="document", - full_name="google.firestore.v1beta1.UpdateDocumentRequest.document", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.firestore.v1beta1.UpdateDocumentRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mask", - full_name="google.firestore.v1beta1.UpdateDocumentRequest.mask", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="current_document", - full_name="google.firestore.v1beta1.UpdateDocumentRequest.current_document", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1271, - serialized_end=1534, -) - - -_DELETEDOCUMENTREQUEST = _descriptor.Descriptor( - name="DeleteDocumentRequest", - full_name="google.firestore.v1beta1.DeleteDocumentRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.firestore.v1beta1.DeleteDocumentRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="current_document", - full_name="google.firestore.v1beta1.DeleteDocumentRequest.current_document", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1536, - serialized_end=1644, -) - - -_BATCHGETDOCUMENTSREQUEST = _descriptor.Descriptor( - name="BatchGetDocumentsRequest", - full_name="google.firestore.v1beta1.BatchGetDocumentsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="database", - full_name="google.firestore.v1beta1.BatchGetDocumentsRequest.database", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="documents", - full_name="google.firestore.v1beta1.BatchGetDocumentsRequest.documents", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="mask", - full_name="google.firestore.v1beta1.BatchGetDocumentsRequest.mask", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.BatchGetDocumentsRequest.transaction", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="new_transaction", - full_name="google.firestore.v1beta1.BatchGetDocumentsRequest.new_transaction", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.BatchGetDocumentsRequest.read_time", - index=5, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="consistency_selector", - full_name="google.firestore.v1beta1.BatchGetDocumentsRequest.consistency_selector", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1647, - serialized_end=1938, -) - - -_BATCHGETDOCUMENTSRESPONSE = _descriptor.Descriptor( - name="BatchGetDocumentsResponse", - full_name="google.firestore.v1beta1.BatchGetDocumentsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="found", - full_name="google.firestore.v1beta1.BatchGetDocumentsResponse.found", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="missing", - full_name="google.firestore.v1beta1.BatchGetDocumentsResponse.missing", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.BatchGetDocumentsResponse.transaction", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.BatchGetDocumentsResponse.read_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="result", - full_name="google.firestore.v1beta1.BatchGetDocumentsResponse.result", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1941, - 
serialized_end=2118, -) - - -_BEGINTRANSACTIONREQUEST = _descriptor.Descriptor( - name="BeginTransactionRequest", - full_name="google.firestore.v1beta1.BeginTransactionRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="database", - full_name="google.firestore.v1beta1.BeginTransactionRequest.database", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="options", - full_name="google.firestore.v1beta1.BeginTransactionRequest.options", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2120, - serialized_end=2231, -) - - -_BEGINTRANSACTIONRESPONSE = _descriptor.Descriptor( - name="BeginTransactionResponse", - full_name="google.firestore.v1beta1.BeginTransactionResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.BeginTransactionResponse.transaction", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2233, - serialized_end=2280, -) - - -_COMMITREQUEST = _descriptor.Descriptor( - name="CommitRequest", - full_name="google.firestore.v1beta1.CommitRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="database", - full_name="google.firestore.v1beta1.CommitRequest.database", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="writes", - full_name="google.firestore.v1beta1.CommitRequest.writes", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.CommitRequest.transaction", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2282, - serialized_end=2390, -) - - -_COMMITRESPONSE = _descriptor.Descriptor( - name="CommitResponse", - full_name="google.firestore.v1beta1.CommitResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="write_results", - full_name="google.firestore.v1beta1.CommitResponse.write_results", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="commit_time", - full_name="google.firestore.v1beta1.CommitResponse.commit_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2392, - serialized_end=2519, -) - - -_ROLLBACKREQUEST = _descriptor.Descriptor( - name="RollbackRequest", - full_name="google.firestore.v1beta1.RollbackRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="database", - full_name="google.firestore.v1beta1.RollbackRequest.database", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.RollbackRequest.transaction", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2521, - serialized_end=2587, -) - - -_RUNQUERYREQUEST = _descriptor.Descriptor( - name="RunQueryRequest", - full_name="google.firestore.v1beta1.RunQueryRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.v1beta1.RunQueryRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="structured_query", - full_name="google.firestore.v1beta1.RunQueryRequest.structured_query", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.RunQueryRequest.transaction", - index=2, - number=5, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="new_transaction", - full_name="google.firestore.v1beta1.RunQueryRequest.new_transaction", - index=3, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.RunQueryRequest.read_time", - index=4, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="query_type", - full_name="google.firestore.v1beta1.RunQueryRequest.query_type", - index=0, - containing_type=None, - fields=[], - ), - _descriptor.OneofDescriptor( - name="consistency_selector", - full_name="google.firestore.v1beta1.RunQueryRequest.consistency_selector", - index=1, - containing_type=None, - fields=[], - ), - ], - serialized_start=2590, - serialized_end=2882, -) - - -_RUNQUERYRESPONSE = _descriptor.Descriptor( - name="RunQueryResponse", - full_name="google.firestore.v1beta1.RunQueryResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.firestore.v1beta1.RunQueryResponse.transaction", - index=0, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="document", - full_name="google.firestore.v1beta1.RunQueryResponse.document", - index=1, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.RunQueryResponse.read_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="skipped_results", - full_name="google.firestore.v1beta1.RunQueryResponse.skipped_results", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2885, - serialized_end=3050, -) - - -_WRITEREQUEST_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.firestore.v1beta1.WriteRequest.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.firestore.v1beta1.WriteRequest.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.firestore.v1beta1.WriteRequest.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3250, - serialized_end=3295, -) - -_WRITEREQUEST = _descriptor.Descriptor( - name="WriteRequest", - full_name="google.firestore.v1beta1.WriteRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="database", - full_name="google.firestore.v1beta1.WriteRequest.database", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="stream_id", - full_name="google.firestore.v1beta1.WriteRequest.stream_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="writes", - full_name="google.firestore.v1beta1.WriteRequest.writes", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="stream_token", - full_name="google.firestore.v1beta1.WriteRequest.stream_token", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.firestore.v1beta1.WriteRequest.labels", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_WRITEREQUEST_LABELSENTRY], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3053, - serialized_end=3295, -) - - -_WRITERESPONSE = _descriptor.Descriptor( - name="WriteResponse", - full_name="google.firestore.v1beta1.WriteResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="stream_id", - full_name="google.firestore.v1beta1.WriteResponse.stream_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="stream_token", - full_name="google.firestore.v1beta1.WriteResponse.stream_token", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="write_results", - full_name="google.firestore.v1beta1.WriteResponse.write_results", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="commit_time", - full_name="google.firestore.v1beta1.WriteResponse.commit_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3298, - serialized_end=3465, -) - - -_LISTENREQUEST_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.firestore.v1beta1.ListenRequest.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.firestore.v1beta1.ListenRequest.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.firestore.v1beta1.ListenRequest.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3250, - serialized_end=3295, -) - -_LISTENREQUEST = 
_descriptor.Descriptor( - name="ListenRequest", - full_name="google.firestore.v1beta1.ListenRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="database", - full_name="google.firestore.v1beta1.ListenRequest.database", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="add_target", - full_name="google.firestore.v1beta1.ListenRequest.add_target", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="remove_target", - full_name="google.firestore.v1beta1.ListenRequest.remove_target", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.firestore.v1beta1.ListenRequest.labels", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_LISTENREQUEST_LABELSENTRY], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="target_change", - full_name="google.firestore.v1beta1.ListenRequest.target_change", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=3468, - serialized_end=3720, -) - - -_LISTENRESPONSE = _descriptor.Descriptor( - name="ListenResponse", - full_name="google.firestore.v1beta1.ListenResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="target_change", - full_name="google.firestore.v1beta1.ListenResponse.target_change", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="document_change", - full_name="google.firestore.v1beta1.ListenResponse.document_change", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="document_delete", - full_name="google.firestore.v1beta1.ListenResponse.document_delete", - index=2, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="document_remove", - full_name="google.firestore.v1beta1.ListenResponse.document_remove", - index=3, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.firestore.v1beta1.ListenResponse.filter", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="response_type", - full_name="google.firestore.v1beta1.ListenResponse.response_type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=3723, - serialized_end=4089, -) - - -_TARGET_DOCUMENTSTARGET = _descriptor.Descriptor( - name="DocumentsTarget", - full_name="google.firestore.v1beta1.Target.DocumentsTarget", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="documents", - full_name="google.firestore.v1beta1.Target.DocumentsTarget.documents", - index=0, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4342, - serialized_end=4378, -) - -_TARGET_QUERYTARGET = _descriptor.Descriptor( - name="QueryTarget", - full_name="google.firestore.v1beta1.Target.QueryTarget", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.v1beta1.Target.QueryTarget.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="structured_query", - full_name="google.firestore.v1beta1.Target.QueryTarget.structured_query", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="query_type", - full_name="google.firestore.v1beta1.Target.QueryTarget.query_type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=4380, - serialized_end=4494, -) - -_TARGET = _descriptor.Descriptor( - name="Target", - full_name="google.firestore.v1beta1.Target", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name="query", - full_name="google.firestore.v1beta1.Target.query", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="documents", - full_name="google.firestore.v1beta1.Target.documents", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="resume_token", - full_name="google.firestore.v1beta1.Target.resume_token", - index=2, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.Target.read_time", - index=3, - number=11, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="target_id", - full_name="google.firestore.v1beta1.Target.target_id", - index=4, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="once", - full_name="google.firestore.v1beta1.Target.once", - index=5, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_TARGET_DOCUMENTSTARGET, _TARGET_QUERYTARGET], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="target_type", - full_name="google.firestore.v1beta1.Target.target_type", - index=0, - containing_type=None, - fields=[], - ), - _descriptor.OneofDescriptor( - name="resume_type", - full_name="google.firestore.v1beta1.Target.resume_type", - index=1, - containing_type=None, - fields=[], - ), - ], - serialized_start=4092, - serialized_end=4524, -) - - -_TARGETCHANGE = _descriptor.Descriptor( - name="TargetChange", - full_name="google.firestore.v1beta1.TargetChange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="target_change_type", - full_name="google.firestore.v1beta1.TargetChange.target_change_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="target_ids", - full_name="google.firestore.v1beta1.TargetChange.target_ids", - index=1, - 
number=2, - type=5, - cpp_type=1, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cause", - full_name="google.firestore.v1beta1.TargetChange.cause", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="resume_token", - full_name="google.firestore.v1beta1.TargetChange.resume_token", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.TargetChange.read_time", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_TARGETCHANGE_TARGETCHANGETYPE], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4527, - serialized_end=4830, -) - - -_LISTCOLLECTIONIDSREQUEST = _descriptor.Descriptor( - name="ListCollectionIdsRequest", - full_name="google.firestore.v1beta1.ListCollectionIdsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.firestore.v1beta1.ListCollectionIdsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.firestore.v1beta1.ListCollectionIdsRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.firestore.v1beta1.ListCollectionIdsRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4832, - serialized_end=4918, -) - - -_LISTCOLLECTIONIDSRESPONSE = _descriptor.Descriptor( - name="ListCollectionIdsResponse", - full_name="google.firestore.v1beta1.ListCollectionIdsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name="collection_ids", - full_name="google.firestore.v1beta1.ListCollectionIdsResponse.collection_ids", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.firestore.v1beta1.ListCollectionIdsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4920, - serialized_end=4996, -) - -_GETDOCUMENTREQUEST.fields_by_name[ - "mask" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._DOCUMENTMASK -) -_GETDOCUMENTREQUEST.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_GETDOCUMENTREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _GETDOCUMENTREQUEST.fields_by_name["transaction"] -) -_GETDOCUMENTREQUEST.fields_by_name[ - "transaction" -].containing_oneof = _GETDOCUMENTREQUEST.oneofs_by_name["consistency_selector"] -_GETDOCUMENTREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _GETDOCUMENTREQUEST.fields_by_name["read_time"] -) -_GETDOCUMENTREQUEST.fields_by_name[ - "read_time" -].containing_oneof = _GETDOCUMENTREQUEST.oneofs_by_name["consistency_selector"] -_LISTDOCUMENTSREQUEST.fields_by_name[ - "mask" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._DOCUMENTMASK -) -_LISTDOCUMENTSREQUEST.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_LISTDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _LISTDOCUMENTSREQUEST.fields_by_name["transaction"] -) -_LISTDOCUMENTSREQUEST.fields_by_name[ - "transaction" -].containing_oneof = _LISTDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"] -_LISTDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _LISTDOCUMENTSREQUEST.fields_by_name["read_time"] -) -_LISTDOCUMENTSREQUEST.fields_by_name[ - "read_time" -].containing_oneof = _LISTDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"] -_LISTDOCUMENTSRESPONSE.fields_by_name[ - "documents" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_CREATEDOCUMENTREQUEST.fields_by_name[ - "document" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_CREATEDOCUMENTREQUEST.fields_by_name[ - "mask" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._DOCUMENTMASK -) -_UPDATEDOCUMENTREQUEST.fields_by_name[ - "document" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_UPDATEDOCUMENTREQUEST.fields_by_name[ - "update_mask" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._DOCUMENTMASK -) -_UPDATEDOCUMENTREQUEST.fields_by_name[ - "mask" -].message_type = ( - 
google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._DOCUMENTMASK -) -_UPDATEDOCUMENTREQUEST.fields_by_name[ - "current_document" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._PRECONDITION -) -_DELETEDOCUMENTREQUEST.fields_by_name[ - "current_document" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._PRECONDITION -) -_BATCHGETDOCUMENTSREQUEST.fields_by_name[ - "mask" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._DOCUMENTMASK -) -_BATCHGETDOCUMENTSREQUEST.fields_by_name[ - "new_transaction" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._TRANSACTIONOPTIONS -) -_BATCHGETDOCUMENTSREQUEST.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BATCHGETDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _BATCHGETDOCUMENTSREQUEST.fields_by_name["transaction"] -) -_BATCHGETDOCUMENTSREQUEST.fields_by_name[ - "transaction" -].containing_oneof = _BATCHGETDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"] -_BATCHGETDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _BATCHGETDOCUMENTSREQUEST.fields_by_name["new_transaction"] -) -_BATCHGETDOCUMENTSREQUEST.fields_by_name[ - "new_transaction" -].containing_oneof = _BATCHGETDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"] -_BATCHGETDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _BATCHGETDOCUMENTSREQUEST.fields_by_name["read_time"] -) -_BATCHGETDOCUMENTSREQUEST.fields_by_name[ - "read_time" -].containing_oneof = _BATCHGETDOCUMENTSREQUEST.oneofs_by_name["consistency_selector"] -_BATCHGETDOCUMENTSRESPONSE.fields_by_name[ - "found" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_BATCHGETDOCUMENTSRESPONSE.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BATCHGETDOCUMENTSRESPONSE.oneofs_by_name["result"].fields.append( - _BATCHGETDOCUMENTSRESPONSE.fields_by_name["found"] -) -_BATCHGETDOCUMENTSRESPONSE.fields_by_name[ - "found" -].containing_oneof = _BATCHGETDOCUMENTSRESPONSE.oneofs_by_name["result"] -_BATCHGETDOCUMENTSRESPONSE.oneofs_by_name["result"].fields.append( - _BATCHGETDOCUMENTSRESPONSE.fields_by_name["missing"] -) -_BATCHGETDOCUMENTSRESPONSE.fields_by_name[ - "missing" -].containing_oneof = _BATCHGETDOCUMENTSRESPONSE.oneofs_by_name["result"] -_BEGINTRANSACTIONREQUEST.fields_by_name[ - "options" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._TRANSACTIONOPTIONS -) -_COMMITREQUEST.fields_by_name[ - "writes" -].message_type = google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2._WRITE -_COMMITRESPONSE.fields_by_name[ - "write_results" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2._WRITERESULT -) -_COMMITRESPONSE.fields_by_name[ - "commit_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_RUNQUERYREQUEST.fields_by_name[ - "structured_query" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_query__pb2._STRUCTUREDQUERY -) -_RUNQUERYREQUEST.fields_by_name[ - "new_transaction" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._TRANSACTIONOPTIONS -) -_RUNQUERYREQUEST.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP 
-_RUNQUERYREQUEST.oneofs_by_name["query_type"].fields.append( - _RUNQUERYREQUEST.fields_by_name["structured_query"] -) -_RUNQUERYREQUEST.fields_by_name[ - "structured_query" -].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name["query_type"] -_RUNQUERYREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _RUNQUERYREQUEST.fields_by_name["transaction"] -) -_RUNQUERYREQUEST.fields_by_name[ - "transaction" -].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name["consistency_selector"] -_RUNQUERYREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _RUNQUERYREQUEST.fields_by_name["new_transaction"] -) -_RUNQUERYREQUEST.fields_by_name[ - "new_transaction" -].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name["consistency_selector"] -_RUNQUERYREQUEST.oneofs_by_name["consistency_selector"].fields.append( - _RUNQUERYREQUEST.fields_by_name["read_time"] -) -_RUNQUERYREQUEST.fields_by_name[ - "read_time" -].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name["consistency_selector"] -_RUNQUERYRESPONSE.fields_by_name[ - "document" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_RUNQUERYRESPONSE.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_WRITEREQUEST_LABELSENTRY.containing_type = _WRITEREQUEST -_WRITEREQUEST.fields_by_name[ - "writes" -].message_type = google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2._WRITE -_WRITEREQUEST.fields_by_name["labels"].message_type = _WRITEREQUEST_LABELSENTRY -_WRITERESPONSE.fields_by_name[ - "write_results" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2._WRITERESULT -) -_WRITERESPONSE.fields_by_name[ - "commit_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_LISTENREQUEST_LABELSENTRY.containing_type = _LISTENREQUEST -_LISTENREQUEST.fields_by_name["add_target"].message_type = _TARGET -_LISTENREQUEST.fields_by_name["labels"].message_type = _LISTENREQUEST_LABELSENTRY -_LISTENREQUEST.oneofs_by_name["target_change"].fields.append( - _LISTENREQUEST.fields_by_name["add_target"] -) -_LISTENREQUEST.fields_by_name[ - "add_target" -].containing_oneof = _LISTENREQUEST.oneofs_by_name["target_change"] -_LISTENREQUEST.oneofs_by_name["target_change"].fields.append( - _LISTENREQUEST.fields_by_name["remove_target"] -) -_LISTENREQUEST.fields_by_name[ - "remove_target" -].containing_oneof = _LISTENREQUEST.oneofs_by_name["target_change"] -_LISTENRESPONSE.fields_by_name["target_change"].message_type = _TARGETCHANGE -_LISTENRESPONSE.fields_by_name[ - "document_change" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2._DOCUMENTCHANGE -) -_LISTENRESPONSE.fields_by_name[ - "document_delete" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2._DOCUMENTDELETE -) -_LISTENRESPONSE.fields_by_name[ - "document_remove" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2._DOCUMENTREMOVE -) -_LISTENRESPONSE.fields_by_name[ - "filter" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_write__pb2._EXISTENCEFILTER -) -_LISTENRESPONSE.oneofs_by_name["response_type"].fields.append( - _LISTENRESPONSE.fields_by_name["target_change"] -) -_LISTENRESPONSE.fields_by_name[ - "target_change" -].containing_oneof = _LISTENRESPONSE.oneofs_by_name["response_type"] -_LISTENRESPONSE.oneofs_by_name["response_type"].fields.append( - _LISTENRESPONSE.fields_by_name["document_change"] -) 
-_LISTENRESPONSE.fields_by_name[ - "document_change" -].containing_oneof = _LISTENRESPONSE.oneofs_by_name["response_type"] -_LISTENRESPONSE.oneofs_by_name["response_type"].fields.append( - _LISTENRESPONSE.fields_by_name["document_delete"] -) -_LISTENRESPONSE.fields_by_name[ - "document_delete" -].containing_oneof = _LISTENRESPONSE.oneofs_by_name["response_type"] -_LISTENRESPONSE.oneofs_by_name["response_type"].fields.append( - _LISTENRESPONSE.fields_by_name["document_remove"] -) -_LISTENRESPONSE.fields_by_name[ - "document_remove" -].containing_oneof = _LISTENRESPONSE.oneofs_by_name["response_type"] -_LISTENRESPONSE.oneofs_by_name["response_type"].fields.append( - _LISTENRESPONSE.fields_by_name["filter"] -) -_LISTENRESPONSE.fields_by_name[ - "filter" -].containing_oneof = _LISTENRESPONSE.oneofs_by_name["response_type"] -_TARGET_DOCUMENTSTARGET.containing_type = _TARGET -_TARGET_QUERYTARGET.fields_by_name[ - "structured_query" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_query__pb2._STRUCTUREDQUERY -) -_TARGET_QUERYTARGET.containing_type = _TARGET -_TARGET_QUERYTARGET.oneofs_by_name["query_type"].fields.append( - _TARGET_QUERYTARGET.fields_by_name["structured_query"] -) -_TARGET_QUERYTARGET.fields_by_name[ - "structured_query" -].containing_oneof = _TARGET_QUERYTARGET.oneofs_by_name["query_type"] -_TARGET.fields_by_name["query"].message_type = _TARGET_QUERYTARGET -_TARGET.fields_by_name["documents"].message_type = _TARGET_DOCUMENTSTARGET -_TARGET.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TARGET.oneofs_by_name["target_type"].fields.append(_TARGET.fields_by_name["query"]) -_TARGET.fields_by_name["query"].containing_oneof = _TARGET.oneofs_by_name["target_type"] -_TARGET.oneofs_by_name["target_type"].fields.append(_TARGET.fields_by_name["documents"]) -_TARGET.fields_by_name["documents"].containing_oneof = _TARGET.oneofs_by_name[ - "target_type" -] -_TARGET.oneofs_by_name["resume_type"].fields.append( - _TARGET.fields_by_name["resume_token"] -) -_TARGET.fields_by_name["resume_token"].containing_oneof = _TARGET.oneofs_by_name[ - "resume_type" -] -_TARGET.oneofs_by_name["resume_type"].fields.append(_TARGET.fields_by_name["read_time"]) -_TARGET.fields_by_name["read_time"].containing_oneof = _TARGET.oneofs_by_name[ - "resume_type" -] -_TARGETCHANGE.fields_by_name[ - "target_change_type" -].enum_type = _TARGETCHANGE_TARGETCHANGETYPE -_TARGETCHANGE.fields_by_name[ - "cause" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_TARGETCHANGE.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TARGETCHANGE_TARGETCHANGETYPE.containing_type = _TARGETCHANGE -DESCRIPTOR.message_types_by_name["GetDocumentRequest"] = _GETDOCUMENTREQUEST -DESCRIPTOR.message_types_by_name["ListDocumentsRequest"] = _LISTDOCUMENTSREQUEST -DESCRIPTOR.message_types_by_name["ListDocumentsResponse"] = _LISTDOCUMENTSRESPONSE -DESCRIPTOR.message_types_by_name["CreateDocumentRequest"] = _CREATEDOCUMENTREQUEST -DESCRIPTOR.message_types_by_name["UpdateDocumentRequest"] = _UPDATEDOCUMENTREQUEST -DESCRIPTOR.message_types_by_name["DeleteDocumentRequest"] = _DELETEDOCUMENTREQUEST -DESCRIPTOR.message_types_by_name["BatchGetDocumentsRequest"] = _BATCHGETDOCUMENTSREQUEST -DESCRIPTOR.message_types_by_name[ - "BatchGetDocumentsResponse" -] = _BATCHGETDOCUMENTSRESPONSE -DESCRIPTOR.message_types_by_name["BeginTransactionRequest"] = _BEGINTRANSACTIONREQUEST 
-DESCRIPTOR.message_types_by_name["BeginTransactionResponse"] = _BEGINTRANSACTIONRESPONSE -DESCRIPTOR.message_types_by_name["CommitRequest"] = _COMMITREQUEST -DESCRIPTOR.message_types_by_name["CommitResponse"] = _COMMITRESPONSE -DESCRIPTOR.message_types_by_name["RollbackRequest"] = _ROLLBACKREQUEST -DESCRIPTOR.message_types_by_name["RunQueryRequest"] = _RUNQUERYREQUEST -DESCRIPTOR.message_types_by_name["RunQueryResponse"] = _RUNQUERYRESPONSE -DESCRIPTOR.message_types_by_name["WriteRequest"] = _WRITEREQUEST -DESCRIPTOR.message_types_by_name["WriteResponse"] = _WRITERESPONSE -DESCRIPTOR.message_types_by_name["ListenRequest"] = _LISTENREQUEST -DESCRIPTOR.message_types_by_name["ListenResponse"] = _LISTENRESPONSE -DESCRIPTOR.message_types_by_name["Target"] = _TARGET -DESCRIPTOR.message_types_by_name["TargetChange"] = _TARGETCHANGE -DESCRIPTOR.message_types_by_name["ListCollectionIdsRequest"] = _LISTCOLLECTIONIDSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListCollectionIdsResponse" -] = _LISTCOLLECTIONIDSRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -GetDocumentRequest = _reflection.GeneratedProtocolMessageType( - "GetDocumentRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETDOCUMENTREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.GetDocument][google.firestore.v1beta1.Firestore.GetDocument]. - - - Attributes: - name: - Required. The resource name of the Document to get. In the - format: ``projects/{project_id}/databases/{database_id}/docume - nts/{document_path}``. - mask: - The fields to return. If not set, returns all fields. If the - document has a field that is not present in this mask, that - field will not be returned in the response. - consistency_selector: - The consistency mode for this transaction. If not set, - defaults to strong consistency. - transaction: - Reads the document in a transaction. - read_time: - Reads the version of the document at the given time. This may - not be older than 60 seconds. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.GetDocumentRequest) - ), -) -_sym_db.RegisterMessage(GetDocumentRequest) - -ListDocumentsRequest = _reflection.GeneratedProtocolMessageType( - "ListDocumentsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTDOCUMENTSREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments]. - - - Attributes: - parent: - Required. The parent resource name. In the format: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/{doc - ument_path}``. For example: ``projects/my- - project/databases/my-database/documents`` or ``projects/my- - project/databases/my-database/documents/chatrooms/my- - chatroom`` - collection_id: - Required. The collection ID, relative to ``parent``, to list. - For example: ``chatrooms`` or ``messages``. - page_size: - The maximum number of documents to return. - page_token: - The ``next_page_token`` value returned from a previous List - request, if any. - order_by: - The order to sort results by. For example: ``priority desc, - name``. - mask: - The fields to return. If not set, returns all fields. If a - document has a field that is not present in this mask, that - field will not be returned in the response. - consistency_selector: - The consistency mode for this transaction. If not set, - defaults to strong consistency. 
- transaction: - Reads documents in a transaction. - read_time: - Reads documents as they were at the given time. This may not - be older than 60 seconds. - show_missing: - If the list should show missing documents. A missing document - is a document that does not exist but has sub-documents. These - documents will be returned with a key but will not have - fields, [Document.create\_time][google.firestore.v1beta1.Docum - ent.create\_time], or [Document.update\_time][google.firestore - .v1beta1.Document.update\_time] set. Requests with - ``show_missing`` may not specify ``where`` or ``order_by``. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ListDocumentsRequest) - ), -) -_sym_db.RegisterMessage(ListDocumentsRequest) - -ListDocumentsResponse = _reflection.GeneratedProtocolMessageType( - "ListDocumentsResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTDOCUMENTSRESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The response for - [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments]. - - - Attributes: - documents: - The Documents found. - next_page_token: - The next page token. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ListDocumentsResponse) - ), -) -_sym_db.RegisterMessage(ListDocumentsResponse) - -CreateDocumentRequest = _reflection.GeneratedProtocolMessageType( - "CreateDocumentRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEDOCUMENTREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.CreateDocument][google.firestore.v1beta1.Firestore.CreateDocument]. - - - Attributes: - parent: - Required. The parent resource. For example: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/chat - rooms/{chatroom_id}`` - collection_id: - Required. The collection ID, relative to ``parent``, to list. - For example: ``chatrooms``. - document_id: - The client-assigned document ID to use for this document. - Optional. If not specified, an ID will be assigned by the - service. - document: - Required. The document to create. ``name`` must not be set. - mask: - The fields to return. If not set, returns all fields. If the - document has a field that is not present in this mask, that - field will not be returned in the response. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.CreateDocumentRequest) - ), -) -_sym_db.RegisterMessage(CreateDocumentRequest) - -UpdateDocumentRequest = _reflection.GeneratedProtocolMessageType( - "UpdateDocumentRequest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEDOCUMENTREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.UpdateDocument][google.firestore.v1beta1.Firestore.UpdateDocument]. - - - Attributes: - document: - Required. The updated document. Creates the document if it - does not already exist. - update_mask: - The fields to update. None of the field paths in the mask may - contain a reserved name. If the document exists on the server - and has fields not referenced in the mask, they are left - unchanged. Fields referenced in the mask, but not present in - the input document, are deleted from the document on the - server. - mask: - The fields to return. If not set, returns all fields. If the - document has a field that is not present in this mask, that - field will not be returned in the response. 
- current_document: - An optional precondition on the document. The request will - fail if this is set and not met by the target document. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.UpdateDocumentRequest) - ), -) -_sym_db.RegisterMessage(UpdateDocumentRequest) - -DeleteDocumentRequest = _reflection.GeneratedProtocolMessageType( - "DeleteDocumentRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETEDOCUMENTREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.DeleteDocument][google.firestore.v1beta1.Firestore.DeleteDocument]. - - - Attributes: - name: - Required. The resource name of the Document to delete. In the - format: ``projects/{project_id}/databases/{database_id}/docume - nts/{document_path}``. - current_document: - An optional precondition on the document. The request will - fail if this is set and not met by the target document. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DeleteDocumentRequest) - ), -) -_sym_db.RegisterMessage(DeleteDocumentRequest) - -BatchGetDocumentsRequest = _reflection.GeneratedProtocolMessageType( - "BatchGetDocumentsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_BATCHGETDOCUMENTSREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments]. - - - Attributes: - database: - Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - documents: - The names of the documents to retrieve. In the format: ``proje - cts/{project_id}/databases/{database_id}/documents/{document_p - ath}``. The request will fail if any of the document is not a - child resource of the given ``database``. Duplicate names will - be elided. - mask: - The fields to return. If not set, returns all fields. If a - document has a field that is not present in this mask, that - field will not be returned in the response. - consistency_selector: - The consistency mode for this transaction. If not set, - defaults to strong consistency. - transaction: - Reads documents in a transaction. - new_transaction: - Starts a new transaction and reads the documents. Defaults to - a read-only transaction. The new transaction ID will be - returned as the first response in the stream. - read_time: - Reads documents as they were at the given time. This may not - be older than 60 seconds. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.BatchGetDocumentsRequest) - ), -) -_sym_db.RegisterMessage(BatchGetDocumentsRequest) - -BatchGetDocumentsResponse = _reflection.GeneratedProtocolMessageType( - "BatchGetDocumentsResponse", - (_message.Message,), - dict( - DESCRIPTOR=_BATCHGETDOCUMENTSRESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The streamed response for - [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments]. - - - Attributes: - result: - A single result. This can be empty if the server is just - returning a transaction. - found: - A document that was requested. - missing: - A document name that was requested but does not exist. In the - format: ``projects/{project_id}/databases/{database_id}/docume - nts/{document_path}``. - transaction: - The transaction that was started as part of this request. 
Will - only be set in the first response, and only if [BatchGetDocume - ntsRequest.new\_transaction][google.firestore.v1beta1.BatchGet - DocumentsRequest.new\_transaction] was set in the request. - read_time: - The time at which the document was read. This may be - monotically increasing, in this case the previous documents in - the result stream are guaranteed not to have changed between - their read\_time and this one. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.BatchGetDocumentsResponse) - ), -) -_sym_db.RegisterMessage(BatchGetDocumentsResponse) - -BeginTransactionRequest = _reflection.GeneratedProtocolMessageType( - "BeginTransactionRequest", - (_message.Message,), - dict( - DESCRIPTOR=_BEGINTRANSACTIONREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction]. - - - Attributes: - database: - Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - options: - The options for the transaction. Defaults to a read-write - transaction. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.BeginTransactionRequest) - ), -) -_sym_db.RegisterMessage(BeginTransactionRequest) - -BeginTransactionResponse = _reflection.GeneratedProtocolMessageType( - "BeginTransactionResponse", - (_message.Message,), - dict( - DESCRIPTOR=_BEGINTRANSACTIONRESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The response for - [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction]. - - - Attributes: - transaction: - The transaction that was started. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.BeginTransactionResponse) - ), -) -_sym_db.RegisterMessage(BeginTransactionResponse) - -CommitRequest = _reflection.GeneratedProtocolMessageType( - "CommitRequest", - (_message.Message,), - dict( - DESCRIPTOR=_COMMITREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit]. - - - Attributes: - database: - Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - writes: - The writes to apply. Always executed atomically and in order. - transaction: - If set, applies all writes in this transaction, and commits - it. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.CommitRequest) - ), -) -_sym_db.RegisterMessage(CommitRequest) - -CommitResponse = _reflection.GeneratedProtocolMessageType( - "CommitResponse", - (_message.Message,), - dict( - DESCRIPTOR=_COMMITRESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The response for - [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit]. - - - Attributes: - write_results: - The result of applying the writes. This i-th write result - corresponds to the i-th write in the request. - commit_time: - The time at which the commit occurred. 
- """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.CommitResponse) - ), -) -_sym_db.RegisterMessage(CommitResponse) - -RollbackRequest = _reflection.GeneratedProtocolMessageType( - "RollbackRequest", - (_message.Message,), - dict( - DESCRIPTOR=_ROLLBACKREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.Rollback][google.firestore.v1beta1.Firestore.Rollback]. - - - Attributes: - database: - Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - transaction: - Required. The transaction to roll back. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.RollbackRequest) - ), -) -_sym_db.RegisterMessage(RollbackRequest) - -RunQueryRequest = _reflection.GeneratedProtocolMessageType( - "RunQueryRequest", - (_message.Message,), - dict( - DESCRIPTOR=_RUNQUERYREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery]. - - - Attributes: - parent: - Required. The parent resource name. In the format: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/{doc - ument_path}``. For example: ``projects/my- - project/databases/my-database/documents`` or ``projects/my- - project/databases/my-database/documents/chatrooms/my- - chatroom`` - query_type: - The query to run. - structured_query: - A structured query. - consistency_selector: - The consistency mode for this transaction. If not set, - defaults to strong consistency. - transaction: - Reads documents in a transaction. - new_transaction: - Starts a new transaction and reads the documents. Defaults to - a read-only transaction. The new transaction ID will be - returned as the first response in the stream. - read_time: - Reads documents as they were at the given time. This may not - be older than 60 seconds. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.RunQueryRequest) - ), -) -_sym_db.RegisterMessage(RunQueryRequest) - -RunQueryResponse = _reflection.GeneratedProtocolMessageType( - "RunQueryResponse", - (_message.Message,), - dict( - DESCRIPTOR=_RUNQUERYRESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The response for - [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery]. - - - Attributes: - transaction: - The transaction that was started as part of this request. Can - only be set in the first response, and only if [RunQueryReques - t.new\_transaction][google.firestore.v1beta1.RunQueryRequest.n - ew\_transaction] was set in the request. If set, no other - fields will be set in this response. - document: - A query result. Not set when reporting partial progress. - read_time: - The time at which the document was read. This may be - monotonically increasing; in this case, the previous documents - in the result stream are guaranteed not to have changed - between their ``read_time`` and this one. If the query - returns no results, a response with ``read_time`` and no - ``document`` will be sent, and this represents the time at - which the query was run. - skipped_results: - The number of results that have been skipped due to an offset - between the last response and the current response. 
- """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.RunQueryResponse) - ), -) -_sym_db.RegisterMessage(RunQueryResponse) - -WriteRequest = _reflection.GeneratedProtocolMessageType( - "WriteRequest", - (_message.Message,), - dict( - LabelsEntry=_reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - dict( - DESCRIPTOR=_WRITEREQUEST_LABELSENTRY, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2" - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.WriteRequest.LabelsEntry) - ), - ), - DESCRIPTOR=_WRITEREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.Write][google.firestore.v1beta1.Firestore.Write]. - - The first request creates a stream, or resumes an existing one from a - token. - - When creating a new stream, the server replies with a response - containing only an ID and a token, to use in the next request. - - When resuming a stream, the server first streams any responses later - than the given token, then a response containing only an up-to-date - token, to use in the next request. - - - Attributes: - database: - Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. This is - only required in the first message. - stream_id: - The ID of the write stream to resume. This may only be set in - the first message. When left empty, a new write stream will be - created. - writes: - The writes to apply. Always executed atomically and in order. - This must be empty on the first request. This may be empty on - the last request. This must not be empty on all other - requests. - stream_token: - A stream token that was previously sent by the server. The - client should set this field to the token from the most recent - [WriteResponse][google.firestore.v1beta1.WriteResponse] it has - received. This acknowledges that the client has received - responses up to this token. After sending this token, earlier - tokens may not be used anymore. The server may close the - stream if there are too many unacknowledged responses. Leave - this field unset when creating a new stream. To resume a - stream at a specific point, set this field and the - ``stream_id`` field. Leave this field unset when creating a - new stream. - labels: - Labels associated with this write request. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.WriteRequest) - ), -) -_sym_db.RegisterMessage(WriteRequest) -_sym_db.RegisterMessage(WriteRequest.LabelsEntry) - -WriteResponse = _reflection.GeneratedProtocolMessageType( - "WriteResponse", - (_message.Message,), - dict( - DESCRIPTOR=_WRITERESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The response for - [Firestore.Write][google.firestore.v1beta1.Firestore.Write]. - - - Attributes: - stream_id: - The ID of the stream. Only set on the first message, when a - new stream was created. - stream_token: - A token that represents the position of this response in the - stream. This can be used by a client to resume the stream at - this point. This field is always set. - write_results: - The result of applying the writes. This i-th write result - corresponds to the i-th write in the request. - commit_time: - The time at which the commit occurred. 
- """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.WriteResponse) - ), -) -_sym_db.RegisterMessage(WriteResponse) - -ListenRequest = _reflection.GeneratedProtocolMessageType( - "ListenRequest", - (_message.Message,), - dict( - LabelsEntry=_reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - dict( - DESCRIPTOR=_LISTENREQUEST_LABELSENTRY, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2" - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ListenRequest.LabelsEntry) - ), - ), - DESCRIPTOR=_LISTENREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""A request for - [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen] - - - Attributes: - database: - Required. The database name. In the format: - ``projects/{project_id}/databases/{database_id}``. - target_change: - The supported target changes. - add_target: - A target to add to this stream. - remove_target: - The ID of a target to remove from this stream. - labels: - Labels associated with this target change. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ListenRequest) - ), -) -_sym_db.RegisterMessage(ListenRequest) -_sym_db.RegisterMessage(ListenRequest.LabelsEntry) - -ListenResponse = _reflection.GeneratedProtocolMessageType( - "ListenResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTENRESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The response for - [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen]. - - - Attributes: - response_type: - The supported responses. - target_change: - Targets have changed. - document_change: - A [Document][google.firestore.v1beta1.Document] has changed. - document_delete: - A [Document][google.firestore.v1beta1.Document] has been - deleted. - document_remove: - A [Document][google.firestore.v1beta1.Document] has been - removed from a target (because it is no longer relevant to - that target). - filter: - A filter to apply to the set of documents previously returned - for the given target. Returned when documents may have been - removed from the given target, but the exact documents are - unknown. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ListenResponse) - ), -) -_sym_db.RegisterMessage(ListenResponse) - -Target = _reflection.GeneratedProtocolMessageType( - "Target", - (_message.Message,), - dict( - DocumentsTarget=_reflection.GeneratedProtocolMessageType( - "DocumentsTarget", - (_message.Message,), - dict( - DESCRIPTOR=_TARGET_DOCUMENTSTARGET, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""A target specified by a set of documents names. - - - Attributes: - documents: - The names of the documents to retrieve. In the format: ``proje - cts/{project_id}/databases/{database_id}/documents/{document_p - ath}``. The request will fail if any of the document is not a - child resource of the given ``database``. Duplicate names will - be elided. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Target.DocumentsTarget) - ), - ), - QueryTarget=_reflection.GeneratedProtocolMessageType( - "QueryTarget", - (_message.Message,), - dict( - DESCRIPTOR=_TARGET_QUERYTARGET, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""A target specified by a query. - - - Attributes: - parent: - The parent resource name. 
In the format: - ``projects/{project_id}/databases/{database_id}/documents`` or - ``projects/{project_id}/databases/{database_id}/documents/{doc - ument_path}``. For example: ``projects/my- - project/databases/my-database/documents`` or ``projects/my- - project/databases/my-database/documents/chatrooms/my- - chatroom`` - query_type: - The query to run. - structured_query: - A structured query. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Target.QueryTarget) - ), - ), - DESCRIPTOR=_TARGET, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""A specification of a set of documents to listen to. - - - Attributes: - target_type: - The type of target to listen to. - query: - A target specified by a query. - documents: - A target specified by a set of document names. - resume_type: - When to start listening. If not specified, all matching - Documents are returned before any subsequent changes. - resume_token: - A resume token from a prior - [TargetChange][google.firestore.v1beta1.TargetChange] for an - identical target. Using a resume token with a different - target is unsupported and may fail. - read_time: - Start listening after a specific ``read_time``. The client - must know the state of matching documents at this time. - target_id: - The target ID that identifies the target on the stream. Must - be a positive number and non-zero. - once: - If the target should be removed once it is current and - consistent. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Target) - ), -) -_sym_db.RegisterMessage(Target) -_sym_db.RegisterMessage(Target.DocumentsTarget) -_sym_db.RegisterMessage(Target.QueryTarget) - -TargetChange = _reflection.GeneratedProtocolMessageType( - "TargetChange", - (_message.Message,), - dict( - DESCRIPTOR=_TARGETCHANGE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""Targets being watched have changed. - - - Attributes: - target_change_type: - The type of change that occurred. - target_ids: - The target IDs of targets that have changed. If empty, the - change applies to all targets. The order of the target IDs is - not defined. - cause: - The error that resulted in this change, if applicable. - resume_token: - A token that can be used to resume the stream for the given - ``target_ids``, or all targets if ``target_ids`` is empty. - Not set on every target change. - read_time: - The consistent ``read_time`` for the given ``target_ids`` - (omitted when the target\_ids are not at a consistent - snapshot). The stream is guaranteed to send a ``read_time`` - with ``target_ids`` empty whenever the entire stream reaches a - new consistent snapshot. ADD, CURRENT, and RESET messages are - guaranteed to (eventually) result in a new consistent snapshot - (while NO\_CHANGE and REMOVE messages are not). For a given - stream, ``read_time`` is guaranteed to be monotonically - increasing. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.TargetChange) - ), -) -_sym_db.RegisterMessage(TargetChange) - -ListCollectionIdsRequest = _reflection.GeneratedProtocolMessageType( - "ListCollectionIdsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTCOLLECTIONIDSREQUEST, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The request for - [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds]. - - - Attributes: - parent: - Required. The parent document. 
In the format: ``projects/{proj - ect_id}/databases/{database_id}/documents/{document_path}``. - For example: ``projects/my-project/databases/my- - database/documents/chatrooms/my-chatroom`` - page_size: - The maximum number of results to return. - page_token: - A page token. Must be a value from [ListCollectionIdsResponse] - [google.firestore.v1beta1.ListCollectionIdsResponse]. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ListCollectionIdsRequest) - ), -) -_sym_db.RegisterMessage(ListCollectionIdsRequest) - -ListCollectionIdsResponse = _reflection.GeneratedProtocolMessageType( - "ListCollectionIdsResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTCOLLECTIONIDSRESPONSE, - __module__="google.cloud.firestore_v1beta1.proto.firestore_pb2", - __doc__="""The response from - [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds]. - - - Attributes: - collection_ids: - The collection ids. - next_page_token: - A page token that may be used to continue the list. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ListCollectionIdsResponse) - ), -) -_sym_db.RegisterMessage(ListCollectionIdsResponse) - - -DESCRIPTOR._options = None -_GETDOCUMENTREQUEST.fields_by_name["name"]._options = None -_LISTDOCUMENTSREQUEST.fields_by_name["parent"]._options = None -_LISTDOCUMENTSREQUEST.fields_by_name["collection_id"]._options = None -_CREATEDOCUMENTREQUEST.fields_by_name["parent"]._options = None -_CREATEDOCUMENTREQUEST.fields_by_name["collection_id"]._options = None -_CREATEDOCUMENTREQUEST.fields_by_name["document"]._options = None -_UPDATEDOCUMENTREQUEST.fields_by_name["document"]._options = None -_DELETEDOCUMENTREQUEST.fields_by_name["name"]._options = None -_BATCHGETDOCUMENTSREQUEST.fields_by_name["database"]._options = None -_BEGINTRANSACTIONREQUEST.fields_by_name["database"]._options = None -_COMMITREQUEST.fields_by_name["database"]._options = None -_ROLLBACKREQUEST.fields_by_name["database"]._options = None -_ROLLBACKREQUEST.fields_by_name["transaction"]._options = None -_RUNQUERYREQUEST.fields_by_name["parent"]._options = None -_WRITEREQUEST_LABELSENTRY._options = None -_WRITEREQUEST.fields_by_name["database"]._options = None -_LISTENREQUEST_LABELSENTRY._options = None -_LISTENREQUEST.fields_by_name["database"]._options = None -_LISTCOLLECTIONIDSREQUEST.fields_by_name["parent"]._options = None - -_FIRESTORE = _descriptor.ServiceDescriptor( - name="Firestore", - full_name="google.firestore.v1beta1.Firestore", - file=DESCRIPTOR, - index=0, - serialized_options=_b( - "\312A\030firestore.googleapis.com\322AXhttps://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastore" - ), - serialized_start=4999, - serialized_end=7714, - methods=[ - _descriptor.MethodDescriptor( - name="GetDocument", - full_name="google.firestore.v1beta1.Firestore.GetDocument", - index=0, - containing_service=None, - input_type=_GETDOCUMENTREQUEST, - output_type=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT, - serialized_options=_b( - "\202\323\344\223\0027\0225/v1beta1/{name=projects/*/databases/*/documents/*/**}" - ), - ), - _descriptor.MethodDescriptor( - name="ListDocuments", - full_name="google.firestore.v1beta1.Firestore.ListDocuments", - index=1, - containing_service=None, - input_type=_LISTDOCUMENTSREQUEST, - output_type=_LISTDOCUMENTSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002I\022G/v1beta1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}" - ), - ), - 
_descriptor.MethodDescriptor( - name="CreateDocument", - full_name="google.firestore.v1beta1.Firestore.CreateDocument", - index=2, - containing_service=None, - input_type=_CREATEDOCUMENTREQUEST, - output_type=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT, - serialized_options=_b( - '\202\323\344\223\002Q"E/v1beta1/{parent=projects/*/databases/*/documents/**}/{collection_id}:\010document' - ), - ), - _descriptor.MethodDescriptor( - name="UpdateDocument", - full_name="google.firestore.v1beta1.Firestore.UpdateDocument", - index=3, - containing_service=None, - input_type=_UPDATEDOCUMENTREQUEST, - output_type=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT, - serialized_options=_b( - "\202\323\344\223\002J2>/v1beta1/{document.name=projects/*/databases/*/documents/*/**}:\010document\332A\024document,update_mask" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteDocument", - full_name="google.firestore.v1beta1.Firestore.DeleteDocument", - index=4, - containing_service=None, - input_type=_DELETEDOCUMENTREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\0027*5/v1beta1/{name=projects/*/databases/*/documents/*/**}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="BatchGetDocuments", - full_name="google.firestore.v1beta1.Firestore.BatchGetDocuments", - index=5, - containing_service=None, - input_type=_BATCHGETDOCUMENTSREQUEST, - output_type=_BATCHGETDOCUMENTSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002B"=/v1beta1/{database=projects/*/databases/*}/documents:batchGet:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="BeginTransaction", - full_name="google.firestore.v1beta1.Firestore.BeginTransaction", - index=6, - containing_service=None, - input_type=_BEGINTRANSACTIONREQUEST, - output_type=_BEGINTRANSACTIONRESPONSE, - serialized_options=_b( - '\202\323\344\223\002J"E/v1beta1/{database=projects/*/databases/*}/documents:beginTransaction:\001*\332A\010database' - ), - ), - _descriptor.MethodDescriptor( - name="Commit", - full_name="google.firestore.v1beta1.Firestore.Commit", - index=7, - containing_service=None, - input_type=_COMMITREQUEST, - output_type=_COMMITRESPONSE, - serialized_options=_b( - '\202\323\344\223\002@";/v1beta1/{database=projects/*/databases/*}/documents:commit:\001*\332A\017database,writes' - ), - ), - _descriptor.MethodDescriptor( - name="Rollback", - full_name="google.firestore.v1beta1.Firestore.Rollback", - index=8, - containing_service=None, - input_type=_ROLLBACKREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - '\202\323\344\223\002B"=/v1beta1/{database=projects/*/databases/*}/documents:rollback:\001*\332A\024database,transaction' - ), - ), - _descriptor.MethodDescriptor( - name="RunQuery", - full_name="google.firestore.v1beta1.Firestore.RunQuery", - index=9, - containing_service=None, - input_type=_RUNQUERYREQUEST, - output_type=_RUNQUERYRESPONSE, - serialized_options=_b( - '\202\323\344\223\002\207\001";/v1beta1/{parent=projects/*/databases/*/documents}:runQuery:\001*ZE"@/v1beta1/{parent=projects/*/databases/*/documents/*/**}:runQuery:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="Write", - full_name="google.firestore.v1beta1.Firestore.Write", - index=10, - containing_service=None, - input_type=_WRITEREQUEST, - output_type=_WRITERESPONSE, - serialized_options=_b( - '\202\323\344\223\002?":/v1beta1/{database=projects/*/databases/*}/documents:write:\001*' - ), - ), - 
_descriptor.MethodDescriptor( - name="Listen", - full_name="google.firestore.v1beta1.Firestore.Listen", - index=11, - containing_service=None, - input_type=_LISTENREQUEST, - output_type=_LISTENRESPONSE, - serialized_options=_b( - '\202\323\344\223\002@";/v1beta1/{database=projects/*/databases/*}/documents:listen:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="ListCollectionIds", - full_name="google.firestore.v1beta1.Firestore.ListCollectionIds", - index=12, - containing_service=None, - input_type=_LISTCOLLECTIONIDSREQUEST, - output_type=_LISTCOLLECTIONIDSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002\231\001"D/v1beta1/{parent=projects/*/databases/*/documents}:listCollectionIds:\001*ZN"I/v1beta1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds:\001*\332A\006parent' - ), - ), - ], -) -_sym_db.RegisterServiceDescriptor(_FIRESTORE) - -DESCRIPTOR.services_by_name["Firestore"] = _FIRESTORE - -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/firestore_pb2_grpc.py b/firestore/google/cloud/firestore_v1beta1/proto/firestore_pb2_grpc.py deleted file mode 100644 index cf23b20c3884..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/firestore_pb2_grpc.py +++ /dev/null @@ -1,294 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.firestore_v1beta1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - firestore_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class FirestoreStub(object): - """Specification of the Firestore API. - - The Cloud Firestore service. - - This service exposes several types of comparable timestamps: - - * `create_time` - The time at which a document was created. Changes only - when a document is deleted, then re-created. Increases in a strict - monotonic fashion. - * `update_time` - The time at which a document was last updated. Changes - every time a document is modified. Does not change when a write results - in no modifications. Increases in a strict monotonic fashion. - * `read_time` - The time at which a particular state was observed. Used - to denote a consistent snapshot of the database or the time at which a - Document was observed to not exist. - * `commit_time` - The time at which the writes in a transaction were - committed. Any read with an equal or greater `read_time` is guaranteed - to see the effects of the transaction. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.GetDocument = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/GetDocument", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.GetDocumentRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString, - ) - self.ListDocuments = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/ListDocuments", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsResponse.FromString, - ) - self.CreateDocument = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/CreateDocument", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CreateDocumentRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString, - ) - self.UpdateDocument = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/UpdateDocument", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.UpdateDocumentRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString, - ) - self.DeleteDocument = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/DeleteDocument", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.DeleteDocumentRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.BatchGetDocuments = channel.unary_stream( - "/google.firestore.v1beta1.Firestore/BatchGetDocuments", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsResponse.FromString, - ) - self.BeginTransaction = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/BeginTransaction", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionResponse.FromString, - ) - self.Commit = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/Commit", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitResponse.FromString, - ) - self.Rollback = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/Rollback", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RollbackRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.RunQuery = channel.unary_stream( - "/google.firestore.v1beta1.Firestore/RunQuery", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryResponse.FromString, - ) - self.Write = channel.stream_stream( - "/google.firestore.v1beta1.Firestore/Write", - 
request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteResponse.FromString, - ) - self.Listen = channel.stream_stream( - "/google.firestore.v1beta1.Firestore/Listen", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenResponse.FromString, - ) - self.ListCollectionIds = channel.unary_unary( - "/google.firestore.v1beta1.Firestore/ListCollectionIds", - request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsResponse.FromString, - ) - - -class FirestoreServicer(object): - """Specification of the Firestore API. - - The Cloud Firestore service. - - This service exposes several types of comparable timestamps: - - * `create_time` - The time at which a document was created. Changes only - when a document is deleted, then re-created. Increases in a strict - monotonic fashion. - * `update_time` - The time at which a document was last updated. Changes - every time a document is modified. Does not change when a write results - in no modifications. Increases in a strict monotonic fashion. - * `read_time` - The time at which a particular state was observed. Used - to denote a consistent snapshot of the database or the time at which a - Document was observed to not exist. - * `commit_time` - The time at which the writes in a transaction were - committed. Any read with an equal or greater `read_time` is guaranteed - to see the effects of the transaction. - """ - - def GetDocument(self, request, context): - """Gets a single document. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListDocuments(self, request, context): - """Lists documents. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateDocument(self, request, context): - """Creates a new document. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateDocument(self, request, context): - """Updates or inserts a document. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteDocument(self, request, context): - """Deletes a document. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def BatchGetDocuments(self, request, context): - """Gets multiple documents. - - Documents returned by this method are not guaranteed to be returned in the - same order that they were requested. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def BeginTransaction(self, request, context): - """Starts a new transaction. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Commit(self, request, context): - """Commits a transaction, while optionally updating documents. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Rollback(self, request, context): - """Rolls back a transaction. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def RunQuery(self, request, context): - """Runs a query. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Write(self, request_iterator, context): - """Streams batches of document updates and deletes, in order. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Listen(self, request_iterator, context): - """Listens to changes. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListCollectionIds(self, request, context): - """Lists all the collection IDs underneath a document. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_FirestoreServicer_to_server(servicer, server): - rpc_method_handlers = { - "GetDocument": grpc.unary_unary_rpc_method_handler( - servicer.GetDocument, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.GetDocumentRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.SerializeToString, - ), - "ListDocuments": grpc.unary_unary_rpc_method_handler( - servicer.ListDocuments, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsResponse.SerializeToString, - ), - "CreateDocument": grpc.unary_unary_rpc_method_handler( - servicer.CreateDocument, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CreateDocumentRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.SerializeToString, - ), - "UpdateDocument": grpc.unary_unary_rpc_method_handler( - servicer.UpdateDocument, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.UpdateDocumentRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.SerializeToString, - ), - "DeleteDocument": grpc.unary_unary_rpc_method_handler( - servicer.DeleteDocument, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.DeleteDocumentRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "BatchGetDocuments": grpc.unary_stream_rpc_method_handler( - servicer.BatchGetDocuments, - 
request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsResponse.SerializeToString, - ), - "BeginTransaction": grpc.unary_unary_rpc_method_handler( - servicer.BeginTransaction, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionResponse.SerializeToString, - ), - "Commit": grpc.unary_unary_rpc_method_handler( - servicer.Commit, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitResponse.SerializeToString, - ), - "Rollback": grpc.unary_unary_rpc_method_handler( - servicer.Rollback, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RollbackRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "RunQuery": grpc.unary_stream_rpc_method_handler( - servicer.RunQuery, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryResponse.SerializeToString, - ), - "Write": grpc.stream_stream_rpc_method_handler( - servicer.Write, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteResponse.SerializeToString, - ), - "Listen": grpc.stream_stream_rpc_method_handler( - servicer.Listen, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenResponse.SerializeToString, - ), - "ListCollectionIds": grpc.unary_unary_rpc_method_handler( - servicer.ListCollectionIds, - request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsRequest.FromString, - response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.firestore.v1beta1.Firestore", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/index.proto b/firestore/google/cloud/firestore_v1beta1/proto/index.proto deleted file mode 100644 index c5784e0eaab7..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/index.proto +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta1; - -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; -option java_multiple_files = true; -option java_outer_classname = "IndexProto"; -option java_package = "com.google.firestore.admin.v1beta1"; -option objc_class_prefix = "GCFS"; - - -// A field of an index. -message IndexField { - // The mode determines how a field is indexed. - enum Mode { - // The mode is unspecified. - MODE_UNSPECIFIED = 0; - - // The field's values are indexed so as to support sequencing in - // ascending order and also query by <, >, <=, >=, and =. - ASCENDING = 2; - - // The field's values are indexed so as to support sequencing in - // descending order and also query by <, >, <=, >=, and =. - DESCENDING = 3; - - // The field's array values are indexed so as to support membership using - // ARRAY_CONTAINS queries. - ARRAY_CONTAINS = 4; - } - - // The path of the field. Must match the field path specification described - // by [google.firestore.v1beta1.Document.fields][fields]. - // Special field path `__name__` may be used by itself or at the end of a - // path. `__type__` may be used only at the end of path. - string field_path = 1; - - // The field's mode. - Mode mode = 2; -} - -// An index definition. -message Index { - // The state of an index. During index creation, an index will be in the - // `CREATING` state. If the index is created successfully, it will transition - // to the `READY` state. If the index is not able to be created, it will - // transition to the `ERROR` state. - enum State { - // The state is unspecified. - STATE_UNSPECIFIED = 0; - - // The index is being created. - // There is an active long-running operation for the index. - // The index is updated when writing a document. - // Some index data may exist. - CREATING = 3; - - // The index is ready to be used. - // The index is updated when writing a document. - // The index is fully populated from all stored documents it applies to. - READY = 2; - - // The index was being created, but something went wrong. - // There is no active long-running operation for the index, - // and the most recently finished long-running operation failed. - // The index is not updated when writing a document. - // Some index data may exist. - ERROR = 5; - } - - // The resource name of the index. - // Output only. - string name = 1; - - // The collection ID to which this index applies. Required. - string collection_id = 2; - - // The fields to index. - repeated IndexField fields = 3; - - // The state of the index. - // Output only. - State state = 6; -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/location.proto b/firestore/google/cloud/firestore_v1beta1/proto/location.proto deleted file mode 100644 index db7e8544b709..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/location.proto +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta1; - -import "google/api/annotations.proto"; -import "google/type/latlng.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin"; -option java_multiple_files = true; -option java_outer_classname = "LocationProto"; -option java_package = "com.google.firestore.admin.v1beta1"; -option objc_class_prefix = "GCFS"; - - -// The metadata message for [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata]. -message LocationMetadata { - -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/operation.proto b/firestore/google/cloud/firestore_v1beta1/proto/operation.proto deleted file mode 100644 index c2a1b001e6a8..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/operation.proto +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.admin.v1beta2; - -import "google/api/annotations.proto"; -import "google/firestore/admin/v1beta2/index.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2"; -option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin"; -option java_multiple_files = true; -option java_outer_classname = "OperationProto"; -option java_package = "com.google.firestore.admin.v1beta2"; -option objc_class_prefix = "GCFS"; - - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta2.FirestoreAdmin.CreateIndex]. -message IndexOperationMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The index resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}` - string index = 3; - - // The state of the operation. - OperationState state = 4; - - // The progress, in documents, of this operation. - Progress progress_documents = 5; - - // The progress, in bytes, of this operation. 
- Progress progress_bytes = 6; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.UpdateField][google.firestore.admin.v1beta2.FirestoreAdmin.UpdateField]. -message FieldOperationMetadata { - // Information about an index configuration change. - message IndexConfigDelta { - // Specifies how the index is changing. - enum ChangeType { - // The type of change is not specified or known. - CHANGE_TYPE_UNSPECIFIED = 0; - - // The single field index is being added. - ADD = 1; - - // The single field index is being removed. - REMOVE = 2; - } - - // Specifies how the index is changing. - ChangeType change_type = 1; - - // The index being changed. - Index index = 2; - } - - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The field resource that this operation is acting on. For example: - // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}` - string field = 3; - - // A list of [IndexConfigDelta][google.firestore.admin.v1beta2.FieldOperationMetadata.IndexConfigDelta], which describe the intent of this - // operation. - repeated IndexConfigDelta index_config_deltas = 4; - - // The state of the operation. - OperationState state = 5; - - // The progress, in documents, of this operation. - Progress document_progress = 6; - - // The progress, in bytes, of this operation. - Progress bytes_progress = 7; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ExportDocuments]. -message ExportDocumentsMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The state of the export operation. - OperationState operation_state = 3; - - // The progress, in documents, of this operation. - Progress progress_documents = 4; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 5; - - // Which collection ids are being exported. - repeated string collection_ids = 6; - - // Where the entities are being exported to. - string output_uri_prefix = 7; -} - -// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from -// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ImportDocuments]. -message ImportDocumentsMetadata { - // The time this operation started. - google.protobuf.Timestamp start_time = 1; - - // The time this operation completed. Will be unset if operation still in - // progress. - google.protobuf.Timestamp end_time = 2; - - // The state of the import operation. - OperationState operation_state = 3; - - // The progress, in documents, of this operation. - Progress progress_documents = 4; - - // The progress, in bytes, of this operation. - Progress progress_bytes = 5; - - // Which collection ids are being imported. - repeated string collection_ids = 6; - - // The location of the documents being imported. - string input_uri_prefix = 7; -} - -// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field. -message ExportDocumentsResponse { - // Location of the output files. 
This can be used to begin an import - // into Cloud Firestore (this project or another project) after the operation - // completes successfully. - string output_uri_prefix = 1; -} - -// Describes the progress of the operation. -// Unit of work is generic and must be interpreted based on where [Progress][google.firestore.admin.v1beta2.Progress] -// is used. -message Progress { - // The amount of work estimated. - int64 estimated_work = 1; - - // The amount of work completed. - int64 completed_work = 2; -} - -// Describes the state of the operation. -enum OperationState { - // Unspecified. - OPERATION_STATE_UNSPECIFIED = 0; - - // Request is being prepared for processing. - INITIALIZING = 1; - - // Request is actively being processed. - PROCESSING = 2; - - // Request is in the process of being cancelled after user called - // google.longrunning.Operations.CancelOperation on the operation. - CANCELLING = 3; - - // Request has been processed and is in its finalization stage. - FINALIZING = 4; - - // Request has completed successfully. - SUCCESSFUL = 5; - - // Request has finished being processed, but encountered an error. - FAILED = 6; - - // Request has finished being cancelled after user called - // google.longrunning.Operations.CancelOperation. - CANCELLED = 7; -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/query.proto b/firestore/google/cloud/firestore_v1beta1/proto/query.proto deleted file mode 100644 index 4f515fabe176..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/query.proto +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.v1beta1; - -import "google/firestore/v1beta1/document.proto"; -import "google/protobuf/wrappers.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "QueryProto"; -option java_package = "com.google.firestore.v1beta1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1beta1"; - -// A Firestore query. -message StructuredQuery { - // A selection of a collection, such as `messages as m1`. - message CollectionSelector { - // The collection ID. - // When set, selects only collections with this ID. - string collection_id = 2; - - // When false, selects only collections that are immediate children of - // the `parent` specified in the containing `RunQueryRequest`. - // When true, selects all descendant collections. - bool all_descendants = 3; - } - - // A filter. - message Filter { - // The type of filter. - oneof filter_type { - // A composite filter. - CompositeFilter composite_filter = 1; - - // A filter on a document field. - FieldFilter field_filter = 2; - - // A filter that takes exactly one argument. 
- UnaryFilter unary_filter = 3; - } - } - - // A filter that merges multiple other filters using the given operator. - message CompositeFilter { - // A composite filter operator. - enum Operator { - // Unspecified. This value must not be used. - OPERATOR_UNSPECIFIED = 0; - - // The results are required to satisfy each of the combined filters. - AND = 1; - } - - // The operator for combining multiple filters. - Operator op = 1; - - // The list of filters to combine. - // Must contain at least one filter. - repeated Filter filters = 2; - } - - // A filter on a specific field. - message FieldFilter { - // A field filter operator. - enum Operator { - // Unspecified. This value must not be used. - OPERATOR_UNSPECIFIED = 0; - - // Less than. Requires that the field come first in `order_by`. - LESS_THAN = 1; - - // Less than or equal. Requires that the field come first in `order_by`. - LESS_THAN_OR_EQUAL = 2; - - // Greater than. Requires that the field come first in `order_by`. - GREATER_THAN = 3; - - // Greater than or equal. Requires that the field come first in - // `order_by`. - GREATER_THAN_OR_EQUAL = 4; - - // Equal. - EQUAL = 5; - - // Contains. Requires that the field is an array. - ARRAY_CONTAINS = 7; - - // In. Requires that `value` is a non-empty ArrayValue with at most 10 - // values. - IN = 8; - - // Contains any. Requires that the field is an array and - // `value` is a non-empty ArrayValue with at most 10 values. - ARRAY_CONTAINS_ANY = 9; - } - - // The field to filter by. - FieldReference field = 1; - - // The operator to filter by. - Operator op = 2; - - // The value to compare to. - Value value = 3; - } - - // A filter with a single operand. - message UnaryFilter { - // A unary operator. - enum Operator { - // Unspecified. This value must not be used. - OPERATOR_UNSPECIFIED = 0; - - // Test if a field is equal to NaN. - IS_NAN = 2; - - // Test if an expression evaluates to Null. - IS_NULL = 3; - } - - // The unary operator to apply. - Operator op = 1; - - // The argument to the filter. - oneof operand_type { - // The field to which to apply the operator. - FieldReference field = 2; - } - } - - // An order on a field. - message Order { - // The field to order by. - FieldReference field = 1; - - // The direction to order by. Defaults to `ASCENDING`. - Direction direction = 2; - } - - // A reference to a field, such as `max(messages.time) as max_time`. - message FieldReference { - string field_path = 2; - } - - // The projection of document's fields to return. - message Projection { - // The fields to return. - // - // If empty, all fields are returned. To only return the name - // of the document, use `['__name__']`. - repeated FieldReference fields = 2; - } - - // A sort direction. - enum Direction { - // Unspecified. - DIRECTION_UNSPECIFIED = 0; - - // Ascending. - ASCENDING = 1; - - // Descending. - DESCENDING = 2; - } - - // The projection to return. - Projection select = 1; - - // The collections to query. - repeated CollectionSelector from = 2; - - // The filter to apply. - Filter where = 3; - - // The order to apply to the query results. - // - // Firestore guarantees a stable ordering through the following rules: - // - // * Any field required to appear in `order_by`, that is not already - // specified in `order_by`, is appended to the order in field name order - // by default. - // * If an order on `__name__` is not specified, it is appended by default. 
- // - // Fields are appended with the same sort direction as the last order - // specified, or 'ASCENDING' if no order was specified. For example: - // - // * `SELECT * FROM Foo ORDER BY A` becomes - // `SELECT * FROM Foo ORDER BY A, __name__` - // * `SELECT * FROM Foo ORDER BY A DESC` becomes - // `SELECT * FROM Foo ORDER BY A DESC, __name__ DESC` - // * `SELECT * FROM Foo WHERE A > 1` becomes - // `SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__` - repeated Order order_by = 4; - - // A starting point for the query results. - Cursor start_at = 7; - - // A end point for the query results. - Cursor end_at = 8; - - // The number of results to skip. - // - // Applies before limit, but after all other constraints. Must be >= 0 if - // specified. - int32 offset = 6; - - // The maximum number of results to return. - // - // Applies after all other constraints. - // Must be >= 0 if specified. - google.protobuf.Int32Value limit = 5; -} - -// A position in a query result set. -message Cursor { - // The values that represent a position, in the order they appear in - // the order by clause of a query. - // - // Can contain fewer values than specified in the order by clause. - repeated Value values = 1; - - // If the position is just before or just after the given values, relative - // to the sort order defined by the query. - bool before = 2; -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/query_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/query_pb2.py deleted file mode 100644 index 154aab0d20fd..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/query_pb2.py +++ /dev/null @@ -1,1204 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/firestore_v1beta1/proto/query.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.firestore_v1beta1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2, -) -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1beta1/proto/query.proto", - package="google.firestore.v1beta1", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.firestore.v1beta1B\nQueryProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\242\002\004GCFS\252\002\036Google.Cloud.Firestore.V1Beta1\312\002\036Google\\Cloud\\Firestore\\V1beta1" - ), - serialized_pb=_b( - '\n0google/cloud/firestore_v1beta1/proto/query.proto\x12\x18google.firestore.v1beta1\x1a\x33google/cloud/firestore_v1beta1/proto/document.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto"\xd9\x0f\n\x0fStructuredQuery\x12\x44\n\x06select\x18\x01 \x01(\x0b\x32\x34.google.firestore.v1beta1.StructuredQuery.Projection\x12J\n\x04\x66rom\x18\x02 \x03(\x0b\x32<.google.firestore.v1beta1.StructuredQuery.CollectionSelector\x12?\n\x05where\x18\x03 \x01(\x0b\x32\x30.google.firestore.v1beta1.StructuredQuery.Filter\x12\x41\n\x08order_by\x18\x04 
\x03(\x0b\x32/.google.firestore.v1beta1.StructuredQuery.Order\x12\x32\n\x08start_at\x18\x07 \x01(\x0b\x32 .google.firestore.v1beta1.Cursor\x12\x30\n\x06\x65nd_at\x18\x08 \x01(\x0b\x32 .google.firestore.v1beta1.Cursor\x12\x0e\n\x06offset\x18\x06 \x01(\x05\x12*\n\x05limit\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x1a\x44\n\x12\x43ollectionSelector\x12\x15\n\rcollection_id\x18\x02 \x01(\t\x12\x17\n\x0f\x61ll_descendants\x18\x03 \x01(\x08\x1a\x8c\x02\n\x06\x46ilter\x12U\n\x10\x63omposite_filter\x18\x01 \x01(\x0b\x32\x39.google.firestore.v1beta1.StructuredQuery.CompositeFilterH\x00\x12M\n\x0c\x66ield_filter\x18\x02 \x01(\x0b\x32\x35.google.firestore.v1beta1.StructuredQuery.FieldFilterH\x00\x12M\n\x0cunary_filter\x18\x03 \x01(\x0b\x32\x35.google.firestore.v1beta1.StructuredQuery.UnaryFilterH\x00\x42\r\n\x0b\x66ilter_type\x1a\xd3\x01\n\x0f\x43ompositeFilter\x12N\n\x02op\x18\x01 \x01(\x0e\x32\x42.google.firestore.v1beta1.StructuredQuery.CompositeFilter.Operator\x12\x41\n\x07\x66ilters\x18\x02 \x03(\x0b\x32\x30.google.firestore.v1beta1.StructuredQuery.Filter"-\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41ND\x10\x01\x1a\x8c\x03\n\x0b\x46ieldFilter\x12G\n\x05\x66ield\x18\x01 \x01(\x0b\x32\x38.google.firestore.v1beta1.StructuredQuery.FieldReference\x12J\n\x02op\x18\x02 \x01(\x0e\x32>.google.firestore.v1beta1.StructuredQuery.FieldFilter.Operator\x12.\n\x05value\x18\x03 \x01(\x0b\x32\x1f.google.firestore.v1beta1.Value"\xb7\x01\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\r\n\tLESS_THAN\x10\x01\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x02\x12\x10\n\x0cGREATER_THAN\x10\x03\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x04\x12\t\n\x05\x45QUAL\x10\x05\x12\x12\n\x0e\x41RRAY_CONTAINS\x10\x07\x12\x06\n\x02IN\x10\x08\x12\x16\n\x12\x41RRAY_CONTAINS_ANY\x10\t\x1a\xf3\x01\n\x0bUnaryFilter\x12J\n\x02op\x18\x01 \x01(\x0e\x32>.google.firestore.v1beta1.StructuredQuery.UnaryFilter.Operator\x12I\n\x05\x66ield\x18\x02 \x01(\x0b\x32\x38.google.firestore.v1beta1.StructuredQuery.FieldReferenceH\x00"=\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\n\n\x06IS_NAN\x10\x02\x12\x0b\n\x07IS_NULL\x10\x03\x42\x0e\n\x0coperand_type\x1a\x98\x01\n\x05Order\x12G\n\x05\x66ield\x18\x01 \x01(\x0b\x32\x38.google.firestore.v1beta1.StructuredQuery.FieldReference\x12\x46\n\tdirection\x18\x02 \x01(\x0e\x32\x33.google.firestore.v1beta1.StructuredQuery.Direction\x1a$\n\x0e\x46ieldReference\x12\x12\n\nfield_path\x18\x02 \x01(\t\x1aV\n\nProjection\x12H\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x38.google.firestore.v1beta1.StructuredQuery.FieldReference"E\n\tDirection\x12\x19\n\x15\x44IRECTION_UNSPECIFIED\x10\x00\x12\r\n\tASCENDING\x10\x01\x12\x0e\n\nDESCENDING\x10\x02"I\n\x06\x43ursor\x12/\n\x06values\x18\x01 \x03(\x0b\x32\x1f.google.firestore.v1beta1.Value\x12\x0e\n\x06\x62\x65\x66ore\x18\x02 \x01(\x08\x42\xb8\x01\n\x1c\x63om.google.firestore.v1beta1B\nQueryProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xa2\x02\x04GCFS\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1\xca\x02\x1eGoogle\\Cloud\\Firestore\\V1beta1b\x06proto3' - ), - dependencies=[ - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR, - google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_STRUCTUREDQUERY_COMPOSITEFILTER_OPERATOR = _descriptor.EnumDescriptor( - name="Operator", - full_name="google.firestore.v1beta1.StructuredQuery.CompositeFilter.Operator", - filename=None, - file=DESCRIPTOR, - values=[ - 
_descriptor.EnumValueDescriptor( - name="OPERATOR_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="AND", index=1, number=1, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1161, - serialized_end=1206, -) -_sym_db.RegisterEnumDescriptor(_STRUCTUREDQUERY_COMPOSITEFILTER_OPERATOR) - -_STRUCTUREDQUERY_FIELDFILTER_OPERATOR = _descriptor.EnumDescriptor( - name="Operator", - full_name="google.firestore.v1beta1.StructuredQuery.FieldFilter.Operator", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="OPERATOR_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="LESS_THAN", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="LESS_THAN_OR_EQUAL", - index=2, - number=2, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="GREATER_THAN", index=3, number=3, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="GREATER_THAN_OR_EQUAL", - index=4, - number=4, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="EQUAL", index=5, number=5, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ARRAY_CONTAINS", index=6, number=7, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="IN", index=7, number=8, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ARRAY_CONTAINS_ANY", - index=8, - number=9, - serialized_options=None, - type=None, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1422, - serialized_end=1605, -) -_sym_db.RegisterEnumDescriptor(_STRUCTUREDQUERY_FIELDFILTER_OPERATOR) - -_STRUCTUREDQUERY_UNARYFILTER_OPERATOR = _descriptor.EnumDescriptor( - name="Operator", - full_name="google.firestore.v1beta1.StructuredQuery.UnaryFilter.Operator", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="OPERATOR_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="IS_NAN", index=1, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="IS_NULL", index=2, number=3, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1774, - serialized_end=1835, -) -_sym_db.RegisterEnumDescriptor(_STRUCTUREDQUERY_UNARYFILTER_OPERATOR) - -_STRUCTUREDQUERY_DIRECTION = _descriptor.EnumDescriptor( - name="Direction", - full_name="google.firestore.v1beta1.StructuredQuery.Direction", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="DIRECTION_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="ASCENDING", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="DESCENDING", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2134, - serialized_end=2203, -) -_sym_db.RegisterEnumDescriptor(_STRUCTUREDQUERY_DIRECTION) - - -_STRUCTUREDQUERY_COLLECTIONSELECTOR = _descriptor.Descriptor( - name="CollectionSelector", - 
full_name="google.firestore.v1beta1.StructuredQuery.CollectionSelector", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="collection_id", - full_name="google.firestore.v1beta1.StructuredQuery.CollectionSelector.collection_id", - index=0, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="all_descendants", - full_name="google.firestore.v1beta1.StructuredQuery.CollectionSelector.all_descendants", - index=1, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=653, - serialized_end=721, -) - -_STRUCTUREDQUERY_FILTER = _descriptor.Descriptor( - name="Filter", - full_name="google.firestore.v1beta1.StructuredQuery.Filter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="composite_filter", - full_name="google.firestore.v1beta1.StructuredQuery.Filter.composite_filter", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="field_filter", - full_name="google.firestore.v1beta1.StructuredQuery.Filter.field_filter", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="unary_filter", - full_name="google.firestore.v1beta1.StructuredQuery.Filter.unary_filter", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="filter_type", - full_name="google.firestore.v1beta1.StructuredQuery.Filter.filter_type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=724, - serialized_end=992, -) - -_STRUCTUREDQUERY_COMPOSITEFILTER = _descriptor.Descriptor( - name="CompositeFilter", - full_name="google.firestore.v1beta1.StructuredQuery.CompositeFilter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="op", - full_name="google.firestore.v1beta1.StructuredQuery.CompositeFilter.op", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filters", - full_name="google.firestore.v1beta1.StructuredQuery.CompositeFilter.filters", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_STRUCTUREDQUERY_COMPOSITEFILTER_OPERATOR], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=995, - serialized_end=1206, -) - -_STRUCTUREDQUERY_FIELDFILTER = _descriptor.Descriptor( - name="FieldFilter", - full_name="google.firestore.v1beta1.StructuredQuery.FieldFilter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field", - full_name="google.firestore.v1beta1.StructuredQuery.FieldFilter.field", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="op", - full_name="google.firestore.v1beta1.StructuredQuery.FieldFilter.op", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.firestore.v1beta1.StructuredQuery.FieldFilter.value", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_STRUCTUREDQUERY_FIELDFILTER_OPERATOR], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1209, - serialized_end=1605, -) - -_STRUCTUREDQUERY_UNARYFILTER = _descriptor.Descriptor( - name="UnaryFilter", - full_name="google.firestore.v1beta1.StructuredQuery.UnaryFilter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="op", - full_name="google.firestore.v1beta1.StructuredQuery.UnaryFilter.op", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="field", - full_name="google.firestore.v1beta1.StructuredQuery.UnaryFilter.field", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_STRUCTUREDQUERY_UNARYFILTER_OPERATOR], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - 
_descriptor.OneofDescriptor( - name="operand_type", - full_name="google.firestore.v1beta1.StructuredQuery.UnaryFilter.operand_type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1608, - serialized_end=1851, -) - -_STRUCTUREDQUERY_ORDER = _descriptor.Descriptor( - name="Order", - full_name="google.firestore.v1beta1.StructuredQuery.Order", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field", - full_name="google.firestore.v1beta1.StructuredQuery.Order.field", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="direction", - full_name="google.firestore.v1beta1.StructuredQuery.Order.direction", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1854, - serialized_end=2006, -) - -_STRUCTUREDQUERY_FIELDREFERENCE = _descriptor.Descriptor( - name="FieldReference", - full_name="google.firestore.v1beta1.StructuredQuery.FieldReference", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field_path", - full_name="google.firestore.v1beta1.StructuredQuery.FieldReference.field_path", - index=0, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2008, - serialized_end=2044, -) - -_STRUCTUREDQUERY_PROJECTION = _descriptor.Descriptor( - name="Projection", - full_name="google.firestore.v1beta1.StructuredQuery.Projection", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="google.firestore.v1beta1.StructuredQuery.Projection.fields", - index=0, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2046, - serialized_end=2132, -) - -_STRUCTUREDQUERY = _descriptor.Descriptor( - name="StructuredQuery", - full_name="google.firestore.v1beta1.StructuredQuery", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="select", - full_name="google.firestore.v1beta1.StructuredQuery.select", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="from", - full_name="google.firestore.v1beta1.StructuredQuery.from", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="where", - full_name="google.firestore.v1beta1.StructuredQuery.where", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.firestore.v1beta1.StructuredQuery.order_by", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_at", - full_name="google.firestore.v1beta1.StructuredQuery.start_at", - index=4, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_at", - full_name="google.firestore.v1beta1.StructuredQuery.end_at", - index=5, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="offset", - full_name="google.firestore.v1beta1.StructuredQuery.offset", - index=6, - number=6, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="limit", - full_name="google.firestore.v1beta1.StructuredQuery.limit", - index=7, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _STRUCTUREDQUERY_COLLECTIONSELECTOR, - _STRUCTUREDQUERY_FILTER, - _STRUCTUREDQUERY_COMPOSITEFILTER, - _STRUCTUREDQUERY_FIELDFILTER, - _STRUCTUREDQUERY_UNARYFILTER, - _STRUCTUREDQUERY_ORDER, - _STRUCTUREDQUERY_FIELDREFERENCE, - _STRUCTUREDQUERY_PROJECTION, - ], - enum_types=[_STRUCTUREDQUERY_DIRECTION], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=194, - serialized_end=2203, -) - - -_CURSOR = _descriptor.Descriptor( - name="Cursor", - full_name="google.firestore.v1beta1.Cursor", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="values", - 
full_name="google.firestore.v1beta1.Cursor.values", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="before", - full_name="google.firestore.v1beta1.Cursor.before", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2205, - serialized_end=2278, -) - -_STRUCTUREDQUERY_COLLECTIONSELECTOR.containing_type = _STRUCTUREDQUERY -_STRUCTUREDQUERY_FILTER.fields_by_name[ - "composite_filter" -].message_type = _STRUCTUREDQUERY_COMPOSITEFILTER -_STRUCTUREDQUERY_FILTER.fields_by_name[ - "field_filter" -].message_type = _STRUCTUREDQUERY_FIELDFILTER -_STRUCTUREDQUERY_FILTER.fields_by_name[ - "unary_filter" -].message_type = _STRUCTUREDQUERY_UNARYFILTER -_STRUCTUREDQUERY_FILTER.containing_type = _STRUCTUREDQUERY -_STRUCTUREDQUERY_FILTER.oneofs_by_name["filter_type"].fields.append( - _STRUCTUREDQUERY_FILTER.fields_by_name["composite_filter"] -) -_STRUCTUREDQUERY_FILTER.fields_by_name[ - "composite_filter" -].containing_oneof = _STRUCTUREDQUERY_FILTER.oneofs_by_name["filter_type"] -_STRUCTUREDQUERY_FILTER.oneofs_by_name["filter_type"].fields.append( - _STRUCTUREDQUERY_FILTER.fields_by_name["field_filter"] -) -_STRUCTUREDQUERY_FILTER.fields_by_name[ - "field_filter" -].containing_oneof = _STRUCTUREDQUERY_FILTER.oneofs_by_name["filter_type"] -_STRUCTUREDQUERY_FILTER.oneofs_by_name["filter_type"].fields.append( - _STRUCTUREDQUERY_FILTER.fields_by_name["unary_filter"] -) -_STRUCTUREDQUERY_FILTER.fields_by_name[ - "unary_filter" -].containing_oneof = _STRUCTUREDQUERY_FILTER.oneofs_by_name["filter_type"] -_STRUCTUREDQUERY_COMPOSITEFILTER.fields_by_name[ - "op" -].enum_type = _STRUCTUREDQUERY_COMPOSITEFILTER_OPERATOR -_STRUCTUREDQUERY_COMPOSITEFILTER.fields_by_name[ - "filters" -].message_type = _STRUCTUREDQUERY_FILTER -_STRUCTUREDQUERY_COMPOSITEFILTER.containing_type = _STRUCTUREDQUERY -_STRUCTUREDQUERY_COMPOSITEFILTER_OPERATOR.containing_type = ( - _STRUCTUREDQUERY_COMPOSITEFILTER -) -_STRUCTUREDQUERY_FIELDFILTER.fields_by_name[ - "field" -].message_type = _STRUCTUREDQUERY_FIELDREFERENCE -_STRUCTUREDQUERY_FIELDFILTER.fields_by_name[ - "op" -].enum_type = _STRUCTUREDQUERY_FIELDFILTER_OPERATOR -_STRUCTUREDQUERY_FIELDFILTER.fields_by_name[ - "value" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE -) -_STRUCTUREDQUERY_FIELDFILTER.containing_type = _STRUCTUREDQUERY -_STRUCTUREDQUERY_FIELDFILTER_OPERATOR.containing_type = _STRUCTUREDQUERY_FIELDFILTER -_STRUCTUREDQUERY_UNARYFILTER.fields_by_name[ - "op" -].enum_type = _STRUCTUREDQUERY_UNARYFILTER_OPERATOR -_STRUCTUREDQUERY_UNARYFILTER.fields_by_name[ - "field" -].message_type = _STRUCTUREDQUERY_FIELDREFERENCE -_STRUCTUREDQUERY_UNARYFILTER.containing_type = _STRUCTUREDQUERY -_STRUCTUREDQUERY_UNARYFILTER_OPERATOR.containing_type = _STRUCTUREDQUERY_UNARYFILTER -_STRUCTUREDQUERY_UNARYFILTER.oneofs_by_name["operand_type"].fields.append( - 
_STRUCTUREDQUERY_UNARYFILTER.fields_by_name["field"] -) -_STRUCTUREDQUERY_UNARYFILTER.fields_by_name[ - "field" -].containing_oneof = _STRUCTUREDQUERY_UNARYFILTER.oneofs_by_name["operand_type"] -_STRUCTUREDQUERY_ORDER.fields_by_name[ - "field" -].message_type = _STRUCTUREDQUERY_FIELDREFERENCE -_STRUCTUREDQUERY_ORDER.fields_by_name[ - "direction" -].enum_type = _STRUCTUREDQUERY_DIRECTION -_STRUCTUREDQUERY_ORDER.containing_type = _STRUCTUREDQUERY -_STRUCTUREDQUERY_FIELDREFERENCE.containing_type = _STRUCTUREDQUERY -_STRUCTUREDQUERY_PROJECTION.fields_by_name[ - "fields" -].message_type = _STRUCTUREDQUERY_FIELDREFERENCE -_STRUCTUREDQUERY_PROJECTION.containing_type = _STRUCTUREDQUERY -_STRUCTUREDQUERY.fields_by_name["select"].message_type = _STRUCTUREDQUERY_PROJECTION -_STRUCTUREDQUERY.fields_by_name[ - "from" -].message_type = _STRUCTUREDQUERY_COLLECTIONSELECTOR -_STRUCTUREDQUERY.fields_by_name["where"].message_type = _STRUCTUREDQUERY_FILTER -_STRUCTUREDQUERY.fields_by_name["order_by"].message_type = _STRUCTUREDQUERY_ORDER -_STRUCTUREDQUERY.fields_by_name["start_at"].message_type = _CURSOR -_STRUCTUREDQUERY.fields_by_name["end_at"].message_type = _CURSOR -_STRUCTUREDQUERY.fields_by_name[ - "limit" -].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE -_STRUCTUREDQUERY_DIRECTION.containing_type = _STRUCTUREDQUERY -_CURSOR.fields_by_name[ - "values" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE -) -DESCRIPTOR.message_types_by_name["StructuredQuery"] = _STRUCTUREDQUERY -DESCRIPTOR.message_types_by_name["Cursor"] = _CURSOR -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -StructuredQuery = _reflection.GeneratedProtocolMessageType( - "StructuredQuery", - (_message.Message,), - dict( - CollectionSelector=_reflection.GeneratedProtocolMessageType( - "CollectionSelector", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTUREDQUERY_COLLECTIONSELECTOR, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""A selection of a collection, such as ``messages as m1``. - - - Attributes: - collection_id: - The collection ID. When set, selects only collections with - this ID. - all_descendants: - When false, selects only collections that are immediate - children of the ``parent`` specified in the containing - ``RunQueryRequest``. When true, selects all descendant - collections. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery.CollectionSelector) - ), - ), - Filter=_reflection.GeneratedProtocolMessageType( - "Filter", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTUREDQUERY_FILTER, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""A filter. - - - Attributes: - filter_type: - The type of filter. - composite_filter: - A composite filter. - field_filter: - A filter on a document field. - unary_filter: - A filter that takes exactly one argument. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery.Filter) - ), - ), - CompositeFilter=_reflection.GeneratedProtocolMessageType( - "CompositeFilter", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTUREDQUERY_COMPOSITEFILTER, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""A filter that merges multiple other filters using the - given operator. - - - Attributes: - op: - The operator for combining multiple filters. - filters: - The list of filters to combine. Must contain at least one - filter. 
- """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery.CompositeFilter) - ), - ), - FieldFilter=_reflection.GeneratedProtocolMessageType( - "FieldFilter", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTUREDQUERY_FIELDFILTER, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""A filter on a specific field. - - - Attributes: - field: - The field to filter by. - op: - The operator to filter by. - value: - The value to compare to. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery.FieldFilter) - ), - ), - UnaryFilter=_reflection.GeneratedProtocolMessageType( - "UnaryFilter", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTUREDQUERY_UNARYFILTER, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""A filter with a single operand. - - - Attributes: - op: - The unary operator to apply. - operand_type: - The argument to the filter. - field: - The field to which to apply the operator. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery.UnaryFilter) - ), - ), - Order=_reflection.GeneratedProtocolMessageType( - "Order", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTUREDQUERY_ORDER, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""An order on a field. - - - Attributes: - field: - The field to order by. - direction: - The direction to order by. Defaults to ``ASCENDING``. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery.Order) - ), - ), - FieldReference=_reflection.GeneratedProtocolMessageType( - "FieldReference", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTUREDQUERY_FIELDREFERENCE, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""A reference to a field, such as - ``max(messages.time) as max_time``. - - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery.FieldReference) - ), - ), - Projection=_reflection.GeneratedProtocolMessageType( - "Projection", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTUREDQUERY_PROJECTION, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""The projection of document's fields to return. - - - Attributes: - fields: - The fields to return. If empty, all fields are returned. To - only return the name of the document, use ``['__name__']``. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery.Projection) - ), - ), - DESCRIPTOR=_STRUCTUREDQUERY, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""A Firestore query. - - - Attributes: - select: - The projection to return. - from: - The collections to query. - where: - The filter to apply. - order_by: - The order to apply to the query results. Firestore guarantees - a stable ordering through the following rules: - Any field - required to appear in ``order_by``, that is not already - specified in ``order_by``, is appended to the order in field - name order by default. - If an order on ``__name__`` is - not specified, it is appended by default. Fields are - appended with the same sort direction as the last order - specified, or 'ASCENDING' if no order was specified. 
For - example: - ``SELECT * FROM Foo ORDER BY A`` becomes - ``SELECT * FROM Foo ORDER BY A, __name__`` - ``SELECT * FROM - Foo ORDER BY A DESC`` becomes ``SELECT * FROM Foo ORDER BY - A DESC, __name__ DESC`` - ``SELECT * FROM Foo WHERE A > 1`` - becomes ``SELECT * FROM Foo WHERE A > 1 ORDER BY A, - __name__`` - start_at: - A starting point for the query results. - end_at: - A end point for the query results. - offset: - The number of results to skip. Applies before limit, but - after all other constraints. Must be >= 0 if specified. - limit: - The maximum number of results to return. Applies after all - other constraints. Must be >= 0 if specified. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.StructuredQuery) - ), -) -_sym_db.RegisterMessage(StructuredQuery) -_sym_db.RegisterMessage(StructuredQuery.CollectionSelector) -_sym_db.RegisterMessage(StructuredQuery.Filter) -_sym_db.RegisterMessage(StructuredQuery.CompositeFilter) -_sym_db.RegisterMessage(StructuredQuery.FieldFilter) -_sym_db.RegisterMessage(StructuredQuery.UnaryFilter) -_sym_db.RegisterMessage(StructuredQuery.Order) -_sym_db.RegisterMessage(StructuredQuery.FieldReference) -_sym_db.RegisterMessage(StructuredQuery.Projection) - -Cursor = _reflection.GeneratedProtocolMessageType( - "Cursor", - (_message.Message,), - dict( - DESCRIPTOR=_CURSOR, - __module__="google.cloud.firestore_v1beta1.proto.query_pb2", - __doc__="""A position in a query result set. - - - Attributes: - values: - The values that represent a position, in the order they appear - in the order by clause of a query. Can contain fewer values - than specified in the order by clause. - before: - If the position is just before or just after the given values, - relative to the sort order defined by the query. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Cursor) - ), -) -_sym_db.RegisterMessage(Cursor) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/query_pb2_grpc.py b/firestore/google/cloud/firestore_v1beta1/proto/query_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/query_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_v1beta1/proto/test_v1beta1_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/test_v1beta1_pb2.py deleted file mode 100644 index 18dc58706837..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/test_v1beta1_pb2.py +++ /dev/null @@ -1,2190 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: test_v1beta1.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.firestore_v1beta1.proto import ( - common_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - firestore_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - query_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_query__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="test_v1beta1.proto", - package="tests.v1beta1", - syntax="proto3", - serialized_pb=_b( - '\n\x12test_v1beta1.proto\x12\rtests.v1beta1\x1a\x31google/cloud/firestore_v1beta1/proto/common.proto\x1a\x33google/cloud/firestore_v1beta1/proto/document.proto\x1a\x34google/cloud/firestore_v1beta1/proto/firestore.proto\x1a\x30google/cloud/firestore_v1beta1/proto/query.proto\x1a\x1fgoogle/protobuf/timestamp.proto"/\n\tTestSuite\x12"\n\x05tests\x18\x01 \x03(\x0b\x32\x13.tests.v1beta1.Test"\x88\x03\n\x04Test\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12%\n\x03get\x18\x02 \x01(\x0b\x32\x16.tests.v1beta1.GetTestH\x00\x12+\n\x06\x63reate\x18\x03 \x01(\x0b\x32\x19.tests.v1beta1.CreateTestH\x00\x12%\n\x03set\x18\x04 \x01(\x0b\x32\x16.tests.v1beta1.SetTestH\x00\x12+\n\x06update\x18\x05 \x01(\x0b\x32\x19.tests.v1beta1.UpdateTestH\x00\x12\x36\n\x0cupdate_paths\x18\x06 \x01(\x0b\x32\x1e.tests.v1beta1.UpdatePathsTestH\x00\x12+\n\x06\x64\x65lete\x18\x07 \x01(\x0b\x32\x19.tests.v1beta1.DeleteTestH\x00\x12)\n\x05query\x18\x08 \x01(\x0b\x32\x18.tests.v1beta1.QueryTestH\x00\x12+\n\x06listen\x18\t \x01(\x0b\x32\x19.tests.v1beta1.ListenTestH\x00\x42\x06\n\x04test"^\n\x07GetTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12=\n\x07request\x18\x02 \x01(\x0b\x32,.google.firestore.v1beta1.GetDocumentRequest"\x81\x01\n\nCreateTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12\x11\n\tjson_data\x18\x02 \x01(\t\x12\x38\n\x07request\x18\x03 \x01(\x0b\x32\'.google.firestore.v1beta1.CommitRequest\x12\x10\n\x08is_error\x18\x04 \x01(\x08"\xa8\x01\n\x07SetTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12(\n\x06option\x18\x02 \x01(\x0b\x32\x18.tests.v1beta1.SetOption\x12\x11\n\tjson_data\x18\x03 \x01(\t\x12\x38\n\x07request\x18\x04 \x01(\x0b\x32\'.google.firestore.v1beta1.CommitRequest\x12\x10\n\x08is_error\x18\x05 \x01(\x08"\xbf\x01\n\nUpdateTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12<\n\x0cprecondition\x18\x02 \x01(\x0b\x32&.google.firestore.v1beta1.Precondition\x12\x11\n\tjson_data\x18\x03 \x01(\t\x12\x38\n\x07request\x18\x04 \x01(\x0b\x32\'.google.firestore.v1beta1.CommitRequest\x12\x10\n\x08is_error\x18\x05 \x01(\x08"\xf5\x01\n\x0fUpdatePathsTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12<\n\x0cprecondition\x18\x02 \x01(\x0b\x32&.google.firestore.v1beta1.Precondition\x12-\n\x0b\x66ield_paths\x18\x03 
\x03(\x0b\x32\x18.tests.v1beta1.FieldPath\x12\x13\n\x0bjson_values\x18\x04 \x03(\t\x12\x38\n\x07request\x18\x05 \x01(\x0b\x32\'.google.firestore.v1beta1.CommitRequest\x12\x10\n\x08is_error\x18\x06 \x01(\x08"\xac\x01\n\nDeleteTest\x12\x14\n\x0c\x64oc_ref_path\x18\x01 \x01(\t\x12<\n\x0cprecondition\x18\x02 \x01(\x0b\x32&.google.firestore.v1beta1.Precondition\x12\x38\n\x07request\x18\x03 \x01(\x0b\x32\'.google.firestore.v1beta1.CommitRequest\x12\x10\n\x08is_error\x18\x04 \x01(\x08"B\n\tSetOption\x12\x0b\n\x03\x61ll\x18\x01 \x01(\x08\x12(\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x18.tests.v1beta1.FieldPath"\x92\x01\n\tQueryTest\x12\x11\n\tcoll_path\x18\x01 \x01(\t\x12&\n\x07\x63lauses\x18\x02 \x03(\x0b\x32\x15.tests.v1beta1.Clause\x12\x38\n\x05query\x18\x03 \x01(\x0b\x32).google.firestore.v1beta1.StructuredQuery\x12\x10\n\x08is_error\x18\x04 \x01(\x08"\xe0\x02\n\x06\x43lause\x12\'\n\x06select\x18\x01 \x01(\x0b\x32\x15.tests.v1beta1.SelectH\x00\x12%\n\x05where\x18\x02 \x01(\x0b\x32\x14.tests.v1beta1.WhereH\x00\x12*\n\x08order_by\x18\x03 \x01(\x0b\x32\x16.tests.v1beta1.OrderByH\x00\x12\x10\n\x06offset\x18\x04 \x01(\x05H\x00\x12\x0f\n\x05limit\x18\x05 \x01(\x05H\x00\x12)\n\x08start_at\x18\x06 \x01(\x0b\x32\x15.tests.v1beta1.CursorH\x00\x12,\n\x0bstart_after\x18\x07 \x01(\x0b\x32\x15.tests.v1beta1.CursorH\x00\x12\'\n\x06\x65nd_at\x18\x08 \x01(\x0b\x32\x15.tests.v1beta1.CursorH\x00\x12+\n\nend_before\x18\t \x01(\x0b\x32\x15.tests.v1beta1.CursorH\x00\x42\x08\n\x06\x63lause"2\n\x06Select\x12(\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x18.tests.v1beta1.FieldPath"O\n\x05Where\x12&\n\x04path\x18\x01 \x01(\x0b\x32\x18.tests.v1beta1.FieldPath\x12\n\n\x02op\x18\x02 \x01(\t\x12\x12\n\njson_value\x18\x03 \x01(\t"D\n\x07OrderBy\x12&\n\x04path\x18\x01 \x01(\x0b\x32\x18.tests.v1beta1.FieldPath\x12\x11\n\tdirection\x18\x02 \x01(\t"O\n\x06\x43ursor\x12\x30\n\x0c\x64oc_snapshot\x18\x01 \x01(\x0b\x32\x1a.tests.v1beta1.DocSnapshot\x12\x13\n\x0bjson_values\x18\x02 \x03(\t".\n\x0b\x44ocSnapshot\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x11\n\tjson_data\x18\x02 \x01(\t"\x1a\n\tFieldPath\x12\r\n\x05\x66ield\x18\x01 \x03(\t"\x87\x01\n\nListenTest\x12;\n\tresponses\x18\x01 \x03(\x0b\x32(.google.firestore.v1beta1.ListenResponse\x12*\n\tsnapshots\x18\x02 \x03(\x0b\x32\x17.tests.v1beta1.Snapshot\x12\x10\n\x08is_error\x18\x03 \x01(\x08"\x96\x01\n\x08Snapshot\x12\x30\n\x04\x64ocs\x18\x01 \x03(\x0b\x32".google.firestore.v1beta1.Document\x12)\n\x07\x63hanges\x18\x02 \x03(\x0b\x32\x18.tests.v1beta1.DocChange\x12-\n\tread_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd3\x01\n\tDocChange\x12+\n\x04kind\x18\x01 \x01(\x0e\x32\x1d.tests.v1beta1.DocChange.Kind\x12/\n\x03\x64oc\x18\x02 \x01(\x0b\x32".google.firestore.v1beta1.Document\x12\x11\n\told_index\x18\x03 \x01(\x05\x12\x11\n\tnew_index\x18\x04 \x01(\x05"B\n\x04Kind\x12\x14\n\x10KIND_UNSPECIFIED\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0b\n\x07REMOVED\x10\x02\x12\x0c\n\x08MODIFIED\x10\x03\x42x\n&com.google.cloud.firestore.conformance\xaa\x02"Google.Cloud.Firestore.Tests.Proto\xca\x02(Google\\Cloud\\Firestore\\Tests\\Conformanceb\x06proto3' - ), - dependencies=[ - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_query__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_DOCCHANGE_KIND = 
_descriptor.EnumDescriptor( - name="Kind", - full_name="tests.v1beta1.DocChange.Kind", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="KIND_UNSPECIFIED", index=0, number=0, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ADDED", index=1, number=1, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="REMOVED", index=2, number=2, options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="MODIFIED", index=3, number=3, options=None, type=None - ), - ], - containing_type=None, - options=None, - serialized_start=3107, - serialized_end=3173, -) -_sym_db.RegisterEnumDescriptor(_DOCCHANGE_KIND) - - -_TESTSUITE = _descriptor.Descriptor( - name="TestSuite", - full_name="tests.v1beta1.TestSuite", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="tests", - full_name="tests.v1beta1.TestSuite.tests", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=278, - serialized_end=325, -) - - -_TEST = _descriptor.Descriptor( - name="Test", - full_name="tests.v1beta1.Test", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="description", - full_name="tests.v1beta1.Test.description", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="get", - full_name="tests.v1beta1.Test.get", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="create", - full_name="tests.v1beta1.Test.create", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="set", - full_name="tests.v1beta1.Test.set", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="tests.v1beta1.Test.update", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_paths", - full_name="tests.v1beta1.Test.update_paths", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete", - full_name="tests.v1beta1.Test.delete", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query", - full_name="tests.v1beta1.Test.query", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="listen", - full_name="tests.v1beta1.Test.listen", - index=8, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="test", - full_name="tests.v1beta1.Test.test", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=328, - serialized_end=720, -) - - -_GETTEST = _descriptor.Descriptor( - name="GetTest", - full_name="tests.v1beta1.GetTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1beta1.GetTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1beta1.GetTest.request", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=722, - serialized_end=816, -) - - -_CREATETEST = _descriptor.Descriptor( - name="CreateTest", - full_name="tests.v1beta1.CreateTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1beta1.CreateTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="tests.v1beta1.CreateTest.json_data", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="request", - full_name="tests.v1beta1.CreateTest.request", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1beta1.CreateTest.is_error", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=819, - serialized_end=948, -) - - -_SETTEST = _descriptor.Descriptor( - name="SetTest", - full_name="tests.v1beta1.SetTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1beta1.SetTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="option", - full_name="tests.v1beta1.SetTest.option", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="tests.v1beta1.SetTest.json_data", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1beta1.SetTest.request", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1beta1.SetTest.is_error", - index=4, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=951, - serialized_end=1119, -) - - -_UPDATETEST = _descriptor.Descriptor( - name="UpdateTest", - full_name="tests.v1beta1.UpdateTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1beta1.UpdateTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="tests.v1beta1.UpdateTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="tests.v1beta1.UpdateTest.json_data", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1beta1.UpdateTest.request", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1beta1.UpdateTest.is_error", - index=4, - number=5, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1122, - serialized_end=1313, -) - - -_UPDATEPATHSTEST = _descriptor.Descriptor( - name="UpdatePathsTest", - full_name="tests.v1beta1.UpdatePathsTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1beta1.UpdatePathsTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="tests.v1beta1.UpdatePathsTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="field_paths", - full_name="tests.v1beta1.UpdatePathsTest.field_paths", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_values", - full_name="tests.v1beta1.UpdatePathsTest.json_values", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1beta1.UpdatePathsTest.request", - index=4, - number=5, - type=11, - 
cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1beta1.UpdatePathsTest.is_error", - index=5, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1316, - serialized_end=1561, -) - - -_DELETETEST = _descriptor.Descriptor( - name="DeleteTest", - full_name="tests.v1beta1.DeleteTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_ref_path", - full_name="tests.v1beta1.DeleteTest.doc_ref_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="precondition", - full_name="tests.v1beta1.DeleteTest.precondition", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="request", - full_name="tests.v1beta1.DeleteTest.request", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1beta1.DeleteTest.is_error", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1564, - serialized_end=1736, -) - - -_SETOPTION = _descriptor.Descriptor( - name="SetOption", - full_name="tests.v1beta1.SetOption", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="all", - full_name="tests.v1beta1.SetOption.all", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="fields", - full_name="tests.v1beta1.SetOption.fields", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1738, - serialized_end=1804, -) - - -_QUERYTEST = _descriptor.Descriptor( - name="QueryTest", - full_name="tests.v1beta1.QueryTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="coll_path", - full_name="tests.v1beta1.QueryTest.coll_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="clauses", - full_name="tests.v1beta1.QueryTest.clauses", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query", - full_name="tests.v1beta1.QueryTest.query", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1beta1.QueryTest.is_error", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1807, - serialized_end=1953, -) - - -_CLAUSE = _descriptor.Descriptor( - name="Clause", - full_name="tests.v1beta1.Clause", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="select", - full_name="tests.v1beta1.Clause.select", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="where", - full_name="tests.v1beta1.Clause.where", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="tests.v1beta1.Clause.order_by", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="offset", - full_name="tests.v1beta1.Clause.offset", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="limit", - 
full_name="tests.v1beta1.Clause.limit", - index=4, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_at", - full_name="tests.v1beta1.Clause.start_at", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_after", - full_name="tests.v1beta1.Clause.start_after", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_at", - full_name="tests.v1beta1.Clause.end_at", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_before", - full_name="tests.v1beta1.Clause.end_before", - index=8, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="clause", - full_name="tests.v1beta1.Clause.clause", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=1956, - serialized_end=2308, -) - - -_SELECT = _descriptor.Descriptor( - name="Select", - full_name="tests.v1beta1.Select", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="tests.v1beta1.Select.fields", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2310, - serialized_end=2360, -) - - -_WHERE = _descriptor.Descriptor( - name="Where", - full_name="tests.v1beta1.Where", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="tests.v1beta1.Where.path", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="op", - full_name="tests.v1beta1.Where.op", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_value", - full_name="tests.v1beta1.Where.json_value", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2362, - serialized_end=2441, -) - - -_ORDERBY = _descriptor.Descriptor( - name="OrderBy", - full_name="tests.v1beta1.OrderBy", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="tests.v1beta1.OrderBy.path", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="direction", - full_name="tests.v1beta1.OrderBy.direction", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2443, - serialized_end=2511, -) - - -_CURSOR = _descriptor.Descriptor( - name="Cursor", - full_name="tests.v1beta1.Cursor", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="doc_snapshot", - full_name="tests.v1beta1.Cursor.doc_snapshot", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_values", - full_name="tests.v1beta1.Cursor.json_values", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2513, - serialized_end=2592, -) - - -_DOCSNAPSHOT = _descriptor.Descriptor( - name="DocSnapshot", - full_name="tests.v1beta1.DocSnapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="path", - full_name="tests.v1beta1.DocSnapshot.path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="json_data", - full_name="tests.v1beta1.DocSnapshot.json_data", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2594, - serialized_end=2640, -) - - -_FIELDPATH = _descriptor.Descriptor( - name="FieldPath", - full_name="tests.v1beta1.FieldPath", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field", - full_name="tests.v1beta1.FieldPath.field", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2642, - serialized_end=2668, -) - - -_LISTENTEST = _descriptor.Descriptor( - name="ListenTest", - full_name="tests.v1beta1.ListenTest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="responses", - full_name="tests.v1beta1.ListenTest.responses", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="snapshots", - full_name="tests.v1beta1.ListenTest.snapshots", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="is_error", - full_name="tests.v1beta1.ListenTest.is_error", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2671, - serialized_end=2806, -) - - -_SNAPSHOT = _descriptor.Descriptor( - name="Snapshot", - full_name="tests.v1beta1.Snapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="docs", - full_name="tests.v1beta1.Snapshot.docs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="changes", - full_name="tests.v1beta1.Snapshot.changes", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="tests.v1beta1.Snapshot.read_time", - 
index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2809, - serialized_end=2959, -) - - -_DOCCHANGE = _descriptor.Descriptor( - name="DocChange", - full_name="tests.v1beta1.DocChange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="kind", - full_name="tests.v1beta1.DocChange.kind", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="doc", - full_name="tests.v1beta1.DocChange.doc", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="old_index", - full_name="tests.v1beta1.DocChange.old_index", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="new_index", - full_name="tests.v1beta1.DocChange.new_index", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_DOCCHANGE_KIND], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2962, - serialized_end=3173, -) - -_TESTSUITE.fields_by_name["tests"].message_type = _TEST -_TEST.fields_by_name["get"].message_type = _GETTEST -_TEST.fields_by_name["create"].message_type = _CREATETEST -_TEST.fields_by_name["set"].message_type = _SETTEST -_TEST.fields_by_name["update"].message_type = _UPDATETEST -_TEST.fields_by_name["update_paths"].message_type = _UPDATEPATHSTEST -_TEST.fields_by_name["delete"].message_type = _DELETETEST -_TEST.fields_by_name["query"].message_type = _QUERYTEST -_TEST.fields_by_name["listen"].message_type = _LISTENTEST -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["get"]) -_TEST.fields_by_name["get"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["create"]) -_TEST.fields_by_name["create"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["set"]) -_TEST.fields_by_name["set"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["update"]) -_TEST.fields_by_name["update"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["update_paths"]) -_TEST.fields_by_name["update_paths"].containing_oneof = _TEST.oneofs_by_name["test"] 
-_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["delete"]) -_TEST.fields_by_name["delete"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["query"]) -_TEST.fields_by_name["query"].containing_oneof = _TEST.oneofs_by_name["test"] -_TEST.oneofs_by_name["test"].fields.append(_TEST.fields_by_name["listen"]) -_TEST.fields_by_name["listen"].containing_oneof = _TEST.oneofs_by_name["test"] -_GETTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2._GETDOCUMENTREQUEST -) -_CREATETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_SETTEST.fields_by_name["option"].message_type = _SETOPTION -_SETTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_UPDATETEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._PRECONDITION -) -_UPDATETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_UPDATEPATHSTEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._PRECONDITION -) -_UPDATEPATHSTEST.fields_by_name["field_paths"].message_type = _FIELDPATH -_UPDATEPATHSTEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_DELETETEST.fields_by_name[ - "precondition" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._PRECONDITION -) -_DELETETEST.fields_by_name[ - "request" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2._COMMITREQUEST -) -_SETOPTION.fields_by_name["fields"].message_type = _FIELDPATH -_QUERYTEST.fields_by_name["clauses"].message_type = _CLAUSE -_QUERYTEST.fields_by_name[ - "query" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_query__pb2._STRUCTUREDQUERY -) -_CLAUSE.fields_by_name["select"].message_type = _SELECT -_CLAUSE.fields_by_name["where"].message_type = _WHERE -_CLAUSE.fields_by_name["order_by"].message_type = _ORDERBY -_CLAUSE.fields_by_name["start_at"].message_type = _CURSOR -_CLAUSE.fields_by_name["start_after"].message_type = _CURSOR -_CLAUSE.fields_by_name["end_at"].message_type = _CURSOR -_CLAUSE.fields_by_name["end_before"].message_type = _CURSOR -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["select"]) -_CLAUSE.fields_by_name["select"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["where"]) -_CLAUSE.fields_by_name["where"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["order_by"]) -_CLAUSE.fields_by_name["order_by"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["offset"]) -_CLAUSE.fields_by_name["offset"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["limit"]) -_CLAUSE.fields_by_name["limit"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] 
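The descriptor wiring above mirrors the `test` oneof of the deleted conformance proto: each Test message carries exactly one of get, create, set, update, update_paths, delete, query, or listen. A minimal sketch of how a runner could consume the generated classes, assuming the deleted module was importable as test_v1beta1_pb2 and that suites were shipped as serialized TestSuite binaries (both assumptions for illustration only):

from test_v1beta1_pb2 import TestSuite  # hypothetical import path for the deleted module

def iter_conformance_tests(path):
    """Yield (kind, sub-message) pairs from one serialized TestSuite file."""
    suite = TestSuite()
    with open(path, "rb") as handle:
        suite.ParseFromString(handle.read())   # standard protobuf deserialization
    for test in suite.tests:                   # TestSuite.tests is a repeated Test field
        kind = test.WhichOneof("test")         # "get", "create", "set", "update", "update_paths", ...
        yield kind, getattr(test, kind)
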
-_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["start_at"]) -_CLAUSE.fields_by_name["start_at"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["start_after"]) -_CLAUSE.fields_by_name["start_after"].containing_oneof = _CLAUSE.oneofs_by_name[ - "clause" -] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["end_at"]) -_CLAUSE.fields_by_name["end_at"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_CLAUSE.oneofs_by_name["clause"].fields.append(_CLAUSE.fields_by_name["end_before"]) -_CLAUSE.fields_by_name["end_before"].containing_oneof = _CLAUSE.oneofs_by_name["clause"] -_SELECT.fields_by_name["fields"].message_type = _FIELDPATH -_WHERE.fields_by_name["path"].message_type = _FIELDPATH -_ORDERBY.fields_by_name["path"].message_type = _FIELDPATH -_CURSOR.fields_by_name["doc_snapshot"].message_type = _DOCSNAPSHOT -_LISTENTEST.fields_by_name[ - "responses" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2._LISTENRESPONSE -) -_LISTENTEST.fields_by_name["snapshots"].message_type = _SNAPSHOT -_SNAPSHOT.fields_by_name[ - "docs" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_SNAPSHOT.fields_by_name["changes"].message_type = _DOCCHANGE -_SNAPSHOT.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_DOCCHANGE.fields_by_name["kind"].enum_type = _DOCCHANGE_KIND -_DOCCHANGE.fields_by_name[ - "doc" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_DOCCHANGE_KIND.containing_type = _DOCCHANGE -DESCRIPTOR.message_types_by_name["TestSuite"] = _TESTSUITE -DESCRIPTOR.message_types_by_name["Test"] = _TEST -DESCRIPTOR.message_types_by_name["GetTest"] = _GETTEST -DESCRIPTOR.message_types_by_name["CreateTest"] = _CREATETEST -DESCRIPTOR.message_types_by_name["SetTest"] = _SETTEST -DESCRIPTOR.message_types_by_name["UpdateTest"] = _UPDATETEST -DESCRIPTOR.message_types_by_name["UpdatePathsTest"] = _UPDATEPATHSTEST -DESCRIPTOR.message_types_by_name["DeleteTest"] = _DELETETEST -DESCRIPTOR.message_types_by_name["SetOption"] = _SETOPTION -DESCRIPTOR.message_types_by_name["QueryTest"] = _QUERYTEST -DESCRIPTOR.message_types_by_name["Clause"] = _CLAUSE -DESCRIPTOR.message_types_by_name["Select"] = _SELECT -DESCRIPTOR.message_types_by_name["Where"] = _WHERE -DESCRIPTOR.message_types_by_name["OrderBy"] = _ORDERBY -DESCRIPTOR.message_types_by_name["Cursor"] = _CURSOR -DESCRIPTOR.message_types_by_name["DocSnapshot"] = _DOCSNAPSHOT -DESCRIPTOR.message_types_by_name["FieldPath"] = _FIELDPATH -DESCRIPTOR.message_types_by_name["ListenTest"] = _LISTENTEST -DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT -DESCRIPTOR.message_types_by_name["DocChange"] = _DOCCHANGE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TestSuite = _reflection.GeneratedProtocolMessageType( - "TestSuite", - (_message.Message,), - dict( - DESCRIPTOR=_TESTSUITE, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.TestSuite) - ), -) -_sym_db.RegisterMessage(TestSuite) - -Test = _reflection.GeneratedProtocolMessageType( - "Test", - (_message.Message,), - dict( - DESCRIPTOR=_TEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.Test) - ), -) -_sym_db.RegisterMessage(Test) - -GetTest = _reflection.GeneratedProtocolMessageType( - "GetTest", - (_message.Message,), - dict( - 
DESCRIPTOR=_GETTEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.GetTest) - ), -) -_sym_db.RegisterMessage(GetTest) - -CreateTest = _reflection.GeneratedProtocolMessageType( - "CreateTest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATETEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.CreateTest) - ), -) -_sym_db.RegisterMessage(CreateTest) - -SetTest = _reflection.GeneratedProtocolMessageType( - "SetTest", - (_message.Message,), - dict( - DESCRIPTOR=_SETTEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.SetTest) - ), -) -_sym_db.RegisterMessage(SetTest) - -UpdateTest = _reflection.GeneratedProtocolMessageType( - "UpdateTest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATETEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.UpdateTest) - ), -) -_sym_db.RegisterMessage(UpdateTest) - -UpdatePathsTest = _reflection.GeneratedProtocolMessageType( - "UpdatePathsTest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEPATHSTEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.UpdatePathsTest) - ), -) -_sym_db.RegisterMessage(UpdatePathsTest) - -DeleteTest = _reflection.GeneratedProtocolMessageType( - "DeleteTest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETETEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.DeleteTest) - ), -) -_sym_db.RegisterMessage(DeleteTest) - -SetOption = _reflection.GeneratedProtocolMessageType( - "SetOption", - (_message.Message,), - dict( - DESCRIPTOR=_SETOPTION, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.SetOption) - ), -) -_sym_db.RegisterMessage(SetOption) - -QueryTest = _reflection.GeneratedProtocolMessageType( - "QueryTest", - (_message.Message,), - dict( - DESCRIPTOR=_QUERYTEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.QueryTest) - ), -) -_sym_db.RegisterMessage(QueryTest) - -Clause = _reflection.GeneratedProtocolMessageType( - "Clause", - (_message.Message,), - dict( - DESCRIPTOR=_CLAUSE, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.Clause) - ), -) -_sym_db.RegisterMessage(Clause) - -Select = _reflection.GeneratedProtocolMessageType( - "Select", - (_message.Message,), - dict( - DESCRIPTOR=_SELECT, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.Select) - ), -) -_sym_db.RegisterMessage(Select) - -Where = _reflection.GeneratedProtocolMessageType( - "Where", - (_message.Message,), - dict( - DESCRIPTOR=_WHERE, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.Where) - ), -) -_sym_db.RegisterMessage(Where) - -OrderBy = _reflection.GeneratedProtocolMessageType( - "OrderBy", - (_message.Message,), - dict( - DESCRIPTOR=_ORDERBY, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.OrderBy) - ), -) -_sym_db.RegisterMessage(OrderBy) - -Cursor = _reflection.GeneratedProtocolMessageType( - "Cursor", - (_message.Message,), - dict( - DESCRIPTOR=_CURSOR, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.Cursor) - ), -) -_sym_db.RegisterMessage(Cursor) - -DocSnapshot = _reflection.GeneratedProtocolMessageType( - "DocSnapshot", - (_message.Message,), - dict( - DESCRIPTOR=_DOCSNAPSHOT, - __module__="test_v1beta1_pb2" - # 
@@protoc_insertion_point(class_scope:tests.v1beta1.DocSnapshot) - ), -) -_sym_db.RegisterMessage(DocSnapshot) - -FieldPath = _reflection.GeneratedProtocolMessageType( - "FieldPath", - (_message.Message,), - dict( - DESCRIPTOR=_FIELDPATH, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.FieldPath) - ), -) -_sym_db.RegisterMessage(FieldPath) - -ListenTest = _reflection.GeneratedProtocolMessageType( - "ListenTest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTENTEST, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.ListenTest) - ), -) -_sym_db.RegisterMessage(ListenTest) - -Snapshot = _reflection.GeneratedProtocolMessageType( - "Snapshot", - (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOT, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.Snapshot) - ), -) -_sym_db.RegisterMessage(Snapshot) - -DocChange = _reflection.GeneratedProtocolMessageType( - "DocChange", - (_message.Message,), - dict( - DESCRIPTOR=_DOCCHANGE, - __module__="test_v1beta1_pb2" - # @@protoc_insertion_point(class_scope:tests.v1beta1.DocChange) - ), -) -_sym_db.RegisterMessage(DocChange) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - '\n&com.google.cloud.firestore.conformance\252\002"Google.Cloud.Firestore.Tests.Proto\312\002(Google\\Cloud\\Firestore\\Tests\\Conformance' - ), -) -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/write.proto b/firestore/google/cloud/firestore_v1beta1/proto/write.proto deleted file mode 100644 index c02a2a8a1ac1..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/write.proto +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.firestore.v1beta1; - -import "google/firestore/v1beta1/common.proto"; -import "google/firestore/v1beta1/document.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Firestore.V1Beta1"; -option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore"; -option java_multiple_files = true; -option java_outer_classname = "WriteProto"; -option java_package = "com.google.firestore.v1beta1"; -option objc_class_prefix = "GCFS"; -option php_namespace = "Google\\Cloud\\Firestore\\V1beta1"; - -// A write on a document. -message Write { - // The operation to execute. - oneof operation { - // A document to write. - Document update = 1; - - // A document name to delete. In the format: - // `projects/{project_id}/databases/{database_id}/documents/{document_path}`. - string delete = 2; - - // Applies a transformation to a document. - // At most one `transform` per document is allowed in a given request. - // An `update` cannot follow a `transform` on the same document in a given - // request. 
- DocumentTransform transform = 6; - } - - // The fields to update in this write. - // - // This field can be set only when the operation is `update`. - // If the mask is not set for an `update` and the document exists, any - // existing data will be overwritten. - // If the mask is set and the document on the server has fields not covered by - // the mask, they are left unchanged. - // Fields referenced in the mask, but not present in the input document, are - // deleted from the document on the server. - // The field paths in this mask must not contain a reserved field name. - DocumentMask update_mask = 3; - - // An optional precondition on the document. - // - // The write will fail if this is set and not met by the target document. - Precondition current_document = 4; -} - -// A transformation of a document. -message DocumentTransform { - // A transformation of a field of the document. - message FieldTransform { - // A value that is calculated by the server. - enum ServerValue { - // Unspecified. This value must not be used. - SERVER_VALUE_UNSPECIFIED = 0; - - // The time at which the server processed the request, with millisecond - // precision. - REQUEST_TIME = 1; - } - - // The path of the field. See [Document.fields][google.firestore.v1beta1.Document.fields] for the field path syntax - // reference. - string field_path = 1; - - // The transformation to apply on the field. - oneof transform_type { - // Sets the field to the given server value. - ServerValue set_to_server_value = 2; - - // Adds the given value to the field's current value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the given value. - // If either of the given value or the current field value are doubles, - // both values will be interpreted as doubles. Double arithmetic and - // representation of double values follow IEEE 754 semantics. - // If there is positive/negative integer overflow, the field is resolved - // to the largest magnitude positive/negative integer. - Value increment = 3; - - // Sets the field to the maximum of its current value and the given value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the given value. - // If a maximum operation is applied where the field and the input value - // are of mixed types (that is - one is an integer and one is a double) - // the field takes on the type of the larger operand. If the operands are - // equivalent (e.g. 3 and 3.0), the field does not change. - // 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and - // zero input value is always the stored value. - // The maximum of any numeric value x and NaN is NaN. - Value maximum = 4; - - // Sets the field to the minimum of its current value and the given value. - // - // This must be an integer or a double value. - // If the field is not an integer or double, or if the field does not yet - // exist, the transformation will set the field to the input value. - // If a minimum operation is applied where the field and the input value - // are of mixed types (that is - one is an integer and one is a double) - // the field takes on the type of the smaller operand. If the operands are - // equivalent (e.g. 3 and 3.0), the field does not change. - // 0, 0.0, and -0.0 are all zero. 
The minimum of a zero stored value and - // zero input value is always the stored value. - // The minimum of any numeric value x and NaN is NaN. - Value minimum = 5; - - // Append the given elements in order if they are not already present in - // the current field value. - // If the field is not an array, or if the field does not yet exist, it is - // first set to the empty array. - // - // Equivalent numbers of different types (e.g. 3L and 3.0) are - // considered equal when checking if a value is missing. - // NaN is equal to NaN, and Null is equal to Null. - // If the input contains multiple equivalent values, only the first will - // be considered. - // - // The corresponding transform_result will be the null value. - ArrayValue append_missing_elements = 6; - - // Remove all of the given elements from the array in the field. - // If the field is not an array, or if the field does not yet exist, it is - // set to the empty array. - // - // Equivalent numbers of the different types (e.g. 3L and 3.0) are - // considered equal when deciding whether an element should be removed. - // NaN is equal to NaN, and Null is equal to Null. - // This will remove all equivalent values if there are duplicates. - // - // The corresponding transform_result will be the null value. - ArrayValue remove_all_from_array = 7; - } - } - - // The name of the document to transform. - string document = 1; - - // The list of transformations to apply to the fields of the document, in - // order. - // This must not be empty. - repeated FieldTransform field_transforms = 2; -} - -// The result of applying a write. -message WriteResult { - // The last update time of the document after applying the write. Not set - // after a `delete`. - // - // If the write did not actually change the document, this will be the - // previous update_time. - google.protobuf.Timestamp update_time = 1; - - // The results of applying each [DocumentTransform.FieldTransform][google.firestore.v1beta1.DocumentTransform.FieldTransform], in the - // same order. - repeated Value transform_results = 2; -} - -// A [Document][google.firestore.v1beta1.Document] has changed. -// -// May be the result of multiple [writes][google.firestore.v1beta1.Write], including deletes, that -// ultimately resulted in a new value for the [Document][google.firestore.v1beta1.Document]. -// -// Multiple [DocumentChange][google.firestore.v1beta1.DocumentChange] messages may be returned for the same logical -// change, if multiple targets are affected. -message DocumentChange { - // The new state of the [Document][google.firestore.v1beta1.Document]. - // - // If `mask` is set, contains only fields that were updated or added. - Document document = 1; - - // A set of target IDs of targets that match this document. - repeated int32 target_ids = 5; - - // A set of target IDs for targets that no longer match this document. - repeated int32 removed_target_ids = 6; -} - -// A [Document][google.firestore.v1beta1.Document] has been deleted. -// -// May be the result of multiple [writes][google.firestore.v1beta1.Write], including updates, the -// last of which deleted the [Document][google.firestore.v1beta1.Document]. -// -// Multiple [DocumentDelete][google.firestore.v1beta1.DocumentDelete] messages may be returned for the same logical -// delete, if multiple targets are affected. -message DocumentDelete { - // The resource name of the [Document][google.firestore.v1beta1.Document] that was deleted. 
- string document = 1; - - // A set of target IDs for targets that previously matched this entity. - repeated int32 removed_target_ids = 6; - - // The read timestamp at which the delete was observed. - // - // Greater or equal to the `commit_time` of the delete. - google.protobuf.Timestamp read_time = 4; -} - -// A [Document][google.firestore.v1beta1.Document] has been removed from the view of the targets. -// -// Sent if the document is no longer relevant to a target and is out of view. -// Can be sent instead of a DocumentDelete or a DocumentChange if the server -// can not send the new value of the document. -// -// Multiple [DocumentRemove][google.firestore.v1beta1.DocumentRemove] messages may be returned for the same logical -// write or delete, if multiple targets are affected. -message DocumentRemove { - // The resource name of the [Document][google.firestore.v1beta1.Document] that has gone out of view. - string document = 1; - - // A set of target IDs for targets that previously matched this document. - repeated int32 removed_target_ids = 2; - - // The read timestamp at which the remove was observed. - // - // Greater or equal to the `commit_time` of the change/delete/remove. - google.protobuf.Timestamp read_time = 4; -} - -// A digest of all the documents that match a given target. -message ExistenceFilter { - // The target ID to which this filter applies. - int32 target_id = 1; - - // The total count of documents that match [target_id][google.firestore.v1beta1.ExistenceFilter.target_id]. - // - // If different from the count of documents in the client that match, the - // client must manually determine which documents no longer match the target. - int32 count = 2; -} diff --git a/firestore/google/cloud/firestore_v1beta1/proto/write_pb2.py b/firestore/google/cloud/firestore_v1beta1/proto/write_pb2.py deleted file mode 100644 index f9b0aa95cb69..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/write_pb2.py +++ /dev/null @@ -1,1156 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
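The FieldTransform operations documented in the deleted write.proto (increment, maximum, minimum, append_missing_elements, remove_all_from_array, and the REQUEST_TIME server value) surface in the Python client as field sentinels passed to update(). A rough sketch, assuming the current google-cloud-firestore package; the collection, document, and field names below are made up for illustration:

from google.cloud import firestore

client = firestore.Client()  # uses application-default credentials
doc = client.collection("stats").document("counters")  # illustrative names only
doc.update(
    {
        "views": firestore.Increment(1),                # FieldTransform.increment
        "high_score": firestore.Maximum(42),            # FieldTransform.maximum
        "low_score": firestore.Minimum(7),              # FieldTransform.minimum
        "tags": firestore.ArrayUnion(["beta"]),         # append_missing_elements
        "old_tags": firestore.ArrayRemove(["legacy"]),  # remove_all_from_array
        "updated_at": firestore.SERVER_TIMESTAMP,       # ServerValue.REQUEST_TIME
    }
)
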
-# source: google/cloud/firestore_v1beta1/proto/write.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.cloud.firestore_v1beta1.proto import ( - common_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2, -) -from google.cloud.firestore_v1beta1.proto import ( - document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2, -) -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/firestore_v1beta1/proto/write.proto", - package="google.firestore.v1beta1", - syntax="proto3", - serialized_options=_b( - "\n\034com.google.firestore.v1beta1B\nWriteProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\242\002\004GCFS\252\002\036Google.Cloud.Firestore.V1Beta1\312\002\036Google\\Cloud\\Firestore\\V1beta1" - ), - serialized_pb=_b( - '\n0google/cloud/firestore_v1beta1/proto/write.proto\x12\x18google.firestore.v1beta1\x1a\x31google/cloud/firestore_v1beta1/proto/common.proto\x1a\x33google/cloud/firestore_v1beta1/proto/document.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\x9d\x02\n\x05Write\x12\x34\n\x06update\x18\x01 \x01(\x0b\x32".google.firestore.v1beta1.DocumentH\x00\x12\x10\n\x06\x64\x65lete\x18\x02 \x01(\tH\x00\x12@\n\ttransform\x18\x06 \x01(\x0b\x32+.google.firestore.v1beta1.DocumentTransformH\x00\x12;\n\x0bupdate_mask\x18\x03 \x01(\x0b\x32&.google.firestore.v1beta1.DocumentMask\x12@\n\x10\x63urrent_document\x18\x04 \x01(\x0b\x32&.google.firestore.v1beta1.PreconditionB\x0b\n\toperation"\x88\x05\n\x11\x44ocumentTransform\x12\x10\n\x08\x64ocument\x18\x01 \x01(\t\x12T\n\x10\x66ield_transforms\x18\x02 \x03(\x0b\x32:.google.firestore.v1beta1.DocumentTransform.FieldTransform\x1a\x8a\x04\n\x0e\x46ieldTransform\x12\x12\n\nfield_path\x18\x01 \x01(\t\x12\x65\n\x13set_to_server_value\x18\x02 \x01(\x0e\x32\x46.google.firestore.v1beta1.DocumentTransform.FieldTransform.ServerValueH\x00\x12\x34\n\tincrement\x18\x03 \x01(\x0b\x32\x1f.google.firestore.v1beta1.ValueH\x00\x12\x32\n\x07maximum\x18\x04 \x01(\x0b\x32\x1f.google.firestore.v1beta1.ValueH\x00\x12\x32\n\x07minimum\x18\x05 \x01(\x0b\x32\x1f.google.firestore.v1beta1.ValueH\x00\x12G\n\x17\x61ppend_missing_elements\x18\x06 \x01(\x0b\x32$.google.firestore.v1beta1.ArrayValueH\x00\x12\x45\n\x15remove_all_from_array\x18\x07 \x01(\x0b\x32$.google.firestore.v1beta1.ArrayValueH\x00"=\n\x0bServerValue\x12\x1c\n\x18SERVER_VALUE_UNSPECIFIED\x10\x00\x12\x10\n\x0cREQUEST_TIME\x10\x01\x42\x10\n\x0etransform_type"z\n\x0bWriteResult\x12/\n\x0bupdate_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x11transform_results\x18\x02 \x03(\x0b\x32\x1f.google.firestore.v1beta1.Value"v\n\x0e\x44ocumentChange\x12\x34\n\x08\x64ocument\x18\x01 \x01(\x0b\x32".google.firestore.v1beta1.Document\x12\x12\n\ntarget_ids\x18\x05 \x03(\x05\x12\x1a\n\x12removed_target_ids\x18\x06 \x03(\x05"m\n\x0e\x44ocumentDelete\x12\x10\n\x08\x64ocument\x18\x01 \x01(\t\x12\x1a\n\x12removed_target_ids\x18\x06 \x03(\x05\x12-\n\tread_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"m\n\x0e\x44ocumentRemove\x12\x10\n\x08\x64ocument\x18\x01 \x01(\t\x12\x1a\n\x12removed_target_ids\x18\x02 \x03(\x05\x12-\n\tread_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"3\n\x0f\x45xistenceFilter\x12\x11\n\ttarget_id\x18\x01 \x01(\x05\x12\r\n\x05\x63ount\x18\x02 \x01(\x05\x42\xb8\x01\n\x1c\x63om.google.firestore.v1beta1B\nWriteProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xa2\x02\x04GCFS\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1\xca\x02\x1eGoogle\\Cloud\\Firestore\\V1beta1b\x06proto3' - ), - dependencies=[ - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE = _descriptor.EnumDescriptor( - name="ServerValue", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.ServerValue", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="SERVER_VALUE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="REQUEST_TIME", index=1, number=1, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1103, - serialized_end=1164, -) -_sym_db.RegisterEnumDescriptor(_DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE) - - -_WRITE = _descriptor.Descriptor( - name="Write", - full_name="google.firestore.v1beta1.Write", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="update", - full_name="google.firestore.v1beta1.Write.update", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete", - full_name="google.firestore.v1beta1.Write.delete", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transform", - full_name="google.firestore.v1beta1.Write.transform", - index=2, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.firestore.v1beta1.Write.update_mask", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="current_document", - full_name="google.firestore.v1beta1.Write.current_document", - index=4, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="operation", - full_name="google.firestore.v1beta1.Write.operation", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=246, - serialized_end=531, -) - - -_DOCUMENTTRANSFORM_FIELDTRANSFORM = _descriptor.Descriptor( - name="FieldTransform", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="field_path", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.field_path", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="set_to_server_value", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.set_to_server_value", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="increment", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.increment", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="maximum", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.maximum", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="minimum", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.minimum", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="append_missing_elements", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.append_missing_elements", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="remove_all_from_array", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.remove_all_from_array", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="transform_type", - full_name="google.firestore.v1beta1.DocumentTransform.FieldTransform.transform_type", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=660, - serialized_end=1182, -) - -_DOCUMENTTRANSFORM = _descriptor.Descriptor( - name="DocumentTransform", - full_name="google.firestore.v1beta1.DocumentTransform", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="document", - full_name="google.firestore.v1beta1.DocumentTransform.document", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="field_transforms", - full_name="google.firestore.v1beta1.DocumentTransform.field_transforms", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_DOCUMENTTRANSFORM_FIELDTRANSFORM], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=534, - serialized_end=1182, -) - - -_WRITERESULT = _descriptor.Descriptor( - name="WriteResult", - full_name="google.firestore.v1beta1.WriteResult", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="update_time", - full_name="google.firestore.v1beta1.WriteResult.update_time", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transform_results", - full_name="google.firestore.v1beta1.WriteResult.transform_results", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1184, - serialized_end=1306, -) - - -_DOCUMENTCHANGE = _descriptor.Descriptor( - name="DocumentChange", - full_name="google.firestore.v1beta1.DocumentChange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="document", - full_name="google.firestore.v1beta1.DocumentChange.document", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - 
_descriptor.FieldDescriptor( - name="target_ids", - full_name="google.firestore.v1beta1.DocumentChange.target_ids", - index=1, - number=5, - type=5, - cpp_type=1, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="removed_target_ids", - full_name="google.firestore.v1beta1.DocumentChange.removed_target_ids", - index=2, - number=6, - type=5, - cpp_type=1, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1308, - serialized_end=1426, -) - - -_DOCUMENTDELETE = _descriptor.Descriptor( - name="DocumentDelete", - full_name="google.firestore.v1beta1.DocumentDelete", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="document", - full_name="google.firestore.v1beta1.DocumentDelete.document", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="removed_target_ids", - full_name="google.firestore.v1beta1.DocumentDelete.removed_target_ids", - index=1, - number=6, - type=5, - cpp_type=1, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.DocumentDelete.read_time", - index=2, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1428, - serialized_end=1537, -) - - -_DOCUMENTREMOVE = _descriptor.Descriptor( - name="DocumentRemove", - full_name="google.firestore.v1beta1.DocumentRemove", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="document", - full_name="google.firestore.v1beta1.DocumentRemove.document", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="removed_target_ids", - full_name="google.firestore.v1beta1.DocumentRemove.removed_target_ids", - index=1, - number=2, - type=5, - cpp_type=1, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_time", - full_name="google.firestore.v1beta1.DocumentRemove.read_time", - index=2, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1539, - serialized_end=1648, -) - - -_EXISTENCEFILTER = _descriptor.Descriptor( - name="ExistenceFilter", - full_name="google.firestore.v1beta1.ExistenceFilter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="target_id", - full_name="google.firestore.v1beta1.ExistenceFilter.target_id", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="count", - full_name="google.firestore.v1beta1.ExistenceFilter.count", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1650, - serialized_end=1701, -) - -_WRITE.fields_by_name[ - "update" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_WRITE.fields_by_name["transform"].message_type = _DOCUMENTTRANSFORM -_WRITE.fields_by_name[ - "update_mask" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._DOCUMENTMASK -) -_WRITE.fields_by_name[ - "current_document" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2._PRECONDITION -) -_WRITE.oneofs_by_name["operation"].fields.append(_WRITE.fields_by_name["update"]) -_WRITE.fields_by_name["update"].containing_oneof = _WRITE.oneofs_by_name["operation"] -_WRITE.oneofs_by_name["operation"].fields.append(_WRITE.fields_by_name["delete"]) -_WRITE.fields_by_name["delete"].containing_oneof = _WRITE.oneofs_by_name["operation"] -_WRITE.oneofs_by_name["operation"].fields.append(_WRITE.fields_by_name["transform"]) -_WRITE.fields_by_name["transform"].containing_oneof = _WRITE.oneofs_by_name["operation"] -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "set_to_server_value" -].enum_type = _DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "increment" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "maximum" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "minimum" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "append_missing_elements" 
-].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._ARRAYVALUE -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "remove_all_from_array" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._ARRAYVALUE -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.containing_type = _DOCUMENTTRANSFORM -_DOCUMENTTRANSFORM_FIELDTRANSFORM_SERVERVALUE.containing_type = ( - _DOCUMENTTRANSFORM_FIELDTRANSFORM -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append( - _DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["set_to_server_value"] -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "set_to_server_value" -].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"] -_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append( - _DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["increment"] -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "increment" -].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"] -_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append( - _DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["maximum"] -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "maximum" -].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"] -_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append( - _DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["minimum"] -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "minimum" -].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"] -_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append( - _DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["append_missing_elements"] -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "append_missing_elements" -].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"] -_DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"].fields.append( - _DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name["remove_all_from_array"] -) -_DOCUMENTTRANSFORM_FIELDTRANSFORM.fields_by_name[ - "remove_all_from_array" -].containing_oneof = _DOCUMENTTRANSFORM_FIELDTRANSFORM.oneofs_by_name["transform_type"] -_DOCUMENTTRANSFORM.fields_by_name[ - "field_transforms" -].message_type = _DOCUMENTTRANSFORM_FIELDTRANSFORM -_WRITERESULT.fields_by_name[ - "update_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_WRITERESULT.fields_by_name[ - "transform_results" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._VALUE -) -_DOCUMENTCHANGE.fields_by_name[ - "document" -].message_type = ( - google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2._DOCUMENT -) -_DOCUMENTDELETE.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_DOCUMENTREMOVE.fields_by_name[ - "read_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["Write"] = _WRITE -DESCRIPTOR.message_types_by_name["DocumentTransform"] = _DOCUMENTTRANSFORM -DESCRIPTOR.message_types_by_name["WriteResult"] = _WRITERESULT -DESCRIPTOR.message_types_by_name["DocumentChange"] = _DOCUMENTCHANGE -DESCRIPTOR.message_types_by_name["DocumentDelete"] = _DOCUMENTDELETE -DESCRIPTOR.message_types_by_name["DocumentRemove"] = _DOCUMENTREMOVE 
-DESCRIPTOR.message_types_by_name["ExistenceFilter"] = _EXISTENCEFILTER -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Write = _reflection.GeneratedProtocolMessageType( - "Write", - (_message.Message,), - dict( - DESCRIPTOR=_WRITE, - __module__="google.cloud.firestore_v1beta1.proto.write_pb2", - __doc__="""A write on a document. - - - Attributes: - operation: - The operation to execute. - update: - A document to write. - delete: - A document name to delete. In the format: ``projects/{project_ - id}/databases/{database_id}/documents/{document_path}``. - transform: - Applies a transformation to a document. At most one - ``transform`` per document is allowed in a given request. An - ``update`` cannot follow a ``transform`` on the same document - in a given request. - update_mask: - The fields to update in this write. This field can be set - only when the operation is ``update``. If the mask is not set - for an ``update`` and the document exists, any existing data - will be overwritten. If the mask is set and the document on - the server has fields not covered by the mask, they are left - unchanged. Fields referenced in the mask, but not present in - the input document, are deleted from the document on the - server. The field paths in this mask must not contain a - reserved field name. - current_document: - An optional precondition on the document. The write will fail - if this is set and not met by the target document. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Write) - ), -) -_sym_db.RegisterMessage(Write) - -DocumentTransform = _reflection.GeneratedProtocolMessageType( - "DocumentTransform", - (_message.Message,), - dict( - FieldTransform=_reflection.GeneratedProtocolMessageType( - "FieldTransform", - (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTTRANSFORM_FIELDTRANSFORM, - __module__="google.cloud.firestore_v1beta1.proto.write_pb2", - __doc__="""A transformation of a field of the document. - - - Attributes: - field_path: - The path of the field. See - [Document.fields][google.firestore.v1beta1.Document.fields] - for the field path syntax reference. - transform_type: - The transformation to apply on the field. - set_to_server_value: - Sets the field to the given server value. - increment: - Adds the given value to the field's current value. This must - be an integer or a double value. If the field is not an - integer or double, or if the field does not yet exist, the - transformation will set the field to the given value. If - either of the given value or the current field value are - doubles, both values will be interpreted as doubles. Double - arithmetic and representation of double values follow IEEE 754 - semantics. If there is positive/negative integer overflow, the - field is resolved to the largest magnitude positive/negative - integer. - maximum: - Sets the field to the maximum of its current value and the - given value. This must be an integer or a double value. If - the field is not an integer or double, or if the field does - not yet exist, the transformation will set the field to the - given value. If a maximum operation is applied where the field - and the input value are of mixed types (that is - one is an - integer and one is a double) the field takes on the type of - the larger operand. If the operands are equivalent (e.g. 3 and - 3.0), the field does not change. 0, 0.0, and -0.0 are all - zero. The maximum of a zero stored value and zero input value - is always the stored value. The maximum of any numeric value x - and NaN is NaN. 
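For orientation, here is a minimal sketch (not part of the deleted file) of how the generated messages registered above fit together. The import paths are the v1beta1 proto modules this descriptor file depends on; the project, database, document and field names are invented for illustration.

    from google.cloud.firestore_v1beta1.proto import document_pb2, write_pb2

    # One FieldTransform per transformed field: ``increment`` takes a Value
    # message, ``set_to_server_value`` takes the ServerValue enum declared above.
    transform = write_pb2.DocumentTransform(
        document="projects/my-project/databases/(default)/documents/games/game-1",
        field_transforms=[
            write_pb2.DocumentTransform.FieldTransform(
                field_path="score",
                increment=document_pb2.Value(integer_value=1),
            ),
            write_pb2.DocumentTransform.FieldTransform(
                field_path="last_played",
                set_to_server_value=(
                    write_pb2.DocumentTransform.FieldTransform.REQUEST_TIME
                ),
            ),
        ],
    )

    # A Write carries exactly one member of the ``operation`` oneof.
    write = write_pb2.Write(transform=transform)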
- minimum: - Sets the field to the minimum of its current value and the - given value. This must be an integer or a double value. If - the field is not an integer or double, or if the field does - not yet exist, the transformation will set the field to the - input value. If a minimum operation is applied where the field - and the input value are of mixed types (that is - one is an - integer and one is a double) the field takes on the type of - the smaller operand. If the operands are equivalent (e.g. 3 - and 3.0), the field does not change. 0, 0.0, and -0.0 are all - zero. The minimum of a zero stored value and zero input value - is always the stored value. The minimum of any numeric value x - and NaN is NaN. - append_missing_elements: - Append the given elements in order if they are not already - present in the current field value. If the field is not an - array, or if the field does not yet exist, it is first set to - the empty array. Equivalent numbers of different types (e.g. - 3L and 3.0) are considered equal when checking if a value is - missing. NaN is equal to NaN, and Null is equal to Null. If - the input contains multiple equivalent values, only the first - will be considered. The corresponding transform\_result will - be the null value. - remove_all_from_array: - Remove all of the given elements from the array in the field. - If the field is not an array, or if the field does not yet - exist, it is set to the empty array. Equivalent numbers of - the different types (e.g. 3L and 3.0) are considered equal - when deciding whether an element should be removed. NaN is - equal to NaN, and Null is equal to Null. This will remove all - equivalent values if there are duplicates. The corresponding - transform\_result will be the null value. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentTransform.FieldTransform) - ), - ), - DESCRIPTOR=_DOCUMENTTRANSFORM, - __module__="google.cloud.firestore_v1beta1.proto.write_pb2", - __doc__="""A transformation of a document. - - - Attributes: - document: - The name of the document to transform. - field_transforms: - The list of transformations to apply to the fields of the - document, in order. This must not be empty. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentTransform) - ), -) -_sym_db.RegisterMessage(DocumentTransform) -_sym_db.RegisterMessage(DocumentTransform.FieldTransform) - -WriteResult = _reflection.GeneratedProtocolMessageType( - "WriteResult", - (_message.Message,), - dict( - DESCRIPTOR=_WRITERESULT, - __module__="google.cloud.firestore_v1beta1.proto.write_pb2", - __doc__="""The result of applying a write. - - - Attributes: - update_time: - The last update time of the document after applying the write. - Not set after a ``delete``. If the write did not actually - change the document, this will be the previous update\_time. - transform_results: - The results of applying each [DocumentTransform.FieldTransform - ][google.firestore.v1beta1.DocumentTransform.FieldTransform], - in the same order. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.WriteResult) - ), -) -_sym_db.RegisterMessage(WriteResult) - -DocumentChange = _reflection.GeneratedProtocolMessageType( - "DocumentChange", - (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTCHANGE, - __module__="google.cloud.firestore_v1beta1.proto.write_pb2", - __doc__="""A [Document][google.firestore.v1beta1.Document] has - changed. 
- - May be the result of multiple [writes][google.firestore.v1beta1.Write], - including deletes, that ultimately resulted in a new value for the - [Document][google.firestore.v1beta1.Document]. - - Multiple [DocumentChange][google.firestore.v1beta1.DocumentChange] - messages may be returned for the same logical change, if multiple - targets are affected. - - - Attributes: - document: - The new state of the - [Document][google.firestore.v1beta1.Document]. If ``mask`` is - set, contains only fields that were updated or added. - target_ids: - A set of target IDs of targets that match this document. - removed_target_ids: - A set of target IDs for targets that no longer match this - document. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentChange) - ), -) -_sym_db.RegisterMessage(DocumentChange) - -DocumentDelete = _reflection.GeneratedProtocolMessageType( - "DocumentDelete", - (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTDELETE, - __module__="google.cloud.firestore_v1beta1.proto.write_pb2", - __doc__="""A [Document][google.firestore.v1beta1.Document] has been - deleted. - - May be the result of multiple [writes][google.firestore.v1beta1.Write], - including updates, the last of which deleted the - [Document][google.firestore.v1beta1.Document]. - - Multiple [DocumentDelete][google.firestore.v1beta1.DocumentDelete] - messages may be returned for the same logical delete, if multiple - targets are affected. - - - Attributes: - document: - The resource name of the - [Document][google.firestore.v1beta1.Document] that was - deleted. - removed_target_ids: - A set of target IDs for targets that previously matched this - entity. - read_time: - The read timestamp at which the delete was observed. Greater - or equal to the ``commit_time`` of the delete. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentDelete) - ), -) -_sym_db.RegisterMessage(DocumentDelete) - -DocumentRemove = _reflection.GeneratedProtocolMessageType( - "DocumentRemove", - (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTREMOVE, - __module__="google.cloud.firestore_v1beta1.proto.write_pb2", - __doc__="""A [Document][google.firestore.v1beta1.Document] has been - removed from the view of the targets. - - Sent if the document is no longer relevant to a target and is out of - view. Can be sent instead of a DocumentDelete or a DocumentChange if the - server can not send the new value of the document. - - Multiple [DocumentRemove][google.firestore.v1beta1.DocumentRemove] - messages may be returned for the same logical write or delete, if - multiple targets are affected. - - - Attributes: - document: - The resource name of the - [Document][google.firestore.v1beta1.Document] that has gone - out of view. - removed_target_ids: - A set of target IDs for targets that previously matched this - document. - read_time: - The read timestamp at which the remove was observed. Greater - or equal to the ``commit_time`` of the change/delete/remove. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentRemove) - ), -) -_sym_db.RegisterMessage(DocumentRemove) - -ExistenceFilter = _reflection.GeneratedProtocolMessageType( - "ExistenceFilter", - (_message.Message,), - dict( - DESCRIPTOR=_EXISTENCEFILTER, - __module__="google.cloud.firestore_v1beta1.proto.write_pb2", - __doc__="""A digest of all the documents that match a given target. - - - Attributes: - target_id: - The target ID to which this filter applies. 
- count: - The total count of documents that match [target\_id][google.fi - restore.v1beta1.ExistenceFilter.target\_id]. If different - from the count of documents in the client that match, the - client must manually determine which documents no longer match - the target. - """, - # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ExistenceFilter) - ), -) -_sym_db.RegisterMessage(ExistenceFilter) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/firestore/google/cloud/firestore_v1beta1/proto/write_pb2_grpc.py b/firestore/google/cloud/firestore_v1beta1/proto/write_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/proto/write_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/firestore/google/cloud/firestore_v1beta1/query.py b/firestore/google/cloud/firestore_v1beta1/query.py deleted file mode 100644 index 70dafb055760..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/query.py +++ /dev/null @@ -1,971 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Classes for representing queries for the Google Cloud Firestore API. - -A :class:`~google.cloud.firestore_v1beta1.query.Query` can be created directly -from a :class:`~google.cloud.firestore_v1beta1.collection.Collection`, -and that can be a more common way to create a query than direct usage of the -constructor. -""" -import copy -import math -import warnings - -from google.protobuf import wrappers_pb2 -import six - -from google.cloud.firestore_v1beta1 import _helpers -from google.cloud.firestore_v1beta1 import document -from google.cloud.firestore_v1beta1 import field_path as field_path_module -from google.cloud.firestore_v1beta1 import transforms -from google.cloud.firestore_v1beta1.gapic import enums -from google.cloud.firestore_v1beta1.proto import query_pb2 -from google.cloud.firestore_v1beta1.order import Order -from google.cloud.firestore_v1beta1.watch import Watch - -_EQ_OP = "==" -_operator_enum = enums.StructuredQuery.FieldFilter.Operator -_COMPARISON_OPERATORS = { - "<": _operator_enum.LESS_THAN, - "<=": _operator_enum.LESS_THAN_OR_EQUAL, - _EQ_OP: _operator_enum.EQUAL, - ">=": _operator_enum.GREATER_THAN_OR_EQUAL, - ">": _operator_enum.GREATER_THAN, - "array_contains": _operator_enum.ARRAY_CONTAINS, -} -_BAD_OP_STRING = "Operator string {!r} is invalid. Valid choices are: {}." -_BAD_OP_NAN_NULL = 'Only an equality filter ("==") can be used with None or NaN values' -_INVALID_WHERE_TRANSFORM = "Transforms cannot be used as where values." -_BAD_DIR_STRING = "Invalid direction {!r}. Must be one of {!r} or {!r}." -_INVALID_CURSOR_TRANSFORM = "Transforms cannot be used as cursor values." -_MISSING_ORDER_BY = ( - 'The "order by" field path {!r} is not present in the cursor data {!r}. 
' - "All fields sent to ``order_by()`` must be present in the fields " - "if passed to one of ``start_at()`` / ``start_after()`` / " - "``end_before()`` / ``end_at()`` to define a cursor." -) -_NO_ORDERS_FOR_CURSOR = ( - "Attempting to create a cursor with no fields to order on. " - "When defining a cursor with one of ``start_at()`` / ``start_after()`` / " - "``end_before()`` / ``end_at()``, all fields in the cursor must " - "come from fields set in ``order_by()``." -) -_MISMATCH_CURSOR_W_ORDER_BY = "The cursor {!r} does not match the order fields {!r}." - - -class Query(object): - """Represents a query to the Firestore API. - - Instances of this class are considered immutable: all methods that - would modify an instance instead return a new instance. - - Args: - parent (~.firestore_v1beta1.collection.Collection): The collection - that this query applies to. - projection (Optional[google.cloud.proto.firestore.v1beta1.\ - query_pb2.StructuredQuery.Projection]): A projection of document - fields to limit the query results to. - field_filters (Optional[Tuple[google.cloud.proto.firestore.v1beta1.\ - query_pb2.StructuredQuery.FieldFilter, ...]]): The filters to be - applied in the query. - orders (Optional[Tuple[google.cloud.proto.firestore.v1beta1.\ - query_pb2.StructuredQuery.Order, ...]]): The "order by" entries - to use in the query. - limit (Optional[int]): The maximum number of documents the - query is allowed to return. - offset (Optional[int]): The number of results to skip. - start_at (Optional[Tuple[dict, bool]]): Two-tuple of - - * a mapping of fields. Any field that is present in this mapping - must also be present in ``orders`` - * an ``after`` flag - - The fields and the flag combine to form a cursor used as - a starting point in a query result set. If the ``after`` - flag is :data:`True`, the results will start just after any - documents which have fields matching the cursor, otherwise - any matching documents will be included in the result set. - When the query is formed, the document values - will be used in the order given by ``orders``. - end_at (Optional[Tuple[dict, bool]]): Two-tuple of - - * a mapping of fields. Any field that is present in this mapping - must also be present in ``orders`` - * a ``before`` flag - - The fields and the flag combine to form a cursor used as - an ending point in a query result set. If the ``before`` - flag is :data:`True`, the results will end just before any - documents which have fields matching the cursor, otherwise - any matching documents will be included in the result set. - When the query is formed, the document values - will be used in the order given by ``orders``. 
- """ - - ASCENDING = "ASCENDING" - """str: Sort query results in ascending order on a field.""" - DESCENDING = "DESCENDING" - """str: Sort query results in descending order on a field.""" - - def __init__( - self, - parent, - projection=None, - field_filters=(), - orders=(), - limit=None, - offset=None, - start_at=None, - end_at=None, - ): - self._parent = parent - self._projection = projection - self._field_filters = field_filters - self._orders = orders - self._limit = limit - self._offset = offset - self._start_at = start_at - self._end_at = end_at - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - self._parent == other._parent - and self._projection == other._projection - and self._field_filters == other._field_filters - and self._orders == other._orders - and self._limit == other._limit - and self._offset == other._offset - and self._start_at == other._start_at - and self._end_at == other._end_at - ) - - @property - def _client(self): - """The client of the parent collection. - - Returns: - ~.firestore_v1beta1.client.Client: The client that owns - this query. - """ - return self._parent._client - - def select(self, field_paths): - """Project documents matching query to a limited set of fields. - - See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path` - for more information on **field paths**. - - If the current query already has a projection set (i.e. has already - called :meth:`~google.cloud.firestore_v1beta1.query.Query.select`), - this will overwrite it. - - Args: - field_paths (Iterable[str, ...]): An iterable of field paths - (``.``-delimited list of field names) to use as a projection - of document fields in the query results. - - Returns: - ~.firestore_v1beta1.query.Query: A "projected" query. Acts as - a copy of the current query, modified with the newly added - projection. - Raises: - ValueError: If any ``field_path`` is invalid. - """ - field_paths = list(field_paths) - for field_path in field_paths: - field_path_module.split_field_path(field_path) # raises - - new_projection = query_pb2.StructuredQuery.Projection( - fields=[ - query_pb2.StructuredQuery.FieldReference(field_path=field_path) - for field_path in field_paths - ] - ) - return self.__class__( - self._parent, - projection=new_projection, - field_filters=self._field_filters, - orders=self._orders, - limit=self._limit, - offset=self._offset, - start_at=self._start_at, - end_at=self._end_at, - ) - - def where(self, field_path, op_string, value): - """Filter the query on a field. - - See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path` - for more information on **field paths**. - - Returns a new :class:`~google.cloud.firestore_v1beta1.query.Query` - that filters on a specific field path, according to an operation - (e.g. ``==`` or "equals") and a particular value to be paired with - that operation. - - Args: - field_path (str): A field path (``.``-delimited list of - field names) for the field to filter on. - op_string (str): A comparison operation in the form of a string. - Acceptable values are ``<``, ``<=``, ``==``, ``>=`` - and ``>``. - value (Any): The value to compare the field against in the filter. - If ``value`` is :data:`None` or a NaN, then ``==`` is the only - allowed operation. - - Returns: - ~.firestore_v1beta1.query.Query: A filtered query. Acts as a - copy of the current query, modified with the newly added filter. - - Raises: - ValueError: If ``field_path`` is invalid. 
- ValueError: If ``value`` is a NaN or :data:`None` and - ``op_string`` is not ``==``. - """ - field_path_module.split_field_path(field_path) # raises - - if value is None: - if op_string != _EQ_OP: - raise ValueError(_BAD_OP_NAN_NULL) - filter_pb = query_pb2.StructuredQuery.UnaryFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=enums.StructuredQuery.UnaryFilter.Operator.IS_NULL, - ) - elif _isnan(value): - if op_string != _EQ_OP: - raise ValueError(_BAD_OP_NAN_NULL) - filter_pb = query_pb2.StructuredQuery.UnaryFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=enums.StructuredQuery.UnaryFilter.Operator.IS_NAN, - ) - elif isinstance(value, (transforms.Sentinel, transforms._ValueList)): - raise ValueError(_INVALID_WHERE_TRANSFORM) - else: - filter_pb = query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=_enum_from_op_string(op_string), - value=_helpers.encode_value(value), - ) - - new_filters = self._field_filters + (filter_pb,) - return self.__class__( - self._parent, - projection=self._projection, - field_filters=new_filters, - orders=self._orders, - limit=self._limit, - offset=self._offset, - start_at=self._start_at, - end_at=self._end_at, - ) - - @staticmethod - def _make_order(field_path, direction): - """Helper for :meth:`order_by`.""" - return query_pb2.StructuredQuery.Order( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - direction=_enum_from_direction(direction), - ) - - def order_by(self, field_path, direction=ASCENDING): - """Modify the query to add an order clause on a specific field. - - See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path` - for more information on **field paths**. - - Successive :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by` calls - will further refine the ordering of results returned by the query - (i.e. the new "order by" fields will be added to existing ones). - - Args: - field_path (str): A field path (``.``-delimited list of - field names) on which to order the query results. - direction (Optional[str]): The direction to order by. Must be one - of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to - :attr:`ASCENDING`. - - Returns: - ~.firestore_v1beta1.query.Query: An ordered query. Acts as a - copy of the current query, modified with the newly added - "order by" constraint. - - Raises: - ValueError: If ``field_path`` is invalid. - ValueError: If ``direction`` is not one of :attr:`ASCENDING` or - :attr:`DESCENDING`. - """ - field_path_module.split_field_path(field_path) # raises - - order_pb = self._make_order(field_path, direction) - - new_orders = self._orders + (order_pb,) - return self.__class__( - self._parent, - projection=self._projection, - field_filters=self._field_filters, - orders=new_orders, - limit=self._limit, - offset=self._offset, - start_at=self._start_at, - end_at=self._end_at, - ) - - def limit(self, count): - """Limit a query to return a fixed number of results. - - If the current query already has a limit set, this will overwrite it. - - Args: - count (int): Maximum number of documents to return that match - the query. - - Returns: - ~.firestore_v1beta1.query.Query: A limited query. Acts as a - copy of the current query, modified with the newly added - "limit" filter. 
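As a usage sketch of the builder methods documented here (the collection and field names are made up), note that ``where()``, ``order_by()`` and ``limit()`` each return a new ``Query``; the original object is never mutated.

    from google.cloud import firestore_v1beta1

    db = firestore_v1beta1.Client()

    # Chain filters, ordering and a limit; each step produces a fresh copy.
    query = (
        db.collection(u"users")
        .where(u"born", u">=", 1815)
        .order_by(u"born", direction="DESCENDING")
        .limit(10)
    )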
- """ - return self.__class__( - self._parent, - projection=self._projection, - field_filters=self._field_filters, - orders=self._orders, - limit=count, - offset=self._offset, - start_at=self._start_at, - end_at=self._end_at, - ) - - def offset(self, num_to_skip): - """Skip to an offset in a query. - - If the current query already has specified an offset, this will - overwrite it. - - Args: - num_to_skip (int): The number of results to skip at the beginning - of query results. (Must be non-negative.) - - Returns: - ~.firestore_v1beta1.query.Query: An offset query. Acts as a - copy of the current query, modified with the newly added - "offset" field. - """ - return self.__class__( - self._parent, - projection=self._projection, - field_filters=self._field_filters, - orders=self._orders, - limit=self._limit, - offset=num_to_skip, - start_at=self._start_at, - end_at=self._end_at, - ) - - def _cursor_helper(self, document_fields, before, start): - """Set values to be used for a ``start_at`` or ``end_at`` cursor. - - The values will later be used in a query protobuf. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`. - - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. - before (bool): Flag indicating if the document in - ``document_fields`` should (:data:`False`) or - shouldn't (:data:`True`) be included in the result set. - start (Optional[bool]): determines if the cursor is a ``start_at`` - cursor (:data:`True`) or an ``end_at`` cursor (:data:`False`). - - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. Acts as - a copy of the current query, modified with the newly added - "start at" cursor. - """ - if isinstance(document_fields, tuple): - document_fields = list(document_fields) - elif isinstance(document_fields, document.DocumentSnapshot): - if document_fields.reference._path[:-1] != self._parent._path: - raise ValueError( - "Cannot use snapshot from another collection as a cursor." - ) - else: - # NOTE: We copy so that the caller can't modify after calling. - document_fields = copy.deepcopy(document_fields) - - cursor_pair = document_fields, before - query_kwargs = { - "projection": self._projection, - "field_filters": self._field_filters, - "orders": self._orders, - "limit": self._limit, - "offset": self._offset, - } - if start: - query_kwargs["start_at"] = cursor_pair - query_kwargs["end_at"] = self._end_at - else: - query_kwargs["start_at"] = self._start_at - query_kwargs["end_at"] = cursor_pair - - return self.__class__(self._parent, **query_kwargs) - - def start_at(self, document_fields): - """Start query results at a particular document value. - - The result set will **include** the document specified by - ``document_fields``. - - If the current query already has specified a start cursor -- either - via this method or - :meth:`~google.cloud.firestore_v1beta1.query.Query.start_after` -- this will - overwrite it. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`. 
- - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. - - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. Acts as - a copy of the current query, modified with the newly added - "start at" cursor. - """ - return self._cursor_helper(document_fields, before=True, start=True) - - def start_after(self, document_fields): - """Start query results after a particular document value. - - The result set will **exclude** the document specified by - ``document_fields``. - - If the current query already has specified a start cursor -- either - via this method or - :meth:`~google.cloud.firestore_v1beta1.query.Query.start_at` -- this will - overwrite it. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`. - - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. - - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. Acts as - a copy of the current query, modified with the newly added - "start after" cursor. - """ - return self._cursor_helper(document_fields, before=False, start=True) - - def end_before(self, document_fields): - """End query results before a particular document value. - - The result set will **exclude** the document specified by - ``document_fields``. - - If the current query already has specified an end cursor -- either - via this method or - :meth:`~google.cloud.firestore_v1beta1.query.Query.end_at` -- this will - overwrite it. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`. - - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. - - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. Acts as - a copy of the current query, modified with the newly added - "end before" cursor. - """ - return self._cursor_helper(document_fields, before=True, start=False) - - def end_at(self, document_fields): - """End query results at a particular document value. - - The result set will **include** the document specified by - ``document_fields``. - - If the current query already has specified an end cursor -- either - via this method or - :meth:`~google.cloud.firestore_v1beta1.query.Query.end_before` -- this will - overwrite it. - - When the query is sent to the server, the ``document_fields`` will - be used in the order given by fields set by - :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`. - - Args: - document_fields (Union[~.firestore_v1beta1.\ - document.DocumentSnapshot, dict, list, tuple]): a document - snapshot or a dictionary/list/tuple of fields representing a - query results cursor. A cursor is a collection of values that - represent a position in a query result set. 
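A hedged sketch of the cursor methods just described, again with invented names; per these docstrings, every field supplied to a cursor must also be named in ``order_by()``.

    from google.cloud import firestore_v1beta1

    db = firestore_v1beta1.Client()
    cities = db.collection(u"cities")

    # Dicts, lists, tuples, or a DocumentSnapshot may serve as the cursor.
    page = (
        cities.order_by(u"population")
        .start_at({u"population": 1000000})
        .end_before({u"population": 5000000})
    )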
- - Returns: - ~.firestore_v1beta1.query.Query: A query with cursor. Acts as - a copy of the current query, modified with the newly added - "end at" cursor. - """ - return self._cursor_helper(document_fields, before=False, start=False) - - def _filters_pb(self): - """Convert all the filters into a single generic Filter protobuf. - - This may be a lone field filter or unary filter, may be a composite - filter or may be :data:`None`. - - Returns: - google.cloud.firestore_v1beta1.types.\ - StructuredQuery.Filter: A "generic" filter representing the - current query's filters. - """ - num_filters = len(self._field_filters) - if num_filters == 0: - return None - elif num_filters == 1: - return _filter_pb(self._field_filters[0]) - else: - composite_filter = query_pb2.StructuredQuery.CompositeFilter( - op=enums.StructuredQuery.CompositeFilter.Operator.AND, - filters=[_filter_pb(filter_) for filter_ in self._field_filters], - ) - return query_pb2.StructuredQuery.Filter(composite_filter=composite_filter) - - @staticmethod - def _normalize_projection(projection): - """Helper: convert field paths to message.""" - if projection is not None: - - fields = list(projection.fields) - - if not fields: - field_ref = query_pb2.StructuredQuery.FieldReference( - field_path="__name__" - ) - return query_pb2.StructuredQuery.Projection(fields=[field_ref]) - - return projection - - def _normalize_orders(self): - """Helper: adjust orders based on cursors, where clauses.""" - orders = list(self._orders) - _has_snapshot_cursor = False - - if self._start_at: - if isinstance(self._start_at[0], document.DocumentSnapshot): - _has_snapshot_cursor = True - - if self._end_at: - if isinstance(self._end_at[0], document.DocumentSnapshot): - _has_snapshot_cursor = True - - if _has_snapshot_cursor: - should_order = [ - _enum_from_op_string(key) - for key in _COMPARISON_OPERATORS - if key not in (_EQ_OP, "array_contains") - ] - order_keys = [order.field.field_path for order in orders] - for filter_ in self._field_filters: - field = filter_.field.field_path - if filter_.op in should_order and field not in order_keys: - orders.append(self._make_order(field, "ASCENDING")) - if not orders: - orders.append(self._make_order("__name__", "ASCENDING")) - else: - order_keys = [order.field.field_path for order in orders] - if "__name__" not in order_keys: - direction = orders[-1].direction # enum? 
- orders.append(self._make_order("__name__", direction)) - - return orders - - def _normalize_cursor(self, cursor, orders): - """Helper: convert cursor to a list of values based on orders.""" - if cursor is None: - return - - if not orders: - raise ValueError(_NO_ORDERS_FOR_CURSOR) - - document_fields, before = cursor - - order_keys = [order.field.field_path for order in orders] - - if isinstance(document_fields, document.DocumentSnapshot): - snapshot = document_fields - document_fields = snapshot.to_dict() - document_fields["__name__"] = snapshot.reference - - if isinstance(document_fields, dict): - # Transform to list using orders - values = [] - data = document_fields - for order_key in order_keys: - try: - values.append(field_path_module.get_nested_value(order_key, data)) - except KeyError: - msg = _MISSING_ORDER_BY.format(order_key, data) - raise ValueError(msg) - document_fields = values - - if len(document_fields) != len(orders): - msg = _MISMATCH_CURSOR_W_ORDER_BY.format(document_fields, order_keys) - raise ValueError(msg) - - _transform_bases = (transforms.Sentinel, transforms._ValueList) - - for index, key_field in enumerate(zip(order_keys, document_fields)): - key, field = key_field - - if isinstance(field, _transform_bases): - msg = _INVALID_CURSOR_TRANSFORM - raise ValueError(msg) - - if key == "__name__" and isinstance(field, six.string_types): - document_fields[index] = self._parent.document(field) - - return document_fields, before - - def _to_protobuf(self): - """Convert the current query into the equivalent protobuf. - - Returns: - google.cloud.firestore_v1beta1.types.StructuredQuery: The - query protobuf. - """ - projection = self._normalize_projection(self._projection) - orders = self._normalize_orders() - start_at = self._normalize_cursor(self._start_at, orders) - end_at = self._normalize_cursor(self._end_at, orders) - - query_kwargs = { - "select": projection, - "from": [ - query_pb2.StructuredQuery.CollectionSelector( - collection_id=self._parent.id - ) - ], - "where": self._filters_pb(), - "order_by": orders, - "start_at": _cursor_pb(start_at), - "end_at": _cursor_pb(end_at), - } - if self._offset is not None: - query_kwargs["offset"] = self._offset - if self._limit is not None: - query_kwargs["limit"] = wrappers_pb2.Int32Value(value=self._limit) - - return query_pb2.StructuredQuery(**query_kwargs) - - def get(self, transaction=None): - """Deprecated alias for :meth:`stream`.""" - warnings.warn( - "'Query.get' is deprecated: please use 'Query.stream' instead.", - DeprecationWarning, - stacklevel=2, - ) - return self.stream(transaction=transaction) - - def stream(self, transaction=None): - """Read the documents in the collection that match this query. - - This sends a ``RunQuery`` RPC and then returns an iterator which - consumes each document returned in the stream of ``RunQueryResponse`` - messages. - - .. note:: - - The underlying stream of responses will time out after - the ``max_rpc_timeout_millis`` value set in the GAPIC - client configuration for the ``RunQuery`` API. Snapshots - not consumed from the iterator before that point will be lost. - - If a ``transaction`` is used and it already has write operations - added, this method cannot be used (i.e. read-after-write is not - allowed). - - Args: - transaction (Optional[~.firestore_v1beta1.transaction.\ - Transaction]): An existing transaction that this query will - run in. - - Yields: - ~.firestore_v1beta1.document.DocumentSnapshot: The next - document that fulfills the query. 
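To illustrate consuming the ``RunQuery`` stream described above (collection and field names are assumptions, not from the diff):

    from google.cloud import firestore_v1beta1

    db = firestore_v1beta1.Client()

    # stream() lazily yields DocumentSnapshot objects as RunQueryResponse
    # messages arrive; snapshots not consumed before the RPC times out are lost.
    for snapshot in db.collection(u"users").where(u"state", u"==", u"CA").stream():
        print(u"{} => {}".format(snapshot.id, snapshot.to_dict()))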
- """ - parent_path, expected_prefix = self._parent._parent_info() - response_iterator = self._client._firestore_api.run_query( - parent_path, - self._to_protobuf(), - transaction=_helpers.get_transaction_id(transaction), - metadata=self._client._rpc_metadata, - ) - - for response in response_iterator: - snapshot = _query_response_to_snapshot( - response, self._parent, expected_prefix - ) - if snapshot is not None: - yield snapshot - - def on_snapshot(self, callback): - """Monitor the documents in this collection that match this query. - - This starts a watch on this query using a background thread. The - provided callback is run on the snapshot of the documents. - - Args: - callback(~.firestore.query.QuerySnapshot): a callback to run when - a change occurs. - - Example: - from google.cloud import firestore_v1beta1 - - db = firestore_v1beta1.Client() - query_ref = db.collection(u'users').where("user", "==", u'Ada') - - def on_snapshot(docs, changes, read_time): - for doc in docs: - print(u'{} => {}'.format(doc.id, doc.to_dict())) - - # Watch this query - query_watch = query_ref.on_snapshot(on_snapshot) - - # Terminate this watch - query_watch.unsubscribe() - """ - return Watch.for_query( - self, callback, document.DocumentSnapshot, document.DocumentReference - ) - - def _comparator(self, doc1, doc2): - _orders = self._orders - - # Add implicit sorting by name, using the last specified direction. - if len(_orders) == 0: - lastDirection = Query.ASCENDING - else: - if _orders[-1].direction == 1: - lastDirection = Query.ASCENDING - else: - lastDirection = Query.DESCENDING - - orderBys = list(_orders) - - order_pb = query_pb2.StructuredQuery.Order( - field=query_pb2.StructuredQuery.FieldReference(field_path="id"), - direction=_enum_from_direction(lastDirection), - ) - orderBys.append(order_pb) - - for orderBy in orderBys: - if orderBy.field.field_path == "id": - # If ordering by docuent id, compare resource paths. - comp = Order()._compare_to(doc1.reference._path, doc2.reference._path) - else: - if ( - orderBy.field.field_path not in doc1._data - or orderBy.field.field_path not in doc2._data - ): - raise ValueError( - "Can only compare fields that exist in the " - "DocumentSnapshot. Please include the fields you are " - "ordering on in your select() call." - ) - v1 = doc1._data[orderBy.field.field_path] - v2 = doc2._data[orderBy.field.field_path] - encoded_v1 = _helpers.encode_value(v1) - encoded_v2 = _helpers.encode_value(v2) - comp = Order().compare(encoded_v1, encoded_v2) - - if comp != 0: - # 1 == Ascending, -1 == Descending - return orderBy.direction * comp - - return 0 - - -def _enum_from_op_string(op_string): - """Convert a string representation of a binary operator to an enum. - - These enums come from the protobuf message definition - ``StructuredQuery.FieldFilter.Operator``. - - Args: - op_string (str): A comparison operation in the form of a string. - Acceptable values are ``<``, ``<=``, ``==``, ``>=`` - and ``>``. - - Returns: - int: The enum corresponding to ``op_string``. - - Raises: - ValueError: If ``op_string`` is not a valid operator. - """ - try: - return _COMPARISON_OPERATORS[op_string] - except KeyError: - choices = ", ".join(sorted(_COMPARISON_OPERATORS.keys())) - msg = _BAD_OP_STRING.format(op_string, choices) - raise ValueError(msg) - - -def _isnan(value): - """Check if a value is NaN. - - This differs from ``math.isnan`` in that **any** input type is - allowed. - - Args: - value (Any): A value to check for NaN-ness. 
- - Returns: - bool: Indicates if the value is the NaN float. - """ - if isinstance(value, float): - return math.isnan(value) - else: - return False - - -def _enum_from_direction(direction): - """Convert a string representation of a direction to an enum. - - Args: - direction (str): A direction to order by. Must be one of - :attr:`~google.cloud.firestore.Query.ASCENDING` or - :attr:`~google.cloud.firestore.Query.DESCENDING`. - - Returns: - int: The enum corresponding to ``direction``. - - Raises: - ValueError: If ``direction`` is not a valid direction. - """ - if isinstance(direction, int): - return direction - - if direction == Query.ASCENDING: - return enums.StructuredQuery.Direction.ASCENDING - elif direction == Query.DESCENDING: - return enums.StructuredQuery.Direction.DESCENDING - else: - msg = _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING) - raise ValueError(msg) - - -def _filter_pb(field_or_unary): - """Convert a specific protobuf filter to the generic filter type. - - Args: - field_or_unary (Union[google.cloud.proto.firestore.v1beta1.\ - query_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\ - firestore.v1beta1.query_pb2.StructuredQuery.FieldFilter]): A - field or unary filter to convert to a generic filter. - - Returns: - google.cloud.firestore_v1beta1.types.\ - StructuredQuery.Filter: A "generic" filter. - - Raises: - ValueError: If ``field_or_unary`` is not a field or unary filter. - """ - if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter): - return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary) - elif isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter): - return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary) - else: - raise ValueError("Unexpected filter type", type(field_or_unary), field_or_unary) - - -def _cursor_pb(cursor_pair): - """Convert a cursor pair to a protobuf. - - If ``cursor_pair`` is :data:`None`, just returns :data:`None`. - - Args: - cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of - - * a list of field values. - * a ``before`` flag - - Returns: - Optional[google.cloud.firestore_v1beta1.types.Cursor]: A - protobuf cursor corresponding to the values. - """ - if cursor_pair is not None: - data, before = cursor_pair - value_pbs = [_helpers.encode_value(value) for value in data] - return query_pb2.Cursor(values=value_pbs, before=before) - - -def _query_response_to_snapshot(response_pb, collection, expected_prefix): - """Parse a query response protobuf to a document snapshot. - - Args: - response_pb (google.cloud.proto.firestore.v1beta1.\ - firestore_pb2.RunQueryResponse): A - collection (~.firestore_v1beta1.collection.CollectionReference): A - reference to the collection that initiated the query. - expected_prefix (str): The expected prefix for fully-qualified - document names returned in the query results. This can be computed - directly from ``collection`` via :meth:`_parent_info`. - - Returns: - Optional[~.firestore.document.DocumentSnapshot]: A - snapshot of the data returned in the query. If ``response_pb.document`` - is not set, the snapshot will be :data:`None`. 
- """ - if not response_pb.HasField("document"): - return None - - document_id = _helpers.get_doc_id(response_pb.document, expected_prefix) - reference = collection.document(document_id) - data = _helpers.decode_dict(response_pb.document.fields, collection._client) - snapshot = document.DocumentSnapshot( - reference, - data, - exists=True, - read_time=response_pb.read_time, - create_time=response_pb.document.create_time, - update_time=response_pb.document.update_time, - ) - return snapshot diff --git a/firestore/google/cloud/firestore_v1beta1/transaction.py b/firestore/google/cloud/firestore_v1beta1/transaction.py deleted file mode 100644 index 9a37f18d8061..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/transaction.py +++ /dev/null @@ -1,409 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helpers for applying Google Cloud Firestore changes in a transaction.""" - - -import random -import time - -import six - -from google.api_core import exceptions -from google.cloud.firestore_v1beta1 import batch -from google.cloud.firestore_v1beta1 import types - - -MAX_ATTEMPTS = 5 -"""int: Default number of transaction attempts (with retries).""" -_CANT_BEGIN = "The transaction has already begun. Current transaction ID: {!r}." -_MISSING_ID_TEMPLATE = "The transaction has no transaction ID, so it cannot be {}." -_CANT_ROLLBACK = _MISSING_ID_TEMPLATE.format("rolled back") -_CANT_COMMIT = _MISSING_ID_TEMPLATE.format("committed") -_WRITE_READ_ONLY = "Cannot perform write operation in read-only transaction." -_INITIAL_SLEEP = 1.0 -"""float: Initial "max" for sleep interval. To be used in :func:`_sleep`.""" -_MAX_SLEEP = 30.0 -"""float: Eventual "max" sleep time. To be used in :func:`_sleep`.""" -_MULTIPLIER = 2.0 -"""float: Multiplier for exponential backoff. To be used in :func:`_sleep`.""" -_EXCEED_ATTEMPTS_TEMPLATE = "Failed to commit transaction in {:d} attempts." -_CANT_RETRY_READ_ONLY = "Only read-write transactions can be retried." - - -class Transaction(batch.WriteBatch): - """Accumulate read-and-write operations to be sent in a transaction. - - Args: - client (~.firestore_v1beta1.client.Client): The client that - created this transaction. - max_attempts (Optional[int]): The maximum number of attempts for - the transaction (i.e. allowing retries). Defaults to - :attr:`~google.cloud.firestore_v1beta1.transaction.MAX_ATTEMPTS`. - read_only (Optional[bool]): Flag indicating if the transaction - should be read-only or should allow writes. Defaults to - :data:`False`. - """ - - def __init__(self, client, max_attempts=MAX_ATTEMPTS, read_only=False): - super(Transaction, self).__init__(client) - self._max_attempts = max_attempts - self._read_only = read_only - self._id = None - - def _add_write_pbs(self, write_pbs): - """Add `Write`` protobufs to this transaction. - - Args: - write_pbs (List[google.cloud.proto.firestore.v1beta1.\ - write_pb2.Write]): A list of write protobufs to be added. 
- - Raises: - ValueError: If this transaction is read-only. - """ - if self._read_only: - raise ValueError(_WRITE_READ_ONLY) - - super(Transaction, self)._add_write_pbs(write_pbs) - - def _options_protobuf(self, retry_id): - """Convert the current object to protobuf. - - The ``retry_id`` value is used when retrying a transaction that - failed (e.g. due to contention). It is intended to be the "first" - transaction that failed (i.e. if multiple retries are needed). - - Args: - retry_id (Union[bytes, NoneType]): Transaction ID of a transaction - to be retried. - - Returns: - Optional[google.cloud.firestore_v1beta1.types.TransactionOptions]: - The protobuf ``TransactionOptions`` if ``read_only==True`` or if - there is a transaction ID to be retried, else :data:`None`. - - Raises: - ValueError: If ``retry_id`` is not :data:`None` but the - transaction is read-only. - """ - if retry_id is not None: - if self._read_only: - raise ValueError(_CANT_RETRY_READ_ONLY) - - return types.TransactionOptions( - read_write=types.TransactionOptions.ReadWrite( - retry_transaction=retry_id - ) - ) - elif self._read_only: - return types.TransactionOptions( - read_only=types.TransactionOptions.ReadOnly() - ) - else: - return None - - @property - def in_progress(self): - """Determine if this transaction has already begun. - - Returns: - bool: Indicates if the transaction has started. - """ - return self._id is not None - - @property - def id(self): - """Get the current transaction ID. - - Returns: - Optional[bytes]: The transaction ID (or :data:`None` if the - current transaction is not in progress). - """ - return self._id - - def _begin(self, retry_id=None): - """Begin the transaction. - - Args: - retry_id (Optional[bytes]): Transaction ID of a transaction to be - retried. - - Raises: - ValueError: If the current transaction has already begun. - """ - if self.in_progress: - msg = _CANT_BEGIN.format(self._id) - raise ValueError(msg) - - transaction_response = self._client._firestore_api.begin_transaction( - self._client._database_string, - options_=self._options_protobuf(retry_id), - metadata=self._client._rpc_metadata, - ) - self._id = transaction_response.transaction - - def _clean_up(self): - """Clean up the instance after :meth:`_rollback` or :meth:`_commit`. - - This is intended to occur on success or failure of the associated RPCs. - """ - self._write_pbs = [] - self._id = None - - def _rollback(self): - """Roll back the transaction. - - Raises: - ValueError: If no transaction is in progress. - """ - if not self.in_progress: - raise ValueError(_CANT_ROLLBACK) - - try: - # NOTE: The response is just ``google.protobuf.Empty``. - self._client._firestore_api.rollback( - self._client._database_string, - self._id, - metadata=self._client._rpc_metadata, - ) - finally: - self._clean_up() - - def _commit(self): - """Transactionally commit the changes accumulated. - - Returns: - List[google.cloud.proto.firestore.v1beta1.\ - write_pb2.WriteResult, ...]: The write results corresponding - to the changes committed, returned in the same order as the - changes were applied to this transaction. A write result contains - an ``update_time`` field. - - Raises: - ValueError: If no transaction is in progress. - """ - if not self.in_progress: - raise ValueError(_CANT_COMMIT) - - commit_response = _commit_with_retry(self._client, self._write_pbs, self._id) - - self._clean_up() - return list(commit_response.write_results) - - -class _Transactional(object): - """Provide a callable object to use as a transactional decorator. 
- - This is surfaced via - :func:`~google.cloud.firestore_v1beta1.transaction.transactional`. - - Args: - to_wrap (Callable[~.firestore_v1beta1.transaction.Transaction, \ - Any]): A callable that should be run (and retried) in a - transaction. - """ - - def __init__(self, to_wrap): - self.to_wrap = to_wrap - self.current_id = None - """Optional[bytes]: The current transaction ID.""" - self.retry_id = None - """Optional[bytes]: The ID of the first attempted transaction.""" - - def _reset(self): - """Unset the transaction IDs.""" - self.current_id = None - self.retry_id = None - - def _pre_commit(self, transaction, *args, **kwargs): - """Begin transaction and call the wrapped callable. - - If the callable raises an exception, the transaction will be rolled - back. If not, the transaction will be "ready" for ``Commit`` (i.e. - it will have staged writes). - - Args: - transaction (~.firestore_v1beta1.transaction.Transaction): A - transaction to execute the callable within. - args (Tuple[Any, ...]): The extra positional arguments to pass - along to the wrapped callable. - kwargs (Dict[str, Any]): The extra keyword arguments to pass - along to the wrapped callable. - - Returns: - Any: result of the wrapped callable. - - Raises: - Exception: Any failure caused by ``to_wrap``. - """ - # Force the ``transaction`` to be not "in progress". - transaction._clean_up() - transaction._begin(retry_id=self.retry_id) - - # Update the stored transaction IDs. - self.current_id = transaction._id - if self.retry_id is None: - self.retry_id = self.current_id - try: - return self.to_wrap(transaction, *args, **kwargs) - except: # noqa - # NOTE: If ``rollback`` fails this will lose the information - # from the original failure. - transaction._rollback() - raise - - def _maybe_commit(self, transaction): - """Try to commit the transaction. - - If the transaction is read-write and the ``Commit`` fails with the - ``ABORTED`` status code, it will be retried. Any other failure will - not be caught. - - Args: - transaction (~.firestore_v1beta1.transaction.Transaction): The - transaction to be ``Commit``-ed. - - Returns: - bool: Indicating if the commit succeeded. - """ - try: - transaction._commit() - return True - except exceptions.GoogleAPICallError as exc: - if transaction._read_only: - raise - - if isinstance(exc, exceptions.Aborted): - # If a read-write transaction returns ABORTED, retry. - return False - else: - raise - - def __call__(self, transaction, *args, **kwargs): - """Execute the wrapped callable within a transaction. - - Args: - transaction (~.firestore_v1beta1.transaction.Transaction): A - transaction to execute the callable within. - args (Tuple[Any, ...]): The extra positional arguments to pass - along to the wrapped callable. - kwargs (Dict[str, Any]): The extra keyword arguments to pass - along to the wrapped callable. - - Returns: - Any: The result of the wrapped callable. - - Raises: - ValueError: If the transaction does not succeed in - ``max_attempts``. - """ - self._reset() - - for attempt in six.moves.xrange(transaction._max_attempts): - result = self._pre_commit(transaction, *args, **kwargs) - succeeded = self._maybe_commit(transaction) - if succeeded: - return result - - # Subsequent requests will use the failed transaction ID as part of - # the ``BeginTransactionRequest`` when restarting this transaction - # (via ``options.retry_transaction``). This preserves the "spot in - # line" of the transaction, so exponential backoff is not required - # in this case. 
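For context, the retry loop above is the machinery behind the ``transactional`` decorator defined just below. A minimal usage sketch (the ``cities/SF`` document and its ``population`` field are illustrative assumptions, not part of this change):

from google.cloud import firestore_v1beta1
from google.cloud.firestore_v1beta1.transaction import transactional

client = firestore_v1beta1.Client()
city_ref = client.document(u"cities", u"SF")  # illustrative document path

@transactional
def increment_population(transaction, doc_ref):
    # Reads go through the transaction so the commit can detect conflicts.
    snapshot = doc_ref.get(transaction=transaction)
    transaction.update(doc_ref, {u"population": snapshot.get(u"population") + 1})

# Each call begins the transaction, runs the wrapped callable, and commits,
# retrying up to ``max_attempts`` times if the commit is ABORTED.
increment_population(client.transaction(), city_ref)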
- - transaction._rollback() - msg = _EXCEED_ATTEMPTS_TEMPLATE.format(transaction._max_attempts) - raise ValueError(msg) - - -def transactional(to_wrap): - """Decorate a callable so that it runs in a transaction. - - Args: - to_wrap (Callable[~.firestore_v1beta1.transaction.Transaction, \ - Any]): A callable that should be run (and retried) in a - transaction. - - Returns: - Callable[~.firestore_v1beta1.transaction.Transaction, Any]: the - wrapped callable. - """ - return _Transactional(to_wrap) - - -def _commit_with_retry(client, write_pbs, transaction_id): - """Call ``Commit`` on the GAPIC client with retry / sleep. - - Retries the ``Commit`` RPC on Unavailable. Usually this RPC-level - retry is handled by the underlying GAPICd client, but in this case it - doesn't because ``Commit`` is not always idempotent. But here we know it - is "idempotent"-like because it has a transaction ID. We also need to do - our own retry to special-case the ``INVALID_ARGUMENT`` error. - - Args: - client (~.firestore_v1beta1.client.Client): A client with - GAPIC client and configuration details. - write_pbs (List[google.cloud.proto.firestore.v1beta1.\ - write_pb2.Write, ...]): A ``Write`` protobuf instance to - be committed. - transaction_id (bytes): ID of an existing transaction that - this commit will run in. - - Returns: - google.cloud.firestore_v1beta1.types.CommitResponse: - The protobuf response from ``Commit``. - - Raises: - ~google.api_core.exceptions.GoogleAPICallError: If a non-retryable - exception is encountered. - """ - current_sleep = _INITIAL_SLEEP - while True: - try: - return client._firestore_api.commit( - client._database_string, - write_pbs, - transaction=transaction_id, - metadata=client._rpc_metadata, - ) - except exceptions.ServiceUnavailable: - # Retry - pass - - current_sleep = _sleep(current_sleep) - - -def _sleep(current_sleep, max_sleep=_MAX_SLEEP, multiplier=_MULTIPLIER): - """Sleep and produce a new sleep time. - - .. _Exponential Backoff And Jitter: https://www.awsarchitectureblog.com/\ - 2015/03/backoff.html - - Select a duration between zero and ``current_sleep``. It might seem - counterintuitive to have so much jitter, but - `Exponential Backoff And Jitter`_ argues that "full jitter" is - the best strategy. - - Args: - current_sleep (float): The current "max" for sleep interval. - max_sleep (Optional[float]): Eventual "max" sleep time - multiplier (Optional[float]): Multiplier for exponential backoff. - - Returns: - float: Newly doubled ``current_sleep`` or ``max_sleep`` (whichever - is smaller) - """ - actual_sleep = random.uniform(0.0, current_sleep) - time.sleep(actual_sleep) - return min(multiplier * current_sleep, max_sleep) diff --git a/firestore/google/cloud/firestore_v1beta1/transforms.py b/firestore/google/cloud/firestore_v1beta1/transforms.py deleted file mode 100644 index 4a64cf9ec3e3..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/transforms.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helpful constants to use for Google Cloud Firestore.""" - - -class Sentinel(object): - """Sentinel objects used to signal special handling.""" - - __slots__ = ("description",) - - def __init__(self, description): - self.description = description - - def __repr__(self): - return "Sentinel: {}".format(self.description) - - -DELETE_FIELD = Sentinel("Value used to delete a field in a document.") - - -SERVER_TIMESTAMP = Sentinel( - "Value used to set a document field to the server timestamp." -) - - -class _ValueList(object): - """Read-only list of values. - - Args: - values (List | Tuple): values held in the helper. - """ - - __slots__ = ("_values",) - - def __init__(self, values): - if not isinstance(values, (list, tuple)): - raise ValueError("'values' must be a list or tuple.") - - if len(values) == 0: - raise ValueError("'values' must be non-empty.") - - self._values = list(values) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._values == other._values - - @property - def values(self): - """Values to append. - - Returns (List): - values to be appended by the transform. - """ - return self._values - - -class ArrayUnion(_ValueList): - """Field transform: appends missing values to an array field. - - See: - https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1beta1#google.firestore.v1beta1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1beta1.ArrayValue.google.firestore.v1beta1.DocumentTransform.FieldTransform.append_missing_elements - - Args: - values (List | Tuple): values to append. - """ - - -class ArrayRemove(_ValueList): - """Field transform: remove values from an array field. - - See: - https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1beta1#google.firestore.v1beta1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1beta1.ArrayValue.google.firestore.v1beta1.DocumentTransform.FieldTransform.remove_all_from_array - - Args: - values (List | Tuple): values to remove. - """ diff --git a/firestore/google/cloud/firestore_v1beta1/types.py b/firestore/google/cloud/firestore_v1beta1/types.py deleted file mode 100644 index 90c03b8aba2e..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/types.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -import sys - -from google.api import http_pb2 -from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import struct_pb2 -from google.protobuf import timestamp_pb2 -from google.protobuf import wrappers_pb2 -from google.rpc import status_pb2 -from google.type import latlng_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.firestore_v1beta1.proto import common_pb2 -from google.cloud.firestore_v1beta1.proto import document_pb2 -from google.cloud.firestore_v1beta1.proto import firestore_pb2 -from google.cloud.firestore_v1beta1.proto import query_pb2 -from google.cloud.firestore_v1beta1.proto import write_pb2 - - -_shared_modules = [ - http_pb2, - any_pb2, - descriptor_pb2, - empty_pb2, - struct_pb2, - timestamp_pb2, - wrappers_pb2, - status_pb2, - latlng_pb2, -] - -_local_modules = [common_pb2, document_pb2, firestore_pb2, query_pb2, write_pb2] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.firestore_v1beta1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/firestore/google/cloud/firestore_v1beta1/watch.py b/firestore/google/cloud/firestore_v1beta1/watch.py deleted file mode 100644 index 63ded0d2d25b..000000000000 --- a/firestore/google/cloud/firestore_v1beta1/watch.py +++ /dev/null @@ -1,722 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
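The ``types`` module above populates itself dynamically, giving callers a single import point for every generated protobuf message. A small illustrative sketch of the resulting surface (the messages shown are ones already used elsewhere in this package, e.g. by ``transaction.py`` and ``query.py``):

from google.cloud.firestore_v1beta1 import types

# Messages re-exported by the loops above behave like ordinary protobuf classes.
read_only_options = types.TransactionOptions(
    read_only=types.TransactionOptions.ReadOnly()
)
cursor = types.Cursor(before=True)

# ``__all__`` advertises everything that was re-exported.
print(len(types.__all__))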
- -import logging -import collections -import threading -import datetime -from enum import Enum -import functools - -import pytz - -from google.api_core.bidi import ResumableBidiRpc -from google.api_core.bidi import BackgroundConsumer -from google.cloud.firestore_v1beta1.proto import firestore_pb2 -from google.cloud.firestore_v1beta1 import _helpers - -from google.api_core import exceptions - -import grpc - -"""Python client for Google Cloud Firestore Watch.""" - -_LOGGER = logging.getLogger(__name__) - -WATCH_TARGET_ID = 0x5079 # "Py" - -GRPC_STATUS_CODE = { - "OK": 0, - "CANCELLED": 1, - "UNKNOWN": 2, - "INVALID_ARGUMENT": 3, - "DEADLINE_EXCEEDED": 4, - "NOT_FOUND": 5, - "ALREADY_EXISTS": 6, - "PERMISSION_DENIED": 7, - "UNAUTHENTICATED": 16, - "RESOURCE_EXHAUSTED": 8, - "FAILED_PRECONDITION": 9, - "ABORTED": 10, - "OUT_OF_RANGE": 11, - "UNIMPLEMENTED": 12, - "INTERNAL": 13, - "UNAVAILABLE": 14, - "DATA_LOSS": 15, - "DO_NOT_USE": -1, -} -_RPC_ERROR_THREAD_NAME = "Thread-OnRpcTerminated" -_RETRYABLE_STREAM_ERRORS = ( - exceptions.DeadlineExceeded, - exceptions.ServiceUnavailable, - exceptions.InternalServerError, - exceptions.Unknown, - exceptions.GatewayTimeout, -) - -DocTreeEntry = collections.namedtuple("DocTreeEntry", ["value", "index"]) - - -class WatchDocTree(object): - # TODO: Currently this uses a dict. Other implementations use an rbtree. - # The performance of this implementation should be investigated and may - # require modifying the underlying data structure to an rbtree. - def __init__(self): - self._dict = {} - self._index = 0 - - def keys(self): - return list(self._dict.keys()) - - def _copy(self): - wdt = WatchDocTree() - wdt._dict = self._dict.copy() - wdt._index = self._index - self = wdt - return self - - def insert(self, key, value): - self = self._copy() - self._dict[key] = DocTreeEntry(value, self._index) - self._index += 1 - return self - - def find(self, key): - return self._dict[key] - - def remove(self, key): - self = self._copy() - del self._dict[key] - return self - - def __iter__(self): - for k in self._dict: - yield k - - def __len__(self): - return len(self._dict) - - def __contains__(self, k): - return k in self._dict - - -class ChangeType(Enum): - ADDED = 1 - REMOVED = 2 - MODIFIED = 3 - - -class DocumentChange(object): - def __init__(self, type, document, old_index, new_index): - """DocumentChange - - Args: - type (ChangeType): - document (document.DocumentSnapshot): - old_index (int): - new_index (int): - """ - # TODO: spec indicated an isEqual param also - self.type = type - self.document = document - self.old_index = old_index - self.new_index = new_index - - -class WatchResult(object): - def __init__(self, snapshot, name, change_type): - self.snapshot = snapshot - self.name = name - self.change_type = change_type - - -def _maybe_wrap_exception(exception): - """Wraps a gRPC exception class, if needed.""" - if isinstance(exception, grpc.RpcError): - return exceptions.from_grpc_error(exception) - return exception - - -def document_watch_comparator(doc1, doc2): - assert doc1 == doc2, "Document watches only support one document." 
- return 0 - - -class Watch(object): - - BackgroundConsumer = BackgroundConsumer # FBO unit tests - ResumableBidiRpc = ResumableBidiRpc # FBO unit tests - - def __init__( - self, - document_reference, - firestore, - target, - comparator, - snapshot_callback, - document_snapshot_cls, - document_reference_cls, - BackgroundConsumer=None, # FBO unit testing - ResumableBidiRpc=None, # FBO unit testing - ): - """ - Args: - firestore: - target: - comparator: - snapshot_callback: Callback method to process snapshots. - Args: - docs (List(DocumentSnapshot)): A callback that returns the - ordered list of documents stored in this snapshot. - changes (List(str)): A callback that returns the list of - changed documents since the last snapshot delivered for - this watch. - read_time (string): The ISO 8601 time at which this - snapshot was obtained. - - document_snapshot_cls: instance of DocumentSnapshot - document_reference_cls: instance of DocumentReference - """ - self._document_reference = document_reference - self._firestore = firestore - self._api = firestore._firestore_api - self._targets = target - self._comparator = comparator - self.DocumentSnapshot = document_snapshot_cls - self.DocumentReference = document_reference_cls - self._snapshot_callback = snapshot_callback - self._closing = threading.Lock() - self._closed = False - - def should_recover(exc): # pragma: NO COVER - return ( - isinstance(exc, grpc.RpcError) - and exc.code() == grpc.StatusCode.UNAVAILABLE - ) - - initial_request = firestore_pb2.ListenRequest( - database=self._firestore._database_string, add_target=self._targets - ) - - if ResumableBidiRpc is None: - ResumableBidiRpc = self.ResumableBidiRpc # FBO unit tests - - self._rpc = ResumableBidiRpc( - self._api.transport.listen, - initial_request=initial_request, - should_recover=should_recover, - metadata=self._firestore._rpc_metadata, - ) - - self._rpc.add_done_callback(self._on_rpc_done) - - # Initialize state for on_snapshot - # The sorted tree of QueryDocumentSnapshots as sent in the last - # snapshot. We only look at the keys. - self.doc_tree = WatchDocTree() - - # A map of document names to QueryDocumentSnapshots for the last sent - # snapshot. - self.doc_map = {} - - # The accumulates map of document changes (keyed by document name) for - # the current snapshot. - self.change_map = {} - - # The current state of the query results. - self.current = False - - # We need this to track whether we've pushed an initial set of changes, - # since we should push those even when there are no changes, if there - # aren't docs. - self.has_pushed = False - - # The server assigns and updates the resume token. - self.resume_token = None - if BackgroundConsumer is None: # FBO unit tests - BackgroundConsumer = self.BackgroundConsumer - - self._consumer = BackgroundConsumer(self._rpc, self.on_snapshot) - self._consumer.start() - - @property - def is_active(self): - """bool: True if this manager is actively streaming. - - Note that ``False`` does not indicate this is complete shut down, - just that it stopped getting new messages. - """ - return self._consumer is not None and self._consumer.is_active - - def close(self, reason=None): - """Stop consuming messages and shutdown all helper threads. - - This method is idempotent. Additional calls will have no effect. - - Args: - reason (Any): The reason to close this. If None, this is considered - an "intentional" shutdown. - """ - with self._closing: - if self._closed: - return - - # Stop consuming messages. 
- if self.is_active: - _LOGGER.debug("Stopping consumer.") - self._consumer.stop() - self._consumer = None - - self._rpc.close() - self._rpc = None - self._closed = True - _LOGGER.debug("Finished stopping manager.") - - if reason: - # Raise an exception if a reason is provided - _LOGGER.debug("reason for closing: %s" % reason) - if isinstance(reason, Exception): - raise reason - raise RuntimeError(reason) - - def _on_rpc_done(self, future): - """Triggered whenever the underlying RPC terminates without recovery. - - This is typically triggered from one of two threads: the background - consumer thread (when calling ``recv()`` produces a non-recoverable - error) or the grpc management thread (when cancelling the RPC). - - This method is *non-blocking*. It will start another thread to deal - with shutting everything down. This is to prevent blocking in the - background consumer and preventing it from being ``joined()``. - """ - _LOGGER.info("RPC termination has signaled manager shutdown.") - future = _maybe_wrap_exception(future) - thread = threading.Thread( - name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future} - ) - thread.daemon = True - thread.start() - - def unsubscribe(self): - self.close() - - @classmethod - def for_document( - cls, - document_ref, - snapshot_callback, - snapshot_class_instance, - reference_class_instance, - ): - """ - Creates a watch snapshot listener for a document. snapshot_callback - receives a DocumentChange object, but may also start to get - targetChange and such soon - - Args: - document_ref: Reference to Document - snapshot_callback: callback to be called on snapshot - snapshot_class_instance: instance of DocumentSnapshot to make - snapshots with to pass to snapshot_callback - reference_class_instance: instance of DocumentReference to make - references - - """ - return cls( - document_ref, - document_ref._client, - { - "documents": {"documents": [document_ref._document_path]}, - "target_id": WATCH_TARGET_ID, - }, - document_watch_comparator, - snapshot_callback, - snapshot_class_instance, - reference_class_instance, - ) - - @classmethod - def for_query( - cls, query, snapshot_callback, snapshot_class_instance, reference_class_instance - ): - query_target = firestore_pb2.Target.QueryTarget( - parent=query._client._database_string, structured_query=query._to_protobuf() - ) - - return cls( - query, - query._client, - {"query": query_target, "target_id": WATCH_TARGET_ID}, - query._comparator, - snapshot_callback, - snapshot_class_instance, - reference_class_instance, - ) - - def _on_snapshot_target_change_no_change(self, proto): - _LOGGER.debug("on_snapshot: target change: NO_CHANGE") - change = proto.target_change - - no_target_ids = change.target_ids is None or len(change.target_ids) == 0 - if no_target_ids and change.read_time and self.current: - # TargetChange.CURRENT followed by TargetChange.NO_CHANGE - # signals a consistent state. Invoke the onSnapshot - # callback as specified by the user. 
- self.push(change.read_time, change.resume_token) - - def _on_snapshot_target_change_add(self, proto): - _LOGGER.debug("on_snapshot: target change: ADD") - target_id = proto.target_change.target_ids[0] - if target_id != WATCH_TARGET_ID: - raise RuntimeError("Unexpected target ID %s sent by server" % target_id) - - def _on_snapshot_target_change_remove(self, proto): - _LOGGER.debug("on_snapshot: target change: REMOVE") - change = proto.target_change - - code = 13 - message = "internal error" - if change.cause: - code = change.cause.code - message = change.cause.message - - message = "Error %s: %s" % (code, message) - - raise RuntimeError(message) - - def _on_snapshot_target_change_reset(self, proto): - # Whatever changes have happened so far no longer matter. - _LOGGER.debug("on_snapshot: target change: RESET") - self._reset_docs() - - def _on_snapshot_target_change_current(self, proto): - _LOGGER.debug("on_snapshot: target change: CURRENT") - self.current = True - - def on_snapshot(self, proto): - """ - Called every time there is a response from listen. Collect changes - and 'push' the changes in a batch to the customer when we receive - 'current' from the listen response. - - Args: - proto(`google.cloud.firestore_v1beta1.types.ListenResponse`): - A response message from the ``Listen`` streaming RPC. - """ - TargetChange = firestore_pb2.TargetChange - - target_changetype_dispatch = { - TargetChange.NO_CHANGE: self._on_snapshot_target_change_no_change, - TargetChange.ADD: self._on_snapshot_target_change_add, - TargetChange.REMOVE: self._on_snapshot_target_change_remove, - TargetChange.RESET: self._on_snapshot_target_change_reset, - TargetChange.CURRENT: self._on_snapshot_target_change_current, - } - - target_change = proto.target_change - if str(target_change): - target_change_type = target_change.target_change_type - _LOGGER.debug("on_snapshot: target change: " + str(target_change_type)) - meth = target_changetype_dispatch.get(target_change_type) - if meth is None: - _LOGGER.info( - "on_snapshot: Unknown target change " + str(target_change_type) - ) - self.close( - reason="Unknown target change type: %s " % str(target_change_type) - ) - else: - try: - meth(proto) - except Exception as exc2: - _LOGGER.debug("meth(proto) exc: " + str(exc2)) - raise - - # NOTE: - # in other implementations, such as node, the backoff is reset here; - # in this version the bidi rpc is used and will control this. - - elif str(proto.document_change): - _LOGGER.debug("on_snapshot: document change") - - # No other target_ids can show up here, but we still need to see - # if the targetId was in the added list or removed list. - target_ids = proto.document_change.target_ids or [] - removed_target_ids = proto.document_change.removed_target_ids or [] - changed = False - removed = False - - if WATCH_TARGET_ID in target_ids: - changed = True - - if WATCH_TARGET_ID in removed_target_ids: - removed = True - - if changed: - _LOGGER.debug("on_snapshot: document change: CHANGED") - - # google.cloud.firestore_v1beta1.types.DocumentChange - document_change = proto.document_change - # google.cloud.firestore_v1beta1.types.Document - document = document_change.document - - data = _helpers.decode_dict(document.fields, self._firestore) - - # Create a snapshot. 
As Document and Query objects can be - # passed we need to get a Document Reference in a more manual - # fashion than self._document_reference - document_name = document.name - db_str = self._firestore._database_string - db_str_documents = db_str + "/documents/" - if document_name.startswith(db_str_documents): - document_name = document_name[len(db_str_documents) :] - - document_ref = self._firestore.document(document_name) - - snapshot = self.DocumentSnapshot( - reference=document_ref, - data=data, - exists=True, - read_time=None, - create_time=document.create_time, - update_time=document.update_time, - ) - self.change_map[document.name] = snapshot - - elif removed: - _LOGGER.debug("on_snapshot: document change: REMOVED") - document = proto.document_change.document - self.change_map[document.name] = ChangeType.REMOVED - - # NB: document_delete and document_remove (as far as we, the client, - # are concerned) are functionally equivalent - - elif str(proto.document_delete): - _LOGGER.debug("on_snapshot: document change: DELETE") - name = proto.document_delete.document - self.change_map[name] = ChangeType.REMOVED - - elif str(proto.document_remove): - _LOGGER.debug("on_snapshot: document change: REMOVE") - name = proto.document_remove.document - self.change_map[name] = ChangeType.REMOVED - - elif proto.filter: - _LOGGER.debug("on_snapshot: filter update") - if proto.filter.count != self._current_size(): - # We need to remove all the current results. - self._reset_docs() - # The filter didn't match, so re-issue the query. - # TODO: reset stream method? - # self._reset_stream(); - - else: - _LOGGER.debug("UNKNOWN TYPE. UHOH") - self.close(reason=ValueError("Unknown listen response type: %s" % proto)) - - def push(self, read_time, next_resume_token): - """ - Assembles a new snapshot from the current set of changes and invokes - the user's callback. Clears the current changes on completion. - """ - deletes, adds, updates = Watch._extract_changes( - self.doc_map, self.change_map, read_time - ) - - updated_tree, updated_map, appliedChanges = self._compute_snapshot( - self.doc_tree, self.doc_map, deletes, adds, updates - ) - - if not self.has_pushed or len(appliedChanges): - # TODO: It is possible in the future we will have the tree order - # on insert. For now, we sort here. - key = functools.cmp_to_key(self._comparator) - keys = sorted(updated_tree.keys(), key=key) - - self._snapshot_callback( - keys, - appliedChanges, - datetime.datetime.fromtimestamp(read_time.seconds, pytz.utc), - ) - self.has_pushed = True - - self.doc_tree = updated_tree - self.doc_map = updated_map - self.change_map.clear() - self.resume_token = next_resume_token - - @staticmethod - def _extract_changes(doc_map, changes, read_time): - deletes = [] - adds = [] - updates = [] - - for name, value in changes.items(): - if value == ChangeType.REMOVED: - if name in doc_map: - deletes.append(name) - elif name in doc_map: - if read_time is not None: - value.read_time = read_time - updates.append(value) - else: - if read_time is not None: - value.read_time = read_time - adds.append(value) - - return (deletes, adds, updates) - - def _compute_snapshot( - self, doc_tree, doc_map, delete_changes, add_changes, update_changes - ): - updated_tree = doc_tree - updated_map = doc_map - - assert len(doc_tree) == len(doc_map), ( - "The document tree and document map should have the same " - + "number of entries." - ) - - def delete_doc(name, updated_tree, updated_map): - """ - Applies a document delete to the document tree and document map. 
- Returns the corresponding DocumentChange event. - """ - assert name in updated_map, "Document to delete does not exist" - old_document = updated_map.get(name) - # TODO: If a document doesn't exist this raises IndexError. Handle? - existing = updated_tree.find(old_document) - old_index = existing.index - updated_tree = updated_tree.remove(old_document) - del updated_map[name] - return ( - DocumentChange(ChangeType.REMOVED, old_document, old_index, -1), - updated_tree, - updated_map, - ) - - def add_doc(new_document, updated_tree, updated_map): - """ - Applies a document add to the document tree and the document map. - Returns the corresponding DocumentChange event. - """ - name = new_document.reference._document_path - assert name not in updated_map, "Document to add already exists" - updated_tree = updated_tree.insert(new_document, None) - new_index = updated_tree.find(new_document).index - updated_map[name] = new_document - return ( - DocumentChange(ChangeType.ADDED, new_document, -1, new_index), - updated_tree, - updated_map, - ) - - def modify_doc(new_document, updated_tree, updated_map): - """ - Applies a document modification to the document tree and the - document map. - Returns the DocumentChange event for successful modifications. - """ - name = new_document.reference._document_path - assert name in updated_map, "Document to modify does not exist" - old_document = updated_map.get(name) - if old_document.update_time != new_document.update_time: - remove_change, updated_tree, updated_map = delete_doc( - name, updated_tree, updated_map - ) - add_change, updated_tree, updated_map = add_doc( - new_document, updated_tree, updated_map - ) - return ( - DocumentChange( - ChangeType.MODIFIED, - new_document, - remove_change.old_index, - add_change.new_index, - ), - updated_tree, - updated_map, - ) - - return None, updated_tree, updated_map - - # Process the sorted changes in the order that is expected by our - # clients (removals, additions, and then modifications). We also need - # to sort the individual changes to assure that old_index/new_index - # keep incrementing. - appliedChanges = [] - - key = functools.cmp_to_key(self._comparator) - - # Deletes are sorted based on the order of the existing document. - delete_changes = sorted(delete_changes, key=key) - for name in delete_changes: - change, updated_tree, updated_map = delete_doc( - name, updated_tree, updated_map - ) - appliedChanges.append(change) - - add_changes = sorted(add_changes, key=key) - _LOGGER.debug("walk over add_changes") - for snapshot in add_changes: - _LOGGER.debug("in add_changes") - change, updated_tree, updated_map = add_doc( - snapshot, updated_tree, updated_map - ) - appliedChanges.append(change) - - update_changes = sorted(update_changes, key=key) - for snapshot in update_changes: - change, updated_tree, updated_map = modify_doc( - snapshot, updated_tree, updated_map - ) - if change is not None: - appliedChanges.append(change) - - assert len(updated_tree) == len(updated_map), ( - "The update document " - + "tree and document map should have the same number of entries." - ) - return (updated_tree, updated_map, appliedChanges) - - def _affects_target(self, target_ids, current_id): - if target_ids is None: - return True - - return current_id in target_ids - - def _current_size(self): - """ - Returns the current count of all documents, including the changes from - the current changeMap. 
- """ - deletes, adds, _ = Watch._extract_changes(self.doc_map, self.change_map, None) - return len(self.doc_map) + len(adds) - len(deletes) - - def _reset_docs(self): - """ - Helper to clear the docs on RESET or filter mismatch. - """ - _LOGGER.debug("resetting documents") - self.change_map.clear() - self.resume_token = None - - # Mark each document as deleted. If documents are not deleted - # they will be sent again by the server. - for snapshot in self.doc_tree.keys(): - name = snapshot.reference._document_path - self.change_map[name] = ChangeType.REMOVED - - self.current = False diff --git a/firestore/noxfile.py b/firestore/noxfile.py deleted file mode 100644 index 87029aee6747..000000000000 --- a/firestore/noxfile.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! - -from __future__ import absolute_import -import os -import shutil - -import nox - - -LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core")) -BLACK_VERSION = "black==19.3b0" -BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] - -if os.path.exists("samples"): - BLACK_PATHS.append("samples") - - -@nox.session(python="3.7") -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. - """ - session.install("flake8", BLACK_VERSION, *LOCAL_DEPS) - session.run("black", "--check", *BLACK_PATHS) - session.run("flake8", "google", "tests") - - -@nox.session(python="3.6") -def blacken(session): - """Run black. - - Format code to uniform standard. - - This currently uses Python 3.6 due to the automated Kokoro run of synthtool. - That run uses an image that doesn't have 3.6 installed. Before updating this - check the state of the `gcp_ubuntu_config` we use for that Kokoro run. - """ - session.install(BLACK_VERSION) - session.run("black", *BLACK_PATHS) - - -@nox.session(python="3.7") -def lint_setup_py(session): - """Verify that setup.py is valid (including RST check).""" - session.install("docutils", "pygments") - session.run("python", "setup.py", "check", "--restructuredtext", "--strict") - - -def default(session): - # Install all test dependencies, then install this package in-place. - session.install("mock", "pytest", "pytest-cov") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", ".") - - # Run py.test against the unit tests. 
- session.run( - "py.test", - "--quiet", - "--cov=google.cloud", - "--cov=tests.unit", - "--cov-append", - "--cov-config=.coveragerc", - "--cov-report=", - "--cov-fail-under=0", - os.path.join("tests", "unit"), - *session.posargs, - ) - - -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) -def unit(session): - """Run the unit test suite.""" - default(session) - - -@nox.session(python=["2.7", "3.7"]) -def system(session): - """Run the system test suite.""" - system_test_path = os.path.join("tests", "system.py") - system_test_folder_path = os.path.join("tests", "system") - # Sanity check: Only run tests if the environment variable is set. - if not os.environ.get("FIRESTORE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable") - - system_test_exists = os.path.exists(system_test_path) - system_test_folder_exists = os.path.exists(system_test_folder_path) - # Sanity check: only run tests if found. - if not system_test_exists and not system_test_folder_exists: - session.skip("System tests were not found") - - # Use pre-release gRPC for system tests. - session.install("--pre", "grpcio") - - # Install all test dependencies, then install this package into the - # virtualenv's dist-packages. - session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", "../test_utils/") - session.install("-e", ".") - - # Run py.test against the system tests. - if system_test_exists: - session.run("py.test", "--verbose", system_test_path, *session.posargs) - if system_test_folder_exists: - session.run("py.test", "--verbose", system_test_folder_path, *session.posargs) - - -@nox.session(python="3.7") -def cover(session): - """Run the final coverage report. - - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python="3.7") -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/firestore/pylint.config.py b/firestore/pylint.config.py deleted file mode 100644 index 5d64b9d2f256..000000000000 --- a/firestore/pylint.config.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""This module is used to configure gcp-devrel-py-tools run-pylint.""" - -# Library configuration - -# library_additions = {} -# library_replacements = {} - -# Test configuration - -# test_additions = copy.deepcopy(library_additions) -# test_replacements = copy.deepcopy(library_replacements) diff --git a/firestore/setup.cfg b/firestore/setup.cfg deleted file mode 100644 index 3bd555500e37..000000000000 --- a/firestore/setup.cfg +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! -[bdist_wheel] -universal = 1 diff --git a/firestore/setup.py b/firestore/setup.py deleted file mode 100644 index 8fafbd8521fc..000000000000 --- a/firestore/setup.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import os - -import setuptools - - -# Package metadata. - -name = "google-cloud-firestore" -description = "Google Cloud Firestore API client library" -version = "1.6.1" -# Should be one of: -# 'Development Status :: 3 - Alpha' -# 'Development Status :: 4 - Beta' -# 'Development Status :: 5 - Production/Stable' -release_status = "Development Status :: 4 - Beta" -dependencies = [ - "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", - "google-cloud-core >= 1.0.3, < 2.0dev", - "pytz", -] -extras = {} - - -# Setup boilerplate below this line. - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, "README.rst") -with io.open(readme_filename, encoding="utf-8") as readme_file: - readme = readme_file.read() - -# Only include packages under the 'google' namespace. Do not include tests, -# benchmarks, etc. -packages = [ - package for package in setuptools.find_packages() if package.startswith("google") -] - -# Determine which namespaces are needed. 
-namespaces = ["google"] -if "google.cloud" in packages: - namespaces.append("google.cloud") - - -setuptools.setup( - name=name, - version=version, - description=description, - long_description=readme, - author="Google LLC", - author_email="googleapis-packages@google.com", - license="Apache 2.0", - url="https://github.com/GoogleCloudPlatform/google-cloud-python", - classifiers=[ - release_status, - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Operating System :: OS Independent", - "Topic :: Internet", - ], - platforms="Posix; MacOS X; Windows", - packages=packages, - namespace_packages=namespaces, - install_requires=dependencies, - extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", - include_package_data=True, - zip_safe=False, -) diff --git a/firestore/synth.metadata b/firestore/synth.metadata deleted file mode 100644 index 9865d73b75e2..000000000000 --- a/firestore/synth.metadata +++ /dev/null @@ -1,1937 +0,0 @@ -{ - "updateTime": "2020-01-30T13:25:19.480236Z", - "sources": [ - { - "generator": { - "name": "artman", - "version": "0.44.4", - "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c1246a29e22b0f98e800a536b5b0da2d933a55f2", - "internalRef": "292310790", - "log": "c1246a29e22b0f98e800a536b5b0da2d933a55f2\nUpdating v1 protos with the latest inline documentation (in comments) and config options. 
Also adding a per-service .yaml file.\n\nPiperOrigin-RevId: 292310790\n\nb491d07cadaae7cde5608321f913e5ca1459b32d\nRevert accidental local_repository change\n\nPiperOrigin-RevId: 292245373\n\naf3400a8cb6110025198b59a0f7d018ae3cda700\nUpdate gapic-generator dependency (prebuilt PHP binary support).\n\nPiperOrigin-RevId: 292243997\n\n341fd5690fae36f36cf626ef048fbcf4bbe7cee6\ngrafeas: v1 add resource_definition for the grafeas.io/Project and change references for Project.\n\nPiperOrigin-RevId: 292221998\n\n42e915ec2ece1cd37a590fbcd10aa2c0fb0e5b06\nUpdate the gapic-generator, protoc-java-resource-name-plugin and protoc-docs-plugin to the latest commit.\n\nPiperOrigin-RevId: 292182368\n\nf035f47250675d31492a09f4a7586cfa395520a7\nFix grafeas build and update build.sh script to include gerafeas.\n\nPiperOrigin-RevId: 292168753\n\n26ccb214b7bc4a716032a6266bcb0a9ca55d6dbb\nasset: v1p1beta1 add client config annotations and retry config\n\nPiperOrigin-RevId: 292154210\n\n974ee5c0b5d03e81a50dafcedf41e0efebb5b749\nasset: v1beta1 add client config annotations\n\nPiperOrigin-RevId: 292152573\n\ncf3b61102ed5f36b827bc82ec39be09525f018c8\n Fix to protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 292034635\n\n4e1cfaa7c0fede9e65d64213ca3da1b1255816c0\nUpdate the public proto to support UTF-8 encoded id for CatalogService API, increase the ListCatalogItems deadline to 300s and some minor documentation change\n\nPiperOrigin-RevId: 292030970\n\n9c483584f8fd5a1b862ae07973f4cc7bb3e46648\nasset: add annotations to v1p1beta1\n\nPiperOrigin-RevId: 292009868\n\ne19209fac29731d0baf6d9ac23da1164f7bdca24\nAdd the google.rpc.context.AttributeContext message to the open source\ndirectories.\n\nPiperOrigin-RevId: 291999930\n\nae5662960573f279502bf98a108a35ba1175e782\noslogin API: move file level option on top of the file to avoid protobuf.js bug.\n\nPiperOrigin-RevId: 291990506\n\neba3897fff7c49ed85d3c47fc96fe96e47f6f684\nAdd cc_proto_library and cc_grpc_library targets for Spanner and IAM protos.\n\nPiperOrigin-RevId: 291988651\n\n8e981acfd9b97ea2f312f11bbaa7b6c16e412dea\nBeta launch for PersonDetection and FaceDetection features.\n\nPiperOrigin-RevId: 291821782\n\n994e067fae3b21e195f7da932b08fff806d70b5d\nasset: add annotations to v1p2beta1\n\nPiperOrigin-RevId: 291815259\n\n244e1d2c89346ca2e0701b39e65552330d68545a\nAdd Playable Locations service\n\nPiperOrigin-RevId: 291806349\n\n909f8f67963daf45dd88d020877fb9029b76788d\nasset: add annotations to v1beta2\n\nPiperOrigin-RevId: 291805301\n\n3c39a1d6e23c1ef63c7fba4019c25e76c40dfe19\nKMS: add file-level message for CryptoKeyPath, it is defined in gapic yaml but not\nin proto files.\n\nPiperOrigin-RevId: 291420695\n\nc6f3f350b8387f8d1b85ed4506f30187ebaaddc3\ncontaineranalysis: update v1beta1 and bazel build with annotations\n\nPiperOrigin-RevId: 291401900\n\n92887d74b44e4e636252b7b8477d0d2570cd82db\nfix: fix the location of grpc config file.\n\nPiperOrigin-RevId: 291396015\n\ne26cab8afd19d396b929039dac5d874cf0b5336c\nexpr: add default_host and method_signature annotations to CelService\n\nPiperOrigin-RevId: 291240093\n\n06093ae3952441c34ec176d1f7431b8765cec0be\nirm: fix v1alpha2 bazel build by adding missing proto imports\n\nPiperOrigin-RevId: 291227940\n\na8a2514af326e4673063f9a3c9d0ef1091c87e6c\nAdd proto annotation for cloud/irm API\n\nPiperOrigin-RevId: 291217859\n\n8d16f76de065f530d395a4c7eabbf766d6a120fd\nGenerate Memcache v1beta2 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 
291008516\n\n3af1dabd93df9a9f17bf3624d3b875c11235360b\ngrafeas: Add containeranalysis default_host to Grafeas service\n\nPiperOrigin-RevId: 290965849\n\nbe2663fa95e31cba67d0cd62611a6674db9f74b7\nfix(google/maps/roads): add missing opening bracket\n\nPiperOrigin-RevId: 290964086\n\nfacc26550a0af0696e0534bc9cae9df14275aa7c\nUpdating v2 protos with the latest inline documentation (in comments) and adding a per-service .yaml file.\n\nPiperOrigin-RevId: 290952261\n\ncda99c1f7dc5e4ca9b1caeae1dc330838cbc1461\nChange api_name to 'asset' for v1p1beta1\n\nPiperOrigin-RevId: 290800639\n\n94e9e90c303a820ce40643d9129e7f0d2054e8a1\nAdds Google Maps Road service\n\nPiperOrigin-RevId: 290795667\n\na3b23dcb2eaecce98c600c7d009451bdec52dbda\nrpc: new message ErrorInfo, other comment updates\n\nPiperOrigin-RevId: 290781668\n\n26420ef4e46c37f193c0fbe53d6ebac481de460e\nAdd proto definition for Org Policy v1.\n\nPiperOrigin-RevId: 290771923\n\n7f0dab8177cf371ae019a082e2512de7ac102888\nPublish Routes Preferred API v1 service definitions.\n\nPiperOrigin-RevId: 290326986\n\nad6e508d0728e1d1bca6e3f328cd562718cb772d\nFix: Qualify resource type references with \"jobs.googleapis.com/\"\n\nPiperOrigin-RevId: 290285762\n\n58e770d568a2b78168ddc19a874178fee8265a9d\ncts client library\n\nPiperOrigin-RevId: 290146169\n\naf9daa4c3b4c4a8b7133b81588dd9ffd37270af2\nAdd more programming language options to public proto\n\nPiperOrigin-RevId: 290144091\n\nd9f2bbf2df301ef84641d4cec7c828736a0bd907\ntalent: add missing resource.proto dep to Bazel build target\n\nPiperOrigin-RevId: 290143164\n\n3b3968237451d027b42471cd28884a5a1faed6c7\nAnnotate Talent API.\nAdd gRPC service config for retry.\nUpdate bazel file with google.api.resource dependency.\n\nPiperOrigin-RevId: 290125172\n\n0735b4b096872960568d1f366bfa75b7b0e1f1a3\nWeekly library update.\n\nPiperOrigin-RevId: 289939042\n\n8760d3d9a4543d7f9c0d1c7870aca08b116e4095\nWeekly library update.\n\nPiperOrigin-RevId: 289939020\n\n8607df842f782a901805187e02fff598145b0b0e\nChange Talent API timeout to 30s.\n\nPiperOrigin-RevId: 289912621\n\n908155991fe32570653bcb72ecfdcfc896642f41\nAdd Recommendations AI V1Beta1\n\nPiperOrigin-RevId: 289901914\n\n5c9a8c2bebd8b71aa66d1cc473edfaac837a2c78\nAdding no-arg method signatures for ListBillingAccounts and ListServices\n\nPiperOrigin-RevId: 289891136\n\n50b0e8286ac988b0593bd890eb31fef6ea2f5767\nlongrunning: add grpc service config and default_host annotation to operations.proto\n\nPiperOrigin-RevId: 289876944\n\n6cac27dabe51c54807b0401698c32d34998948a9\n Updating default deadline for Cloud Security Command Center's v1 APIs.\n\nPiperOrigin-RevId: 289875412\n\nd99df0d67057a233c711187e0689baa4f8e6333d\nFix: Correct spelling in C# namespace option\n\nPiperOrigin-RevId: 289709813\n\n2fa8d48165cc48e35b0c62e6f7bdade12229326c\nfeat: Publish Recommender v1 to GitHub.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289619243\n\n9118db63d1ab493a2e44a3b4973fde810a835c49\nfirestore: don't retry reads that fail with Aborted\n\nFor transaction reads that fail with ABORTED, we need to rollback and start a new transaction. Our current configuration makes it so that GAPIC retries ABORTED reads multiple times without making any progress. 
Instead, we should retry at the transaction level.\n\nPiperOrigin-RevId: 289532382\n\n1dbfd3fe4330790b1e99c0bb20beb692f1e20b8a\nFix bazel build\nAdd other langauges (Java was already there) for bigquery/storage/v1alpha2 api.\n\nPiperOrigin-RevId: 289519766\n\nc06599cdd7d11f8d3fd25f8d3249e5bb1a3d5d73\nInitial commit of google.cloud.policytroubleshooter API, The API helps in troubleshooting GCP policies. Refer https://cloud.google.com/iam/docs/troubleshooting-access for more information\n\nPiperOrigin-RevId: 289491444\n\nfce7d80fa16ea241e87f7bc33d68595422e94ecd\nDo not pass samples option for Artman config of recommender v1 API.\n\nPiperOrigin-RevId: 289477403\n\nef179e8c61436297e6bb124352e47e45c8c80cb1\nfix: Address missing Bazel dependency.\n\nBazel builds stopped working in 06ec6d5 because\nthe google/longrunning/operations.proto file took\nan import from google/api/client.proto, but that\nimport was not added to BUILD.bazel.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446074\n\n8841655b242c84fd691d77d7bcf21b61044f01ff\nMigrate Data Labeling v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446026\n\n06ec6d5d053fff299eaa6eaa38afdd36c5e2fc68\nAdd annotations to google.longrunning.v1\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289413169\n\n0480cf40be1d3cc231f4268a2fdb36a8dd60e641\nMigrate IAM Admin v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289411084\n\n1017173e9adeb858587639af61889ad970c728b1\nSpecify a C# namespace for BigQuery Connection v1beta1\n\nPiperOrigin-RevId: 289396763\n\nb08714b378e8e5b0c4ecdde73f92c36d6303b4b6\nfix: Integrate latest proto-docs-plugin fix.\nFixes dialogflow v2\n\nPiperOrigin-RevId: 289189004\n\n51217a67e79255ee1f2e70a6a3919df082513327\nCreate BUILD file for recommender v1\n\nPiperOrigin-RevId: 289183234\n\nacacd87263c0a60e458561b8b8ce9f67c760552a\nGenerate recommender v1 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 289177510\n\n9d2f7133b97720b1fa3601f6dcd30760ba6d8a1e\nFix kokoro build script\n\nPiperOrigin-RevId: 289166315\n\nc43a67530d2a47a0220cad20ca8de39b3fbaf2c5\ncloudtasks: replace missing RPC timeout config for v2beta2 and v2beta3\n\nPiperOrigin-RevId: 289162391\n\n4cefc229a9197236fc0adf02d69b71c0c5cf59de\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 289158456\n\n56f263fe959c50786dab42e3c61402d32d1417bd\nCatalog API: Adding config necessary to build client libraries\n\nPiperOrigin-RevId: 289149879\n\n4543762b23a57fc3c53d409efc3a9affd47b6ab3\nFix Bazel build\nbilling/v1 and dialogflow/v2 remain broken (not bazel-related issues).\nBilling has wrong configuration, dialogflow failure is caused by a bug in documentation plugin.\n\nPiperOrigin-RevId: 289140194\n\nc9dce519127b97e866ca133a01157f4ce27dcceb\nUpdate Bigtable docs\n\nPiperOrigin-RevId: 289114419\n\n802c5c5f2bf94c3facb011267d04e71942e0d09f\nMigrate DLP to proto annotations (but not GAPIC v2).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289102579\n\n6357f30f2ec3cff1d8239d18b707ff9d438ea5da\nRemove gRPC configuration file that was in the wrong place.\n\nPiperOrigin-RevId: 289096111\n\n360a8792ed62f944109d7e22d613a04a010665b4\n Protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 289011995\n\na79211c20c4f2807eec524d00123bf7c06ad3d6e\nRoll back containeranalysis v1 to GAPIC v1.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288999068\n\n9e60345ba603e03484a8aaa33ce5ffa19c1c652b\nPublish Routes Preferred API v1 proto definitions.\n\nPiperOrigin-RevId: 
288941399\n\nd52885b642ad2aa1f42b132ee62dbf49a73e1e24\nMigrate the service management API to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288909426\n\n6ace586805c08896fef43e28a261337fcf3f022b\ncloudtasks: replace missing RPC timeout config\n\nPiperOrigin-RevId: 288783603\n\n51d906cabee4876b12497054b15b05d4a50ad027\nImport of Grafeas from Github.\n\nUpdate BUILD.bazel accordingly.\n\nPiperOrigin-RevId: 288783426\n\n5ef42bcd363ba0440f0ee65b3c80b499e9067ede\nMigrate Recommender v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288713066\n\n94f986afd365b7d7e132315ddcd43d7af0e652fb\nMigrate Container Analysis v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288708382\n\n7a751a279184970d3b6ba90e4dd4d22a382a0747\nRemove Container Analysis v1alpha1 (nobody publishes it).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288707473\n\n3c0d9c71242e70474b2b640e15bb0a435fd06ff0\nRemove specious annotation from BigQuery Data Transfer before\nanyone accidentally does anything that uses it.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288701604\n\n1af307a4764bd415ef942ac5187fa1def043006f\nMigrate BigQuery Connection to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288698681\n\n" - } - }, - { - "template": { - "name": "python_library", - "origin": "synthtool.gcp", - "version": "2019.10.17" - } - } - ], - "destinations": [ - { - "client": { - "source": "googleapis", - "apiName": "firestore", - "apiVersion": "v1beta1", - "language": "python", - "generator": "gapic", - "config": "google/firestore/artman_firestore.yaml" - } - }, - { - "client": { - "source": "googleapis", - "apiName": "firestore", - "apiVersion": "v1", - "language": "python", - "generator": "gapic", - "config": "google/firestore/artman_firestore_v1.yaml" - } - }, - { - "client": { - "source": "googleapis", - "apiName": "firestore_admin", - "apiVersion": "v1", - "language": "python", - "generator": "gapic", - "config": "google/firestore/admin/artman_firestore_v1.yaml" - } - } - ], - "newFiles": [ - { - "path": ".coveragerc" - }, - { - "path": ".flake8" - }, - { - "path": ".repo-metadata.json" - }, - { - "path": "CHANGELOG.md" - }, - { - "path": "LICENSE" - }, - { - "path": "MANIFEST.in" - }, - { - "path": "Makefile_v1" - }, - { - "path": "Makefile_v1beta1" - }, - { - "path": "README.rst" - }, - { - "path": "docs/README.rst" - }, - { - "path": "docs/_static/custom.css" - }, - { - "path": "docs/_templates/layout.html" - }, - { - "path": "docs/batch.rst" - }, - { - "path": "docs/changelog.md" - }, - { - "path": "docs/client.rst" - }, - { - "path": "docs/collection.rst" - }, - { - "path": "docs/conf.py" - }, - { - "path": "docs/document.rst" - }, - { - "path": "docs/field_path.rst" - }, - { - "path": "docs/index.rst" - }, - { - "path": "docs/query.rst" - }, - { - "path": "docs/transaction.rst" - }, - { - "path": "docs/transforms.rst" - }, - { - "path": "docs/types.rst" - }, - { - "path": "google/__init__.py" - }, - { - "path": "google/cloud/__init__.py" - }, - { - "path": "google/cloud/firestore.py" - }, - { - "path": "google/cloud/firestore_admin_v1/__init__.py" - }, - { - "path": "google/cloud/firestore_admin_v1/gapic/__init__.py" - }, - { - "path": "google/cloud/firestore_admin_v1/gapic/enums.py" - }, - { - "path": "google/cloud/firestore_admin_v1/gapic/firestore_admin_client.py" - }, - { - "path": "google/cloud/firestore_admin_v1/gapic/firestore_admin_client_config.py" - }, - { - "path": "google/cloud/firestore_admin_v1/gapic/transports/__init__.py" - }, - { - "path": 
"google/cloud/firestore_admin_v1/gapic/transports/firestore_admin_grpc_transport.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/__init__.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/field.proto" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/field_pb2.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/field_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/firestore_admin.proto" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/firestore_admin_pb2.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/firestore_admin_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/index.proto" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/index_pb2.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/index_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/location.proto" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/location_pb2.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/location_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/operation.proto" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/operation_pb2.py" - }, - { - "path": "google/cloud/firestore_admin_v1/proto/operation_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_admin_v1/types.py" - }, - { - "path": "google/cloud/firestore_v1/__init__.py" - }, - { - "path": "google/cloud/firestore_v1/_helpers.py" - }, - { - "path": "google/cloud/firestore_v1/batch.py" - }, - { - "path": "google/cloud/firestore_v1/client.py" - }, - { - "path": "google/cloud/firestore_v1/collection.py" - }, - { - "path": "google/cloud/firestore_v1/document.py" - }, - { - "path": "google/cloud/firestore_v1/field_path.py" - }, - { - "path": "google/cloud/firestore_v1/gapic/__init__.py" - }, - { - "path": "google/cloud/firestore_v1/gapic/enums.py" - }, - { - "path": "google/cloud/firestore_v1/gapic/firestore_client.py" - }, - { - "path": "google/cloud/firestore_v1/gapic/firestore_client_config.py" - }, - { - "path": "google/cloud/firestore_v1/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/firestore_v1/gapic/transports/firestore_grpc_transport.py" - }, - { - "path": "google/cloud/firestore_v1/order.py" - }, - { - "path": "google/cloud/firestore_v1/proto/__init__.py" - }, - { - "path": "google/cloud/firestore_v1/proto/common.proto" - }, - { - "path": "google/cloud/firestore_v1/proto/common_pb2.py" - }, - { - "path": "google/cloud/firestore_v1/proto/common_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1/proto/document.proto" - }, - { - "path": "google/cloud/firestore_v1/proto/document_pb2.py" - }, - { - "path": "google/cloud/firestore_v1/proto/document_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1/proto/firestore.proto" - }, - { - "path": "google/cloud/firestore_v1/proto/firestore_pb2.py" - }, - { - "path": "google/cloud/firestore_v1/proto/firestore_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1/proto/query.proto" - }, - { - "path": "google/cloud/firestore_v1/proto/query_pb2.py" - }, - { - "path": "google/cloud/firestore_v1/proto/query_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1/proto/test_v1_pb2.py" - }, - { - "path": "google/cloud/firestore_v1/proto/tests_pb2.py" - }, - { - "path": "google/cloud/firestore_v1/proto/write.proto" - }, - { - "path": "google/cloud/firestore_v1/proto/write_pb2.py" - }, - { - "path": "google/cloud/firestore_v1/proto/write_pb2_grpc.py" - }, - { - "path": 
"google/cloud/firestore_v1/query.py" - }, - { - "path": "google/cloud/firestore_v1/transaction.py" - }, - { - "path": "google/cloud/firestore_v1/transforms.py" - }, - { - "path": "google/cloud/firestore_v1/types.py" - }, - { - "path": "google/cloud/firestore_v1/watch.py" - }, - { - "path": "google/cloud/firestore_v1beta1/__init__.py" - }, - { - "path": "google/cloud/firestore_v1beta1/_helpers.py" - }, - { - "path": "google/cloud/firestore_v1beta1/batch.py" - }, - { - "path": "google/cloud/firestore_v1beta1/client.py" - }, - { - "path": "google/cloud/firestore_v1beta1/collection.py" - }, - { - "path": "google/cloud/firestore_v1beta1/document.py" - }, - { - "path": "google/cloud/firestore_v1beta1/field_path.py" - }, - { - "path": "google/cloud/firestore_v1beta1/gapic/__init__.py" - }, - { - "path": "google/cloud/firestore_v1beta1/gapic/enums.py" - }, - { - "path": "google/cloud/firestore_v1beta1/gapic/firestore_client.py" - }, - { - "path": "google/cloud/firestore_v1beta1/gapic/firestore_client_config.py" - }, - { - "path": "google/cloud/firestore_v1beta1/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/firestore_v1beta1/gapic/transports/firestore_grpc_transport.py" - }, - { - "path": "google/cloud/firestore_v1beta1/order.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/__init__.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/admin/__init__.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/admin/firestore_admin_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/admin/firestore_admin_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/admin/index_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/admin/index_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/common.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/common_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/common_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/document.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/document_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/document_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/event_flow_document_change_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/field.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/firestore.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/firestore_admin.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/firestore_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/firestore_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/index.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/location.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/operation.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/query.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/query_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/query_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/test_v1beta1_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/write.proto" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/write_pb2.py" - }, - { - "path": "google/cloud/firestore_v1beta1/proto/write_pb2_grpc.py" - }, - { - "path": "google/cloud/firestore_v1beta1/query.py" - }, - { 
- "path": "google/cloud/firestore_v1beta1/transaction.py" - }, - { - "path": "google/cloud/firestore_v1beta1/transforms.py" - }, - { - "path": "google/cloud/firestore_v1beta1/types.py" - }, - { - "path": "google/cloud/firestore_v1beta1/watch.py" - }, - { - "path": "noxfile.py" - }, - { - "path": "pylint.config.py" - }, - { - "path": "setup.cfg" - }, - { - "path": "setup.py" - }, - { - "path": "synth.metadata" - }, - { - "path": "synth.py" - }, - { - "path": "tests/__init__.py" - }, - { - "path": "tests/credentials.json.enc" - }, - { - "path": "tests/system/test_system.py" - }, - { - "path": "tests/system/util/cleanup_firestore_documents.py" - }, - { - "path": "tests/unit/__init__.py" - }, - { - "path": "tests/unit/gapic/v1/test_firestore_admin_client_v1.py" - }, - { - "path": "tests/unit/gapic/v1/test_firestore_client_v1.py" - }, - { - "path": "tests/unit/gapic/v1beta1/test_firestore_client_v1beta1.py" - }, - { - "path": "tests/unit/test_firestore_shim.py" - }, - { - "path": "tests/unit/v1/__init__.py" - }, - { - "path": "tests/unit/v1/test__helpers.py" - }, - { - "path": "tests/unit/v1/test_batch.py" - }, - { - "path": "tests/unit/v1/test_client.py" - }, - { - "path": "tests/unit/v1/test_collection.py" - }, - { - "path": "tests/unit/v1/test_cross_language.py" - }, - { - "path": "tests/unit/v1/test_document.py" - }, - { - "path": "tests/unit/v1/test_field_path.py" - }, - { - "path": "tests/unit/v1/test_order.py" - }, - { - "path": "tests/unit/v1/test_query.py" - }, - { - "path": "tests/unit/v1/test_transaction.py" - }, - { - "path": "tests/unit/v1/test_transforms.py" - }, - { - "path": "tests/unit/v1/test_watch.py" - }, - { - "path": "tests/unit/v1/testdata/create-all-transforms.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayremove-multi.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayremove-nested.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayremove-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayremove-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayremove-with-st.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayremove.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayunion-multi.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayunion-nested.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayunion-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayunion-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayunion-with-st.json" - }, - { - "path": "tests/unit/v1/testdata/create-arrayunion.json" - }, - { - "path": "tests/unit/v1/testdata/create-basic.json" - }, - { - "path": "tests/unit/v1/testdata/create-complex.json" - }, - { - "path": "tests/unit/v1/testdata/create-del-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/create-del-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/create-empty.json" - }, - { - "path": "tests/unit/v1/testdata/create-nodel.json" - }, - { - "path": "tests/unit/v1/testdata/create-nosplit.json" - }, - { - "path": "tests/unit/v1/testdata/create-special-chars.json" - }, - { - "path": "tests/unit/v1/testdata/create-st-alone.json" - }, - { - "path": "tests/unit/v1/testdata/create-st-multi.json" - }, - { - "path": "tests/unit/v1/testdata/create-st-nested.json" - }, - { - "path": "tests/unit/v1/testdata/create-st-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/create-st-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/create-st-with-empty-map.json" - }, 
- { - "path": "tests/unit/v1/testdata/create-st.json" - }, - { - "path": "tests/unit/v1/testdata/delete-exists-precond.json" - }, - { - "path": "tests/unit/v1/testdata/delete-no-precond.json" - }, - { - "path": "tests/unit/v1/testdata/delete-time-precond.json" - }, - { - "path": "tests/unit/v1/testdata/get-basic.json" - }, - { - "path": "tests/unit/v1/testdata/listen-add-mod-del-add.json" - }, - { - "path": "tests/unit/v1/testdata/listen-add-one.json" - }, - { - "path": "tests/unit/v1/testdata/listen-add-three.json" - }, - { - "path": "tests/unit/v1/testdata/listen-doc-remove.json" - }, - { - "path": "tests/unit/v1/testdata/listen-empty.json" - }, - { - "path": "tests/unit/v1/testdata/listen-filter-nop.json" - }, - { - "path": "tests/unit/v1/testdata/listen-multi-docs.json" - }, - { - "path": "tests/unit/v1/testdata/listen-nocurrent.json" - }, - { - "path": "tests/unit/v1/testdata/listen-nomod.json" - }, - { - "path": "tests/unit/v1/testdata/listen-removed-target-ids.json" - }, - { - "path": "tests/unit/v1/testdata/listen-reset.json" - }, - { - "path": "tests/unit/v1/testdata/listen-target-add-nop.json" - }, - { - "path": "tests/unit/v1/testdata/listen-target-add-wrong-id.json" - }, - { - "path": "tests/unit/v1/testdata/listen-target-remove.json" - }, - { - "path": "tests/unit/v1/testdata/query-arrayremove-cursor.json" - }, - { - "path": "tests/unit/v1/testdata/query-arrayremove-where.json" - }, - { - "path": "tests/unit/v1/testdata/query-arrayunion-cursor.json" - }, - { - "path": "tests/unit/v1/testdata/query-arrayunion-where.json" - }, - { - "path": "tests/unit/v1/testdata/query-bad-NaN.json" - }, - { - "path": "tests/unit/v1/testdata/query-bad-null.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-docsnap-order.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-docsnap-orderby-name.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-docsnap-where-eq.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-docsnap-where-neq-orderby.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-docsnap-where-neq.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-docsnap.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-endbefore-empty-map.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-endbefore-empty.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-no-order.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-startat-empty-map.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-startat-empty.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-vals-1a.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-vals-1b.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-vals-2.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-vals-docid.json" - }, - { - "path": "tests/unit/v1/testdata/query-cursor-vals-last-wins.json" - }, - { - "path": "tests/unit/v1/testdata/query-del-cursor.json" - }, - { - "path": "tests/unit/v1/testdata/query-del-where.json" - }, - { - "path": "tests/unit/v1/testdata/query-invalid-operator.json" - }, - { - "path": "tests/unit/v1/testdata/query-invalid-path-order.json" - }, - { - "path": "tests/unit/v1/testdata/query-invalid-path-select.json" - }, - { - "path": "tests/unit/v1/testdata/query-invalid-path-where.json" - }, - { - "path": "tests/unit/v1/testdata/query-offset-limit-last-wins.json" - }, - { - "path": "tests/unit/v1/testdata/query-offset-limit.json" - }, - { - "path": "tests/unit/v1/testdata/query-order.json" - }, - { - 
"path": "tests/unit/v1/testdata/query-select-empty.json" - }, - { - "path": "tests/unit/v1/testdata/query-select-last-wins.json" - }, - { - "path": "tests/unit/v1/testdata/query-select.json" - }, - { - "path": "tests/unit/v1/testdata/query-st-cursor.json" - }, - { - "path": "tests/unit/v1/testdata/query-st-where.json" - }, - { - "path": "tests/unit/v1/testdata/query-where-2.json" - }, - { - "path": "tests/unit/v1/testdata/query-where-NaN.json" - }, - { - "path": "tests/unit/v1/testdata/query-where-null.json" - }, - { - "path": "tests/unit/v1/testdata/query-where.json" - }, - { - "path": "tests/unit/v1/testdata/query-wrong-collection.json" - }, - { - "path": "tests/unit/v1/testdata/set-all-transforms.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayremove-multi.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayremove-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayremove-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayremove-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayremove-with-st.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayremove.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayunion-multi.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayunion-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayunion-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayunion-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayunion-with-st.json" - }, - { - "path": "tests/unit/v1/testdata/set-arrayunion.json" - }, - { - "path": "tests/unit/v1/testdata/set-basic.json" - }, - { - "path": "tests/unit/v1/testdata/set-complex.json" - }, - { - "path": "tests/unit/v1/testdata/set-del-merge-alone.json" - }, - { - "path": "tests/unit/v1/testdata/set-del-merge.json" - }, - { - "path": "tests/unit/v1/testdata/set-del-mergeall.json" - }, - { - "path": "tests/unit/v1/testdata/set-del-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-del-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/set-del-nomerge.json" - }, - { - "path": "tests/unit/v1/testdata/set-del-nonleaf.json" - }, - { - "path": "tests/unit/v1/testdata/set-del-wo-merge.json" - }, - { - "path": "tests/unit/v1/testdata/set-empty.json" - }, - { - "path": "tests/unit/v1/testdata/set-merge-fp.json" - }, - { - "path": "tests/unit/v1/testdata/set-merge-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-merge-nonleaf.json" - }, - { - "path": "tests/unit/v1/testdata/set-merge-prefix.json" - }, - { - "path": "tests/unit/v1/testdata/set-merge-present.json" - }, - { - "path": "tests/unit/v1/testdata/set-merge.json" - }, - { - "path": "tests/unit/v1/testdata/set-mergeall-empty.json" - }, - { - "path": "tests/unit/v1/testdata/set-mergeall-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-mergeall.json" - }, - { - "path": "tests/unit/v1/testdata/set-nodel.json" - }, - { - "path": "tests/unit/v1/testdata/set-nosplit.json" - }, - { - "path": "tests/unit/v1/testdata/set-special-chars.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-alone-mergeall.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-alone.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-merge-both.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-merge-nonleaf-alone.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-merge-nonleaf.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-merge-nowrite.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-mergeall.json" - }, - { - 
"path": "tests/unit/v1/testdata/set-st-multi.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-nomerge.json" - }, - { - "path": "tests/unit/v1/testdata/set-st-with-empty-map.json" - }, - { - "path": "tests/unit/v1/testdata/set-st.json" - }, - { - "path": "tests/unit/v1/testdata/update-all-transforms.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayremove-alone.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayremove-multi.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayremove-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayremove-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayremove-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayremove-with-st.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayremove.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayunion-alone.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayunion-multi.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayunion-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayunion-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayunion-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayunion-with-st.json" - }, - { - "path": "tests/unit/v1/testdata/update-arrayunion.json" - }, - { - "path": "tests/unit/v1/testdata/update-badchar.json" - }, - { - "path": "tests/unit/v1/testdata/update-basic.json" - }, - { - "path": "tests/unit/v1/testdata/update-complex.json" - }, - { - "path": "tests/unit/v1/testdata/update-del-alone.json" - }, - { - "path": "tests/unit/v1/testdata/update-del-dot.json" - }, - { - "path": "tests/unit/v1/testdata/update-del-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-del-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-del-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/update-del.json" - }, - { - "path": "tests/unit/v1/testdata/update-exists-precond.json" - }, - { - "path": "tests/unit/v1/testdata/update-fp-empty-component.json" - }, - { - "path": "tests/unit/v1/testdata/update-nested-transform-and-nested-value.json" - }, - { - "path": "tests/unit/v1/testdata/update-no-paths.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-all-transforms.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayremove-alone.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayremove-multi.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayremove-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayremove-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayremove-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayremove-with-st.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayremove.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayunion-alone.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayunion-multi.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayunion-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayunion-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayunion-noarray.json" - }, - { - "path": 
"tests/unit/v1/testdata/update-paths-arrayunion-with-st.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-arrayunion.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-basic.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-complex.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-del-alone.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-del-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-del-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-del-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-del.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-exists-precond.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-fp-del.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-fp-dup-transforms.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-fp-dup.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-fp-empty-component.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-fp-empty.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-fp-multi.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-fp-nosplit.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-nested-transform-and-nested-value.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-no-paths.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-prefix-1.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-prefix-2.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-prefix-3.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-special-chars.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-st-alone.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-st-multi.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-st-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-st-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-st-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-st-with-empty-map.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-st.json" - }, - { - "path": "tests/unit/v1/testdata/update-paths-uptime.json" - }, - { - "path": "tests/unit/v1/testdata/update-prefix-1.json" - }, - { - "path": "tests/unit/v1/testdata/update-prefix-2.json" - }, - { - "path": "tests/unit/v1/testdata/update-prefix-3.json" - }, - { - "path": "tests/unit/v1/testdata/update-quoting.json" - }, - { - "path": "tests/unit/v1/testdata/update-split-top-level.json" - }, - { - "path": "tests/unit/v1/testdata/update-split.json" - }, - { - "path": "tests/unit/v1/testdata/update-st-alone.json" - }, - { - "path": "tests/unit/v1/testdata/update-st-dot.json" - }, - { - "path": "tests/unit/v1/testdata/update-st-multi.json" - }, - { - "path": "tests/unit/v1/testdata/update-st-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-st-noarray-nested.json" - }, - { - "path": "tests/unit/v1/testdata/update-st-noarray.json" - }, - { - "path": "tests/unit/v1/testdata/update-st-with-empty-map.json" - }, - { - "path": "tests/unit/v1/testdata/update-st.json" - }, - { - "path": "tests/unit/v1/testdata/update-uptime.json" - }, - { - "path": "tests/unit/v1beta1/__init__.py" - }, - { - "path": "tests/unit/v1beta1/test__helpers.py" - }, - { - "path": "tests/unit/v1beta1/test_batch.py" - }, - { - "path": "tests/unit/v1beta1/test_client.py" - }, - { - "path": "tests/unit/v1beta1/test_collection.py" - }, - { - "path": 
"tests/unit/v1beta1/test_cross_language.py" - }, - { - "path": "tests/unit/v1beta1/test_document.py" - }, - { - "path": "tests/unit/v1beta1/test_field_path.py" - }, - { - "path": "tests/unit/v1beta1/test_order.py" - }, - { - "path": "tests/unit/v1beta1/test_query.py" - }, - { - "path": "tests/unit/v1beta1/test_transaction.py" - }, - { - "path": "tests/unit/v1beta1/test_transforms.py" - }, - { - "path": "tests/unit/v1beta1/test_watch.py" - }, - { - "path": "tests/unit/v1beta1/testdata/create-all-transforms.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayremove-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayremove-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayremove-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayremove-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayremove-with-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayremove.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayunion-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayunion-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayunion-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayunion-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayunion-with-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-arrayunion.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-basic.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-complex.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-del-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-del-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-empty.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-nodel.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-nosplit.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-special-chars.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-st-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-st-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-st-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-st-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-st-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-st-with-empty-map.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/create-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/delete-exists-precond.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/delete-no-precond.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/delete-time-precond.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/get-basic.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-add-mod-del-add.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-add-one.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-add-three.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-doc-remove.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-empty.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-filter-nop.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-multi-docs.textproto" - }, - { - "path": 
"tests/unit/v1beta1/testdata/listen-nocurrent.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-nomod.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-removed-target-ids.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-reset.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-target-add-nop.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-target-add-wrong-id.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/listen-target-remove.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-arrayremove-cursor.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-arrayremove-where.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-arrayunion-cursor.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-arrayunion-where.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-bad-NaN.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-bad-null.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-docsnap-order.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-docsnap-orderby-name.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-docsnap-where-eq.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-docsnap-where-neq-orderby.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-docsnap-where-neq.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-docsnap.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-endbefore-empty-map.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-endbefore-empty.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-no-order.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-startat-empty-map.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-startat-empty.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-vals-1a.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-vals-1b.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-vals-2.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-vals-docid.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-cursor-vals-last-wins.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-del-cursor.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-del-where.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-invalid-operator.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-invalid-path-order.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-invalid-path-select.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-invalid-path-where.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-offset-limit-last-wins.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-offset-limit.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-order.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-select-empty.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-select-last-wins.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-select.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-st-cursor.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-st-where.textproto" - }, - { - "path": 
"tests/unit/v1beta1/testdata/query-where-2.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-where-NaN.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-where-null.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-where.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/query-wrong-collection.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-all-transforms.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayremove-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayremove-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayremove-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayremove-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayremove-with-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayremove.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayunion-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayunion-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayunion-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayunion-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayunion-with-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-arrayunion.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-basic.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-complex.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-del-merge-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-del-merge.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-del-mergeall.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-del-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-del-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-del-nomerge.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-del-nonleaf.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-del-wo-merge.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-empty.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-merge-fp.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-merge-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-merge-nonleaf.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-merge-prefix.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-merge-present.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-merge.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-mergeall-empty.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-mergeall-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-mergeall.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-nodel.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-nosplit.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-special-chars.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-alone-mergeall.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-merge-both.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-merge-nonleaf-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-merge-nonleaf.textproto" - }, - { - "path": 
"tests/unit/v1beta1/testdata/set-st-merge-nowrite.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-mergeall.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-nomerge.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st-with-empty-map.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/set-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/test-suite.binproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-all-transforms.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayremove-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayremove-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayremove-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayremove-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayremove-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayremove-with-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayremove.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayunion-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayunion-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayunion-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayunion-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayunion-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayunion-with-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-arrayunion.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-badchar.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-basic.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-complex.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-del-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-del-dot.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-del-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-del-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-del-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-del.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-exists-precond.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-fp-empty-component.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-nested-transform-and-nested-value.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-no-paths.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-all-transforms.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayremove-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayremove-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayremove-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayremove-noarray-nested.textproto" - }, - { - "path": 
"tests/unit/v1beta1/testdata/update-paths-arrayremove-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayremove-with-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayremove.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayunion-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayunion-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayunion-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayunion-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayunion-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayunion-with-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-arrayunion.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-basic.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-complex.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-del-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-del-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-del-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-del-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-del.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-exists-precond.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-fp-del.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-fp-dup-transforms.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-fp-dup.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-fp-empty-component.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-fp-empty.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-fp-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-fp-nosplit.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-no-paths.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-prefix-1.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-prefix-2.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-prefix-3.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-special-chars.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-st-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-st-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-st-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-st-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-st-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-st-with-empty-map.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-paths-uptime.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-prefix-1.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-prefix-2.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-prefix-3.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-quoting.textproto" - }, - { - "path": 
"tests/unit/v1beta1/testdata/update-split-top-level.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-split.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-st-alone.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-st-dot.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-st-multi.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-st-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-st-noarray-nested.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-st-noarray.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-st-with-empty-map.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-st.textproto" - }, - { - "path": "tests/unit/v1beta1/testdata/update-uptime.textproto" - } - ] -} \ No newline at end of file diff --git a/firestore/synth.py b/firestore/synth.py deleted file mode 100644 index 3f9adae43767..000000000000 --- a/firestore/synth.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This script is used to synthesize generated parts of this library.""" -import synthtool as s -from synthtool import gcp - -gapic = gcp.GAPICGenerator() -common = gcp.CommonTemplates() -versions = [ - ("v1beta1", "artman_firestore.yaml"), - ("v1", "artman_firestore_v1.yaml"), -] -admin_versions = [ - ("v1", "artman_firestore_v1.yaml"), -] - - -# ---------------------------------------------------------------------------- -# Generate firestore GAPIC layer -# ---------------------------------------------------------------------------- -for version, artman_config in versions: - library = gapic.py_library( - "firestore", - version, - config_path=f"/google/firestore/{artman_config}", - artman_output_name=f"firestore-{version}", - include_protos=True, - ) - - s.move(library / f"google/cloud/firestore_{version}/proto") - s.move(library / f"google/cloud/firestore_{version}/gapic") - s.move(library / f"tests/unit/gapic/{version}") - - s.replace( - f"tests/unit/gapic/{version}/test_firestore_client_{version}.py", - f"from google.cloud import firestore_{version}", - f"from google.cloud.firestore_{version}.gapic import firestore_client", - ) - - s.replace( - f"tests/unit/gapic/{version}/test_firestore_client_{version}.py", - f"client = firestore_{version}.FirestoreClient", - "client = firestore_client.FirestoreClient", - ) - - -# ---------------------------------------------------------------------------- -# Generate firestore admin GAPIC layer -# ---------------------------------------------------------------------------- -for version, artman_config in admin_versions: - library = gapic.py_library( - "firestore_admin", - f"{version}", - config_path=f"/google/firestore/admin/{artman_config}", - artman_output_name=f"firestore-admin-{version}", - include_protos=True, - ) - s.move(library / f"google/cloud/firestore_admin_{version}") - s.move(library / "tests") - - s.replace( - 
f"google/cloud/firestore_admin_{version}/gapic/firestore_admin_client.py", - "'google-cloud-firestore-admin'", - "'google-cloud-firestore'", - ) - - s.replace( - "google/**/*.py", - f"from google\.cloud\.firestore\.admin_{version}.proto", - f"from google.cloud.firestore_admin_{version}.proto", - ) - -# ---------------------------------------------------------------------------- -# Add templated files -# ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=100) -s.move(templated_files) - -s.replace( - "noxfile.py", - "GOOGLE_APPLICATION_CREDENTIALS", - "FIRESTORE_APPLICATION_CREDENTIALS", -) - -s.replace( - "noxfile.py", - '"--quiet", system_test', - '"--verbose", system_test', -) - - -s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/firestore/tests/__init__.py b/firestore/tests/__init__.py deleted file mode 100644 index ab6729095248..000000000000 --- a/firestore/tests/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/firestore/tests/credentials.json.enc b/firestore/tests/credentials.json.enc deleted file mode 100644 index 116006733a7c..000000000000 --- a/firestore/tests/credentials.json.enc +++ /dev/null @@ -1,49 +0,0 @@ -U2FsdGVkX181Zp8xOq7t5YHHn2l4tdYVICGvNLvusMtFxrKj1zKI2NP8nKstyA8O -xDlBMBMoVNV1GgPPDQ7Hihb6vYwWlSVcvisCqpgz7P4bTFeyheJso1MVO1maS0Ha -ESj8HXrCkMhVv62LogkMXyJF+bsZy9BwaAg9502IVpmYKEwt8CoA+qZwSngpj0Eh -eFwiulHcu6JPVjMFVKhq/PW3YtJa876VKVDB0dEGWMoA9bFVqcIi4js8sRCbsSj+ -IYnOkZyQ4E/3t7u/dIyELVsapefmH+GOfg56cD7ZtSkDyBKQp0YylqvRCXytsfpG -WV8qgOLdALPWoI7kjUV6dcwK9jf1HpEAf+LK0gm483qq17UM4gZQSD/dmFnEZrHT -PUZNxhW9iV/UVOvhNjrvzRxADT+tuW8DoPEVH0ZJm05ui21cwLfieqIvN6SbZDvO -aDS9OUZSFfkReGc5F9UHHBw6T1uG/fEGnu2oFl98TWw6JgZfhsNnqlDQyjcI/aKV -8/oP35x40Ky3V5yBNQGnbsV8N5Btbs5oDI8SGrzEamwwZP0AsdB7nApxjcmDsCgH -nAWR9Kvpu4OY1X+27xjALWoIxcPAhs5RSXyY7oC33JdhcwqKMIsls+K63P1wZ2kd -pb5Cen0dAAXf2tc5GsPAsBJNfU0X8NJ+24nXfBvbkxo8paeoGaP3WQQpkhy/Dd/i -EMNkH++uQAYecFEOvFnjmwo9xzuGiaP60ZQDyGONiaHftHeLts6/aVkhzFFLcigE -lUepi3cEX143+ciC3+C/4kuiuwxHIrQjfI7izxh6+KquKgKGwgcrjqu7Pz3BgsGw -fqH8lXjQK+oB0tmc52kSjopa40JT8pVgMOzhu1rbgQBkt4ByzFL4nC291PtMB27C -mCBZ/RL+xObGuRL1t/jcK9lIbA+8CTrDMwpqCzU1IZgdD2mBXg8KK+iPV8vOjorN -G+kEAeuyGJY6wsvAILK58JoppZrBU1IAqTdypoNkg5tUs08z2d0H4YOjRq8+2F13 -fCW0+vv5OaEnPniS2edPcGwIWIiLbTFqlkx+PFpvuB0riS+j5vaZlaSIoy8EnVjp -QOBriBjX9cXHw6nzWFJDJwsLlddVGyqCIKtr1mHtvjYZQ9w75IjHip1fimHI2Dhn -05kAfC5c2b1R0Y0NrUaSx/x2Q6RZ8R1NoI25rfvKhZmtrF8MNWBklKcAImWhipFy -I2mYX9jj4dvlXaXt5oYKvMfR4EZMgjTtbg0oGafmnJQEYWFahtSEotT/ZHYHfFNr -HXXinXhVi5VPTyoxj9jxNbt9v0y5PXcyI2ize4pr5cwcef6LMr8og9JAABFi1nbm -AAQacoRcDdY0m8RAj0a1sGcjoCMOdn0yYMeyScz35gnYQQaIdw/SLQ/q40KOK9+J -kqQJmTAn8m4M4qYV12i3m1+5JRQuJ4nfA5a2mi63eNJlar/EuM9B5FFxdEdvzYBr -7g77rQppV0d/Ufw8lCFbU1fuVxUz79FfFF78a+lhqxzmww08SyoFHZO0k5rFDZdW -C7CYv0va5Yoi2gPfjngutrsfExenFbFxP2GbndHe8GOT31rBHAvKLiokjkgNlWyw -Ew8JngWjufu2BWspQfDpFYX8DGmxfT0E22QWoiPrn96Hfbb8xuDw/rVUDzBMOak4 
-ry/6f8zpNjAPgjfBGKHnARGOKPUV3Qd3JKJ4uCSvyvfKkXg3HOl/H1WGRu38uZFs -JDIK4SyLL8oYQwQyqpH7MmW3WQBBBkK/HoalezhL4/1F0AAGyZPIhWKlTFCsOMiY -9qAkOSeDFjhkAuUXHWvXc3Lu9Yl3aKGovZXmHiTUqAXKCeoM97u4a4/E3ByV/aF0 -NEL4X6QWOWMPFWrumeanz4hOk1XxwLzV0HEDR+FNXrNoIdvX52T5Qz46RgicWhAV -o0xW/xtnlWsw9fkzWcUeAcxFUsMVpmz9OC83jpF9MD2kBSDq5WYIKsqtcuEiBC3i -8kAylDQBYrDE9H21AXecUkWL+xBfs8S4rx6mscxyq80glq/QrU3mj55B5bIW0dUL -d/DhqqCtL+JRKCSQ75OKJoVRKGSQ6MqTZ6vpE26rU4jH9PQf9mcsCYll3OKdYWR+ -JiATGEqihDxyVLzZucUqemOQc3HZ9B4AVCvsD3heRvxMWQw6ylQCBhe5oYeSVKPv -/FEsUT+NyxtrZgui4/V9lYOcblgoxvPzcaUhlzEIOl5foxmOGtd4g0DCK9yrpUo5 -djeMks5wVgqgVsDsCLimd42WivCOGswzzxyGwX1A5JvBrktPZcEGYCBXOwenEM4n -aNR6W6b/0B/i30pEL2TO1TTRc8XU7pgOsKgCr5pDwuAhO1Me2aWL6cfF9MU0+pIJ -OVysd9GSty0Uv/oijI4W/ID6ar5/r9T+yJYc77qU5cxmqNwjm2RuY8KJidGsIvXw -nrjpiTVzKMHlTQlINCUf7dKxRQxvuWjH4zyu/ZtlpfaSY+2QfSjv0yxf9uTyGC2S -KIK06MiRr6fOhcSdNJUFF0i9y30+1ocFyCnigoydG3Zz9sJGSX1+VPpnx4d4cHWa -44h0XQZWY5KHsFJUuwnMI3bsn+GU0rGY5QzjGJZWwYsMU3GmVOUmPeTj060/1b1d -IRs03RF2oYOWXLYTBfkXgmkOPACFZa7nZf58c7B8iccvpPu6LhAT9KTCTG8QhKsC -Pi3W0v33OJUBURGEDBMg9h0D1U2xBrAAxxYSRD5hEOFJGaJdee6kYCnQ5DMBEa4h -arH23X4a7az/9ENLmrTok7g+LNTqahhsWLbBVPTmNRhbZAXfHbB58lsTG8/bckxP -9+JpIDfOCh3gA32dXsDauZ3SbNMwv+bLUUU6CpnzWXhMRzNXPT6euEG6HNyjpG5/ -WrS0/pli83i/82/8gal7aVWA9EabFnAyHXMINzf0mi/zxdxlyvmryb+RWiF19c6R -cN2P5fZnDE8BcWRQ0gPDQPCRd8b7BIywAmHHkwUJkbObaJWfNrT2ns1zC5nF67Rs -eUnfI1BbImEbE6r7f21cOQHFocW4oTBIRRYMFGgZKjHjbAcYFkewFUw24DaxbZS5 -NWqeTf5FMT/UOcDod/Sz7DWoXvYsLGYruAwi9TBAMIM= diff --git a/firestore/tests/system/test_system.py b/firestore/tests/system/test_system.py deleted file mode 100644 index 71ac07fcee74..000000000000 --- a/firestore/tests/system/test_system.py +++ /dev/null @@ -1,1138 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import math -import operator -import os -import re - -from google.oauth2 import service_account -from google.protobuf import timestamp_pb2 -import pytest -import six - -from google.api_core.exceptions import AlreadyExists -from google.api_core.exceptions import FailedPrecondition -from google.api_core.exceptions import InvalidArgument -from google.api_core.exceptions import NotFound -from google.cloud._helpers import _pb_timestamp_to_datetime -from google.cloud._helpers import UTC -from google.cloud import firestore_v1 as firestore -from test_utils.system import unique_resource_id - -from time import sleep - -FIRESTORE_CREDS = os.environ.get("FIRESTORE_APPLICATION_CREDENTIALS") -FIRESTORE_PROJECT = os.environ.get("GCLOUD_PROJECT") -RANDOM_ID_REGEX = re.compile("^[a-zA-Z0-9]{20}$") -MISSING_DOCUMENT = "No document to update: " -DOCUMENT_EXISTS = "Document already exists: " -UNIQUE_RESOURCE_ID = unique_resource_id("-") - - -@pytest.fixture(scope=u"module") -def client(): - credentials = service_account.Credentials.from_service_account_file(FIRESTORE_CREDS) - project = FIRESTORE_PROJECT or credentials.project_id - yield firestore.Client(project=project, credentials=credentials) - - -@pytest.fixture -def cleanup(): - operations = [] - yield operations.append - - for operation in operations: - operation() - - -def test_collections(client): - collections = list(client.collections()) - assert isinstance(collections, list) - - -def test_create_document(client, cleanup): - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - collection_id = "doc-create" + UNIQUE_RESOURCE_ID - document_id = "doc" + UNIQUE_RESOURCE_ID - document = client.document(collection_id, document_id) - # Add to clean-up before API request (in case ``create()`` fails). - cleanup(document.delete) - - data = { - "now": firestore.SERVER_TIMESTAMP, - "eenta-ger": 11, - "bites": b"\xe2\x98\x83 \xe2\x9b\xb5", - "also": {"nestednow": firestore.SERVER_TIMESTAMP, "quarter": 0.25}, - } - write_result = document.create(data) - updated = _pb_timestamp_to_datetime(write_result.update_time) - delta = updated - now - # Allow a bit of clock skew, but make sure timestamps are close. - assert -300.0 < delta.total_seconds() < 300.0 - - with pytest.raises(AlreadyExists): - document.create(data) - - # Verify the server times. - snapshot = document.get() - stored_data = snapshot.to_dict() - server_now = stored_data["now"] - - delta = updated - server_now - # NOTE: We could check the ``transform_results`` from the write result - # for the document transform, but this value gets dropped. Instead - # we make sure the timestamps are close. - assert 0.0 <= delta.total_seconds() < 5.0 - expected_data = { - "now": server_now, - "eenta-ger": data["eenta-ger"], - "bites": data["bites"], - "also": {"nestednow": server_now, "quarter": data["also"]["quarter"]}, - } - assert stored_data == expected_data - - -def test_create_document_w_subcollection(client, cleanup): - collection_id = "doc-create-sub" + UNIQUE_RESOURCE_ID - document_id = "doc" + UNIQUE_RESOURCE_ID - document = client.document(collection_id, document_id) - # Add to clean-up before API request (in case ``create()`` fails). 
- cleanup(document.delete) - - data = {"now": firestore.SERVER_TIMESTAMP} - document.create(data) - - child_ids = ["child1", "child2"] - - for child_id in child_ids: - subcollection = document.collection(child_id) - _, subdoc = subcollection.add({"foo": "bar"}) - cleanup(subdoc.delete) - - children = document.collections() - assert sorted(child.id for child in children) == sorted(child_ids) - - -def test_cannot_use_foreign_key(client, cleanup): - document_id = "cannot" + UNIQUE_RESOURCE_ID - document = client.document("foreign-key", document_id) - # Add to clean-up before API request (in case ``create()`` fails). - cleanup(document.delete) - - other_client = firestore.Client( - project="other-prahj", credentials=client._credentials, database="dee-bee" - ) - assert other_client._database_string != client._database_string - fake_doc = other_client.document("foo", "bar") - with pytest.raises(InvalidArgument): - document.create({"ref": fake_doc}) - - -def assert_timestamp_less(timestamp_pb1, timestamp_pb2): - dt_val1 = _pb_timestamp_to_datetime(timestamp_pb1) - dt_val2 = _pb_timestamp_to_datetime(timestamp_pb2) - assert dt_val1 < dt_val2 - - -def test_no_document(client): - document_id = "no_document" + UNIQUE_RESOURCE_ID - document = client.document("abcde", document_id) - snapshot = document.get() - assert snapshot.to_dict() is None - - -def test_document_set(client, cleanup): - document_id = "for-set" + UNIQUE_RESOURCE_ID - document = client.document("i-did-it", document_id) - # Add to clean-up before API request (in case ``set()`` fails). - cleanup(document.delete) - - # 0. Make sure the document doesn't exist yet - snapshot = document.get() - assert snapshot.to_dict() is None - - # 1. Use ``create()`` to create the document. - data1 = {"foo": 88} - write_result1 = document.create(data1) - snapshot1 = document.get() - assert snapshot1.to_dict() == data1 - # Make sure the update is what created the document. - assert snapshot1.create_time == snapshot1.update_time - assert snapshot1.update_time == write_result1.update_time - - # 2. Call ``set()`` again to overwrite. - data2 = {"bar": None} - write_result2 = document.set(data2) - snapshot2 = document.get() - assert snapshot2.to_dict() == data2 - # Make sure the create time hasn't changed. - assert snapshot2.create_time == snapshot1.create_time - assert snapshot2.update_time == write_result2.update_time - - -def test_document_integer_field(client, cleanup): - document_id = "for-set" + UNIQUE_RESOURCE_ID - document = client.document("i-did-it", document_id) - # Add to clean-up before API request (in case ``set()`` fails). - cleanup(document.delete) - - data1 = {"1a": {"2b": "3c", "ab": "5e"}, "6f": {"7g": "8h", "cd": "0j"}} - document.create(data1) - - data2 = {"1a.ab": "4d", "6f.7g": "9h"} - document.update(data2) - snapshot = document.get() - expected = {"1a": {"2b": "3c", "ab": "4d"}, "6f": {"7g": "9h", "cd": "0j"}} - assert snapshot.to_dict() == expected - - -def test_document_set_merge(client, cleanup): - document_id = "for-set" + UNIQUE_RESOURCE_ID - document = client.document("i-did-it", document_id) - # Add to clean-up before API request (in case ``set()`` fails). - cleanup(document.delete) - - # 0. Make sure the document doesn't exist yet - snapshot = document.get() - assert not snapshot.exists - - # 1. Use ``create()`` to create the document. 
- data1 = {"name": "Sam", "address": {"city": "SF", "state": "CA"}} - write_result1 = document.create(data1) - snapshot1 = document.get() - assert snapshot1.to_dict() == data1 - # Make sure the update is what created the document. - assert snapshot1.create_time == snapshot1.update_time - assert snapshot1.update_time == write_result1.update_time - - # 2. Call ``set()`` to merge - data2 = {"address": {"city": "LA"}} - write_result2 = document.set(data2, merge=True) - snapshot2 = document.get() - assert snapshot2.to_dict() == { - "name": "Sam", - "address": {"city": "LA", "state": "CA"}, - } - # Make sure the create time hasn't changed. - assert snapshot2.create_time == snapshot1.create_time - assert snapshot2.update_time == write_result2.update_time - - -def test_document_set_w_int_field(client, cleanup): - document_id = "set-int-key" + UNIQUE_RESOURCE_ID - document = client.document("i-did-it", document_id) - # Add to clean-up before API request (in case ``set()`` fails). - cleanup(document.delete) - - # 0. Make sure the document doesn't exist yet - snapshot = document.get() - assert not snapshot.exists - - # 1. Use ``create()`` to create the document. - before = {"testing": "1"} - document.create(before) - - # 2. Replace using ``set()``. - data = {"14": {"status": "active"}} - document.set(data) - - # 3. Verify replaced data. - snapshot1 = document.get() - assert snapshot1.to_dict() == data - - -def test_document_update_w_int_field(client, cleanup): - # Attempt to reproduce #5489. - document_id = "update-int-key" + UNIQUE_RESOURCE_ID - document = client.document("i-did-it", document_id) - # Add to clean-up before API request (in case ``set()`` fails). - cleanup(document.delete) - - # 0. Make sure the document doesn't exist yet - snapshot = document.get() - assert not snapshot.exists - - # 1. Use ``create()`` to create the document. - before = {"testing": "1"} - document.create(before) - - # 2. Add values using ``update()``. - data = {"14": {"status": "active"}} - document.update(data) - - # 3. Verify updated data. - expected = before.copy() - expected.update(data) - snapshot1 = document.get() - assert snapshot1.to_dict() == expected - - -def test_update_document(client, cleanup): - document_id = "for-update" + UNIQUE_RESOURCE_ID - document = client.document("made", document_id) - # Add to clean-up before API request (in case ``create()`` fails). - cleanup(document.delete) - - # 0. Try to update before the document exists. - with pytest.raises(NotFound) as exc_info: - document.update({"not": "there"}) - assert exc_info.value.message.startswith(MISSING_DOCUMENT) - assert document_id in exc_info.value.message - - # 1. Try to update before the document exists (now with an option). - with pytest.raises(NotFound) as exc_info: - document.update({"still": "not-there"}) - assert exc_info.value.message.startswith(MISSING_DOCUMENT) - assert document_id in exc_info.value.message - - # 2. Update and create the document (with an option). - data = {"foo": {"bar": "baz"}, "scoop": {"barn": 981}, "other": True} - write_result2 = document.create(data) - - # 3. Send an update without a field path (no option). - field_updates3 = {"foo": {"quux": 800}} - write_result3 = document.update(field_updates3) - assert_timestamp_less(write_result2.update_time, write_result3.update_time) - snapshot3 = document.get() - expected3 = { - "foo": field_updates3["foo"], - "scoop": data["scoop"], - "other": data["other"], - } - assert snapshot3.to_dict() == expected3 - - # 4. 
Send an update **with** a field path and a delete and a valid - # "last timestamp" option. - field_updates4 = {"scoop.silo": None, "other": firestore.DELETE_FIELD} - option4 = client.write_option(last_update_time=snapshot3.update_time) - write_result4 = document.update(field_updates4, option=option4) - assert_timestamp_less(write_result3.update_time, write_result4.update_time) - snapshot4 = document.get() - expected4 = { - "foo": field_updates3["foo"], - "scoop": {"barn": data["scoop"]["barn"], "silo": field_updates4["scoop.silo"]}, - } - assert snapshot4.to_dict() == expected4 - - # 5. Call ``update()`` with invalid (in the past) "last timestamp" option. - assert_timestamp_less(option4._last_update_time, snapshot4.update_time) - with pytest.raises(FailedPrecondition) as exc_info: - document.update({"bad": "time-past"}, option=option4) - - # 6. Call ``update()`` with invalid (in future) "last timestamp" option. - timestamp_pb = timestamp_pb2.Timestamp( - seconds=snapshot4.update_time.nanos + 3600, nanos=snapshot4.update_time.nanos - ) - option6 = client.write_option(last_update_time=timestamp_pb) - with pytest.raises(FailedPrecondition) as exc_info: - document.update({"bad": "time-future"}, option=option6) - - -def check_snapshot(snapshot, document, data, write_result): - assert snapshot.reference is document - assert snapshot.to_dict() == data - assert snapshot.exists - assert snapshot.create_time == write_result.update_time - assert snapshot.update_time == write_result.update_time - - -def test_document_get(client, cleanup): - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - document_id = "for-get" + UNIQUE_RESOURCE_ID - document = client.document("created", document_id) - # Add to clean-up before API request (in case ``create()`` fails). - cleanup(document.delete) - - # First make sure it doesn't exist. - assert not document.get().exists - - ref_doc = client.document("top", "middle1", "middle2", "bottom") - data = { - "turtle": "power", - "cheese": 19.5, - "fire": 199099299, - "referee": ref_doc, - "gio": firestore.GeoPoint(45.5, 90.0), - "deep": [u"some", b"\xde\xad\xbe\xef"], - "map": {"ice": True, "water": None, "vapor": {"deeper": now}}, - } - write_result = document.create(data) - snapshot = document.get() - check_snapshot(snapshot, document, data, write_result) - - -def test_document_delete(client, cleanup): - document_id = "deleted" + UNIQUE_RESOURCE_ID - document = client.document("here-to-be", document_id) - # Add to clean-up before API request (in case ``create()`` fails). - cleanup(document.delete) - document.create({"not": "much"}) - - # 1. Call ``delete()`` with invalid (in the past) "last timestamp" option. - snapshot1 = document.get() - timestamp_pb = timestamp_pb2.Timestamp( - seconds=snapshot1.update_time.nanos - 3600, nanos=snapshot1.update_time.nanos - ) - option1 = client.write_option(last_update_time=timestamp_pb) - with pytest.raises(FailedPrecondition): - document.delete(option=option1) - - # 2. Call ``delete()`` with invalid (in future) "last timestamp" option. - timestamp_pb = timestamp_pb2.Timestamp( - seconds=snapshot1.update_time.nanos + 3600, nanos=snapshot1.update_time.nanos - ) - option2 = client.write_option(last_update_time=timestamp_pb) - with pytest.raises(FailedPrecondition): - document.delete(option=option2) - - # 3. Actually ``delete()`` the document. - delete_time3 = document.delete() - - # 4. ``delete()`` again, even though we know the document is gone. 
- delete_time4 = document.delete() - assert_timestamp_less(delete_time3, delete_time4) - - -def test_collection_add(client, cleanup): - collection_id = "coll-add" + UNIQUE_RESOURCE_ID - collection1 = client.collection(collection_id) - collection2 = client.collection(collection_id, "doc", "child") - collection3 = client.collection(collection_id, "table", "child") - explicit_doc_id = "hula" + UNIQUE_RESOURCE_ID - - assert set(collection1.list_documents()) == set() - assert set(collection2.list_documents()) == set() - assert set(collection3.list_documents()) == set() - - # Auto-ID at top-level. - data1 = {"foo": "bar"} - update_time1, document_ref1 = collection1.add(data1) - cleanup(document_ref1.delete) - assert set(collection1.list_documents()) == {document_ref1} - assert set(collection2.list_documents()) == set() - assert set(collection3.list_documents()) == set() - snapshot1 = document_ref1.get() - assert snapshot1.to_dict() == data1 - assert snapshot1.update_time == update_time1 - assert RANDOM_ID_REGEX.match(document_ref1.id) - - # Explicit ID at top-level. - data2 = {"baz": 999} - update_time2, document_ref2 = collection1.add(data2, document_id=explicit_doc_id) - cleanup(document_ref2.delete) - assert set(collection1.list_documents()) == {document_ref1, document_ref2} - assert set(collection2.list_documents()) == set() - assert set(collection3.list_documents()) == set() - snapshot2 = document_ref2.get() - assert snapshot2.to_dict() == data2 - assert snapshot2.create_time == update_time2 - assert snapshot2.update_time == update_time2 - assert document_ref2.id == explicit_doc_id - - nested_ref = collection1.document("doc") - - # Auto-ID for nested collection. - data3 = {"quux": b"\x00\x01\x02\x03"} - update_time3, document_ref3 = collection2.add(data3) - cleanup(document_ref3.delete) - assert set(collection1.list_documents()) == { - document_ref1, - document_ref2, - nested_ref, - } - assert set(collection2.list_documents()) == {document_ref3} - assert set(collection3.list_documents()) == set() - snapshot3 = document_ref3.get() - assert snapshot3.to_dict() == data3 - assert snapshot3.update_time == update_time3 - assert RANDOM_ID_REGEX.match(document_ref3.id) - - # Explicit for nested collection. - data4 = {"kazaam": None, "bad": False} - update_time4, document_ref4 = collection2.add(data4, document_id=explicit_doc_id) - cleanup(document_ref4.delete) - assert set(collection1.list_documents()) == { - document_ref1, - document_ref2, - nested_ref, - } - assert set(collection2.list_documents()) == {document_ref3, document_ref4} - assert set(collection3.list_documents()) == set() - snapshot4 = document_ref4.get() - assert snapshot4.to_dict() == data4 - assert snapshot4.create_time == update_time4 - assert snapshot4.update_time == update_time4 - assert document_ref4.id == explicit_doc_id - - # Exercise "missing" document (no doc, but subcollection). 
- data5 = {"bam": 123, "folyk": False} - update_time5, document_ref5 = collection3.add(data5) - cleanup(document_ref5.delete) - missing_ref = collection1.document("table") - assert set(collection1.list_documents()) == { - document_ref1, - document_ref2, - nested_ref, - missing_ref, - } - assert set(collection2.list_documents()) == {document_ref3, document_ref4} - assert set(collection3.list_documents()) == {document_ref5} - - -@pytest.fixture -def query_docs(client): - collection_id = "qs" + UNIQUE_RESOURCE_ID - sub_collection = "child" + UNIQUE_RESOURCE_ID - collection = client.collection(collection_id, "doc", sub_collection) - - cleanup = [] - stored = {} - num_vals = 5 - allowed_vals = six.moves.xrange(num_vals) - for a_val in allowed_vals: - for b_val in allowed_vals: - document_data = { - "a": a_val, - "b": b_val, - "c": [a_val, num_vals * 100], - "stats": {"sum": a_val + b_val, "product": a_val * b_val}, - } - _, doc_ref = collection.add(document_data) - # Add to clean-up. - cleanup.append(doc_ref.delete) - stored[doc_ref.id] = document_data - - yield collection, stored, allowed_vals - - for operation in cleanup: - operation() - - -def test_query_stream_w_simple_field_eq_op(query_docs): - collection, stored, allowed_vals = query_docs - query = collection.where("a", "==", 1) - values = {snapshot.id: snapshot.to_dict() for snapshot in query.stream()} - assert len(values) == len(allowed_vals) - for key, value in six.iteritems(values): - assert stored[key] == value - assert value["a"] == 1 - - -def test_query_stream_w_simple_field_array_contains_op(query_docs): - collection, stored, allowed_vals = query_docs - query = collection.where("c", "array_contains", 1) - values = {snapshot.id: snapshot.to_dict() for snapshot in query.stream()} - assert len(values) == len(allowed_vals) - for key, value in six.iteritems(values): - assert stored[key] == value - assert value["a"] == 1 - - -def test_query_stream_w_simple_field_in_op(query_docs): - collection, stored, allowed_vals = query_docs - num_vals = len(allowed_vals) - query = collection.where("a", "in", [1, num_vals + 100]) - values = {snapshot.id: snapshot.to_dict() for snapshot in query.stream()} - assert len(values) == len(allowed_vals) - for key, value in six.iteritems(values): - assert stored[key] == value - assert value["a"] == 1 - - -def test_query_stream_w_simple_field_array_contains_any_op(query_docs): - collection, stored, allowed_vals = query_docs - num_vals = len(allowed_vals) - query = collection.where("c", "array_contains_any", [1, num_vals * 200]) - values = {snapshot.id: snapshot.to_dict() for snapshot in query.stream()} - assert len(values) == len(allowed_vals) - for key, value in six.iteritems(values): - assert stored[key] == value - assert value["a"] == 1 - - -def test_query_stream_w_order_by(query_docs): - collection, stored, allowed_vals = query_docs - query = collection.order_by("b", direction=firestore.Query.DESCENDING) - values = [(snapshot.id, snapshot.to_dict()) for snapshot in query.stream()] - assert len(values) == len(stored) - b_vals = [] - for key, value in values: - assert stored[key] == value - b_vals.append(value["b"]) - # Make sure the ``b``-values are in DESCENDING order. 
- assert sorted(b_vals, reverse=True) == b_vals - - -def test_query_stream_w_field_path(query_docs): - collection, stored, allowed_vals = query_docs - query = collection.where("stats.sum", ">", 4) - values = {snapshot.id: snapshot.to_dict() for snapshot in query.stream()} - assert len(values) == 10 - ab_pairs2 = set() - for key, value in six.iteritems(values): - assert stored[key] == value - ab_pairs2.add((value["a"], value["b"])) - - expected_ab_pairs = set( - [ - (a_val, b_val) - for a_val in allowed_vals - for b_val in allowed_vals - if a_val + b_val > 4 - ] - ) - assert expected_ab_pairs == ab_pairs2 - - -def test_query_stream_w_start_end_cursor(query_docs): - collection, stored, allowed_vals = query_docs - num_vals = len(allowed_vals) - query = ( - collection.order_by("a") - .start_at({"a": num_vals - 2}) - .end_before({"a": num_vals - 1}) - ) - values = [(snapshot.id, snapshot.to_dict()) for snapshot in query.stream()] - assert len(values) == num_vals - for key, value in values: - assert stored[key] == value - assert value["a"] == num_vals - 2 - - -def test_query_stream_wo_results(query_docs): - collection, stored, allowed_vals = query_docs - num_vals = len(allowed_vals) - query = collection.where("b", "==", num_vals + 100) - values = list(query.stream()) - assert len(values) == 0 - - -def test_query_stream_w_projection(query_docs): - collection, stored, allowed_vals = query_docs - num_vals = len(allowed_vals) - query = collection.where("b", "<=", 1).select(["a", "stats.product"]) - values = {snapshot.id: snapshot.to_dict() for snapshot in query.stream()} - assert len(values) == num_vals * 2 # a ANY, b in (0, 1) - for key, value in six.iteritems(values): - expected = { - "a": stored[key]["a"], - "stats": {"product": stored[key]["stats"]["product"]}, - } - assert expected == value - - -def test_query_stream_w_multiple_filters(query_docs): - collection, stored, allowed_vals = query_docs - query = collection.where("stats.product", ">", 5).where("stats.product", "<", 10) - values = {snapshot.id: snapshot.to_dict() for snapshot in query.stream()} - matching_pairs = [ - (a_val, b_val) - for a_val in allowed_vals - for b_val in allowed_vals - if 5 < a_val * b_val < 10 - ] - assert len(values) == len(matching_pairs) - for key, value in six.iteritems(values): - assert stored[key] == value - pair = (value["a"], value["b"]) - assert pair in matching_pairs - - -def test_query_stream_w_offset(query_docs): - collection, stored, allowed_vals = query_docs - num_vals = len(allowed_vals) - offset = 3 - query = collection.where("b", "==", 2).offset(offset) - values = {snapshot.id: snapshot.to_dict() for snapshot in query.stream()} - # NOTE: We don't check the ``a``-values, since that would require - # an ``order_by('a')``, which combined with the ``b == 2`` - # filter would necessitate an index. 
- assert len(values) == num_vals - offset - for key, value in six.iteritems(values): - assert stored[key] == value - assert value["b"] == 2 - - -def test_query_with_order_dot_key(client, cleanup): - db = client - collection_id = "collek" + unique_resource_id("-") - collection = db.collection(collection_id) - for index in range(100, -1, -1): - doc = collection.document("test_{:09d}".format(index)) - data = {"count": 10 * index, "wordcount": {"page1": index * 10 + 100}} - doc.set(data) - cleanup(doc.delete) - query = collection.order_by("wordcount.page1").limit(3) - data = [doc.to_dict()["wordcount"]["page1"] for doc in query.stream()] - assert [100, 110, 120] == data - for snapshot in collection.order_by("wordcount.page1").limit(3).stream(): - last_value = snapshot.get("wordcount.page1") - cursor_with_nested_keys = {"wordcount": {"page1": last_value}} - found = list( - collection.order_by("wordcount.page1") - .start_after(cursor_with_nested_keys) - .limit(3) - .stream() - ) - found_data = [ - {u"count": 30, u"wordcount": {u"page1": 130}}, - {u"count": 40, u"wordcount": {u"page1": 140}}, - {u"count": 50, u"wordcount": {u"page1": 150}}, - ] - assert found_data == [snap.to_dict() for snap in found] - cursor_with_dotted_paths = {"wordcount.page1": last_value} - cursor_with_key_data = list( - collection.order_by("wordcount.page1") - .start_after(cursor_with_dotted_paths) - .limit(3) - .stream() - ) - assert found_data == [snap.to_dict() for snap in cursor_with_key_data] - - -def test_query_unary(client, cleanup): - collection_name = "unary" + UNIQUE_RESOURCE_ID - collection = client.collection(collection_name) - field_name = "foo" - - _, document0 = collection.add({field_name: None}) - # Add to clean-up. - cleanup(document0.delete) - - nan_val = float("nan") - _, document1 = collection.add({field_name: nan_val}) - # Add to clean-up. - cleanup(document1.delete) - - # 0. Query for null. - query0 = collection.where(field_name, "==", None) - values0 = list(query0.stream()) - assert len(values0) == 1 - snapshot0 = values0[0] - assert snapshot0.reference._path == document0._path - assert snapshot0.to_dict() == {field_name: None} - - # 1. Query for a NAN. 
- query1 = collection.where(field_name, "==", nan_val) - values1 = list(query1.stream()) - assert len(values1) == 1 - snapshot1 = values1[0] - assert snapshot1.reference._path == document1._path - data1 = snapshot1.to_dict() - assert len(data1) == 1 - assert math.isnan(data1[field_name]) - - -def test_collection_group_queries(client, cleanup): - collection_group = "b" + UNIQUE_RESOURCE_ID - - doc_paths = [ - "abc/123/" + collection_group + "/cg-doc1", - "abc/123/" + collection_group + "/cg-doc2", - collection_group + "/cg-doc3", - collection_group + "/cg-doc4", - "def/456/" + collection_group + "/cg-doc5", - collection_group + "/virtual-doc/nested-coll/not-cg-doc", - "x" + collection_group + "/not-cg-doc", - collection_group + "x/not-cg-doc", - "abc/123/" + collection_group + "x/not-cg-doc", - "abc/123/x" + collection_group + "/not-cg-doc", - "abc/" + collection_group, - ] - - batch = client.batch() - for doc_path in doc_paths: - doc_ref = client.document(doc_path) - batch.set(doc_ref, {"x": 1}) - cleanup(doc_ref.delete) - - batch.commit() - - query = client.collection_group(collection_group) - snapshots = list(query.stream()) - found = [snapshot.id for snapshot in snapshots] - expected = ["cg-doc1", "cg-doc2", "cg-doc3", "cg-doc4", "cg-doc5"] - assert found == expected - - -def test_collection_group_queries_startat_endat(client, cleanup): - collection_group = "b" + UNIQUE_RESOURCE_ID - - doc_paths = [ - "a/a/" + collection_group + "/cg-doc1", - "a/b/a/b/" + collection_group + "/cg-doc2", - "a/b/" + collection_group + "/cg-doc3", - "a/b/c/d/" + collection_group + "/cg-doc4", - "a/c/" + collection_group + "/cg-doc5", - collection_group + "/cg-doc6", - "a/b/nope/nope", - ] - - batch = client.batch() - for doc_path in doc_paths: - doc_ref = client.document(doc_path) - batch.set(doc_ref, {"x": doc_path}) - cleanup(doc_ref.delete) - - batch.commit() - - query = ( - client.collection_group(collection_group) - .order_by("__name__") - .start_at([client.document("a/b")]) - .end_at([client.document("a/b0")]) - ) - snapshots = list(query.stream()) - found = set(snapshot.id for snapshot in snapshots) - assert found == set(["cg-doc2", "cg-doc3", "cg-doc4"]) - - query = ( - client.collection_group(collection_group) - .order_by("__name__") - .start_after([client.document("a/b")]) - .end_before([client.document("a/b/" + collection_group + "/cg-doc3")]) - ) - snapshots = list(query.stream()) - found = set(snapshot.id for snapshot in snapshots) - assert found == set(["cg-doc2"]) - - -def test_collection_group_queries_filters(client, cleanup): - collection_group = "b" + UNIQUE_RESOURCE_ID - - doc_paths = [ - "a/a/" + collection_group + "/cg-doc1", - "a/b/a/b/" + collection_group + "/cg-doc2", - "a/b/" + collection_group + "/cg-doc3", - "a/b/c/d/" + collection_group + "/cg-doc4", - "a/c/" + collection_group + "/cg-doc5", - collection_group + "/cg-doc6", - "a/b/nope/nope", - ] - - batch = client.batch() - - for index, doc_path in enumerate(doc_paths): - doc_ref = client.document(doc_path) - batch.set(doc_ref, {"x": index}) - cleanup(doc_ref.delete) - - batch.commit() - - query = ( - client.collection_group(collection_group) - .where( - firestore.field_path.FieldPath.document_id(), ">=", client.document("a/b") - ) - .where( - firestore.field_path.FieldPath.document_id(), "<=", client.document("a/b0") - ) - ) - snapshots = list(query.stream()) - found = set(snapshot.id for snapshot in snapshots) - assert found == set(["cg-doc2", "cg-doc3", "cg-doc4"]) - - query = ( - client.collection_group(collection_group) - 
.where( - firestore.field_path.FieldPath.document_id(), ">", client.document("a/b") - ) - .where( - firestore.field_path.FieldPath.document_id(), - "<", - client.document("a/b/{}/cg-doc3".format(collection_group)), - ) - ) - snapshots = list(query.stream()) - found = set(snapshot.id for snapshot in snapshots) - assert found == set(["cg-doc2"]) - - -def test_get_all(client, cleanup): - collection_name = "get-all" + UNIQUE_RESOURCE_ID - - document1 = client.document(collection_name, "a") - document2 = client.document(collection_name, "b") - document3 = client.document(collection_name, "c") - # Add to clean-up before API requests (in case ``create()`` fails). - cleanup(document1.delete) - cleanup(document3.delete) - - data1 = {"a": {"b": 2, "c": 3}, "d": 4, "e": 0} - write_result1 = document1.create(data1) - data3 = {"a": {"b": 5, "c": 6}, "d": 7, "e": 100} - write_result3 = document3.create(data3) - - # 0. Get 3 unique documents, one of which is missing. - snapshots = list(client.get_all([document1, document2, document3])) - - assert snapshots[0].exists - assert snapshots[1].exists - assert not snapshots[2].exists - - snapshots = [snapshot for snapshot in snapshots if snapshot.exists] - id_attr = operator.attrgetter("id") - snapshots.sort(key=id_attr) - - snapshot1, snapshot3 = snapshots - check_snapshot(snapshot1, document1, data1, write_result1) - check_snapshot(snapshot3, document3, data3, write_result3) - - # 1. Get 2 colliding documents. - document1_also = client.document(collection_name, "a") - snapshots = list(client.get_all([document1, document1_also])) - - assert len(snapshots) == 1 - assert document1 is not document1_also - check_snapshot(snapshots[0], document1_also, data1, write_result1) - - # 2. Use ``field_paths`` / projection in ``get_all()``. - snapshots = list(client.get_all([document1, document3], field_paths=["a.b", "d"])) - - assert len(snapshots) == 2 - snapshots.sort(key=id_attr) - - snapshot1, snapshot3 = snapshots - restricted1 = {"a": {"b": data1["a"]["b"]}, "d": data1["d"]} - check_snapshot(snapshot1, document1, restricted1, write_result1) - restricted3 = {"a": {"b": data3["a"]["b"]}, "d": data3["d"]} - check_snapshot(snapshot3, document3, restricted3, write_result3) - - -def test_batch(client, cleanup): - collection_name = "batch" + UNIQUE_RESOURCE_ID - - document1 = client.document(collection_name, "abc") - document2 = client.document(collection_name, "mno") - document3 = client.document(collection_name, "xyz") - # Add to clean-up before API request (in case ``create()`` fails). 
- cleanup(document1.delete) - cleanup(document2.delete) - cleanup(document3.delete) - - data2 = {"some": {"deep": "stuff", "and": "here"}, "water": 100.0} - document2.create(data2) - document3.create({"other": 19}) - - batch = client.batch() - data1 = {"all": True} - batch.create(document1, data1) - new_value = "there" - batch.update(document2, {"some.and": new_value}) - batch.delete(document3) - write_results = batch.commit() - - assert len(write_results) == 3 - - write_result1 = write_results[0] - write_result2 = write_results[1] - write_result3 = write_results[2] - assert not write_result3.HasField("update_time") - - snapshot1 = document1.get() - assert snapshot1.to_dict() == data1 - assert snapshot1.create_time == write_result1.update_time - assert snapshot1.update_time == write_result1.update_time - - snapshot2 = document2.get() - assert snapshot2.to_dict() != data2 - data2["some"]["and"] = new_value - assert snapshot2.to_dict() == data2 - assert_timestamp_less(snapshot2.create_time, write_result2.update_time) - assert snapshot2.update_time == write_result2.update_time - - assert not document3.get().exists - - -def test_watch_document(client, cleanup): - db = client - collection_ref = db.collection(u"wd-users" + UNIQUE_RESOURCE_ID) - doc_ref = collection_ref.document(u"alovelace") - - # Initial setting - doc_ref.set({u"first": u"Jane", u"last": u"Doe", u"born": 1900}) - cleanup(doc_ref.delete) - - sleep(1) - - # Setup listener - def on_snapshot(docs, changes, read_time): - on_snapshot.called_count += 1 - - on_snapshot.called_count = 0 - - doc_ref.on_snapshot(on_snapshot) - - # Alter document - doc_ref.set({u"first": u"Ada", u"last": u"Lovelace", u"born": 1815}) - - sleep(1) - - for _ in range(10): - if on_snapshot.called_count > 0: - break - sleep(1) - - if on_snapshot.called_count not in (1, 2): - raise AssertionError( - "Failed to get one or two document changes: count: " - + str(on_snapshot.called_count) - ) - - -def test_watch_collection(client, cleanup): - db = client - collection_ref = db.collection(u"wc-users" + UNIQUE_RESOURCE_ID) - doc_ref = collection_ref.document(u"alovelace") - - # Initial setting - doc_ref.set({u"first": u"Jane", u"last": u"Doe", u"born": 1900}) - cleanup(doc_ref.delete) - - # Setup listener - def on_snapshot(docs, changes, read_time): - on_snapshot.called_count += 1 - for doc in [doc for doc in docs if doc.id == doc_ref.id]: - on_snapshot.born = doc.get("born") - - on_snapshot.called_count = 0 - on_snapshot.born = 0 - - collection_ref.on_snapshot(on_snapshot) - - # delay here so initial on_snapshot occurs and isn't combined with set - sleep(1) - - doc_ref.set({u"first": u"Ada", u"last": u"Lovelace", u"born": 1815}) - - for _ in range(10): - if on_snapshot.born == 1815: - break - sleep(1) - - if on_snapshot.born != 1815: - raise AssertionError( - "Expected the last document update to update born: " + str(on_snapshot.born) - ) - - -def test_watch_query(client, cleanup): - db = client - collection_ref = db.collection(u"wq-users" + UNIQUE_RESOURCE_ID) - doc_ref = collection_ref.document(u"alovelace") - query_ref = collection_ref.where("first", "==", u"Ada") - - # Initial setting - doc_ref.set({u"first": u"Jane", u"last": u"Doe", u"born": 1900}) - cleanup(doc_ref.delete) - - sleep(1) - - # Setup listener - def on_snapshot(docs, changes, read_time): - on_snapshot.called_count += 1 - - # A snapshot should return the same thing as if a query ran now. 
- query_ran = collection_ref.where("first", "==", u"Ada").stream() - assert len(docs) == len([i for i in query_ran]) - - on_snapshot.called_count = 0 - - query_ref.on_snapshot(on_snapshot) - - # Alter document - doc_ref.set({u"first": u"Ada", u"last": u"Lovelace", u"born": 1815}) - - for _ in range(10): - if on_snapshot.called_count == 1: - return - sleep(1) - - if on_snapshot.called_count != 1: - raise AssertionError( - "Failed to get exactly one document change: count: " - + str(on_snapshot.called_count) - ) - - -def test_watch_query_order(client, cleanup): - db = client - collection_ref = db.collection(u"users") - doc_ref1 = collection_ref.document(u"alovelace" + UNIQUE_RESOURCE_ID) - doc_ref2 = collection_ref.document(u"asecondlovelace" + UNIQUE_RESOURCE_ID) - doc_ref3 = collection_ref.document(u"athirdlovelace" + UNIQUE_RESOURCE_ID) - doc_ref4 = collection_ref.document(u"afourthlovelace" + UNIQUE_RESOURCE_ID) - doc_ref5 = collection_ref.document(u"afifthlovelace" + UNIQUE_RESOURCE_ID) - - query_ref = collection_ref.where("first", "==", u"Ada").order_by("last") - - # Setup listener - def on_snapshot(docs, changes, read_time): - try: - if len(docs) != 5: - return - # A snapshot should return the same thing as if a query ran now. - query_ran = query_ref.stream() - query_ran_results = [i for i in query_ran] - assert len(docs) == len(query_ran_results) - - # compare the order things are returned - for snapshot, query in zip(docs, query_ran_results): - assert snapshot.get("last") == query.get( - "last" - ), "expect the sort order to match, last" - assert snapshot.get("born") == query.get( - "born" - ), "expect the sort order to match, born" - on_snapshot.called_count += 1 - on_snapshot.last_doc_count = len(docs) - except Exception as e: - on_snapshot.failed = e - - on_snapshot.called_count = 0 - on_snapshot.last_doc_count = 0 - on_snapshot.failed = None - query_ref.on_snapshot(on_snapshot) - - sleep(1) - - doc_ref1.set({u"first": u"Ada", u"last": u"Lovelace", u"born": 1815}) - cleanup(doc_ref1.delete) - - doc_ref2.set({u"first": u"Ada", u"last": u"SecondLovelace", u"born": 1815}) - cleanup(doc_ref2.delete) - - doc_ref3.set({u"first": u"Ada", u"last": u"ThirdLovelace", u"born": 1815}) - cleanup(doc_ref3.delete) - - doc_ref4.set({u"first": u"Ada", u"last": u"FourthLovelace", u"born": 1815}) - cleanup(doc_ref4.delete) - - doc_ref5.set({u"first": u"Ada", u"last": u"lovelace", u"born": 1815}) - cleanup(doc_ref5.delete) - - for _ in range(10): - if on_snapshot.last_doc_count == 5: - break - sleep(1) - - if on_snapshot.failed: - raise on_snapshot.failed - - if on_snapshot.last_doc_count != 5: - raise AssertionError( - "5 docs expected in snapshot method " + str(on_snapshot.last_doc_count) - ) diff --git a/firestore/tests/system/util/cleanup_firestore_documents.py b/firestore/tests/system/util/cleanup_firestore_documents.py deleted file mode 100644 index a944f44899f3..000000000000 --- a/firestore/tests/system/util/cleanup_firestore_documents.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Clean up documents leaked by system tests.""" -from google.cloud.firestore import Client - - -def zap_document(document): - print("Zapping document: {}".format(document.path)) - for collection in document.collections(): - zap_collection(collection) - document.delete() - - -def zap_collection(collection): - for document in collection.list_documents(): - zap_document(document) - - -def main(): - client = Client() - - for collection in client.collections(): - zap_collection(collection) - - -if __name__ == "__main__": - main() diff --git a/firestore/tests/unit/__init__.py b/firestore/tests/unit/__init__.py deleted file mode 100644 index ab6729095248..000000000000 --- a/firestore/tests/unit/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/firestore/tests/unit/gapic/v1/test_firestore_admin_client_v1.py b/firestore/tests/unit/gapic/v1/test_firestore_admin_client_v1.py deleted file mode 100644 index 9a731130d29b..000000000000 --- a/firestore/tests/unit/gapic/v1/test_firestore_admin_client_v1.py +++ /dev/null @@ -1,430 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import firestore_admin_v1 -from google.cloud.firestore_admin_v1.proto import field_pb2 -from google.cloud.firestore_admin_v1.proto import firestore_admin_pb2 -from google.cloud.firestore_admin_v1.proto import index_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestFirestoreAdminClient(object): - def test_create_index(self): - # Setup Expected Response - name = "name3373707" - done = True - expected_response = {"name": name, "done": done} - expected_response = operations_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - parent = client.parent_path("[PROJECT]", "[DATABASE]", "[COLLECTION_ID]") - index = {} - - response = client.create_index(parent, index) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.CreateIndexRequest( - parent=parent, index=index - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_index_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - parent = client.parent_path("[PROJECT]", "[DATABASE]", "[COLLECTION_ID]") - index = {} - - with pytest.raises(CustomException): - client.create_index(parent, index) - - def test_list_indexes(self): - # Setup Expected Response - next_page_token = "" - indexes_element = {} - indexes = [indexes_element] - expected_response = {"next_page_token": next_page_token, "indexes": indexes} - expected_response = firestore_admin_pb2.ListIndexesResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - parent = client.parent_path("[PROJECT]", "[DATABASE]", "[COLLECTION_ID]") - - paged_list_response = client.list_indexes(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.indexes[0] == 
resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.ListIndexesRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_indexes_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - parent = client.parent_path("[PROJECT]", "[DATABASE]", "[COLLECTION_ID]") - - paged_list_response = client.list_indexes(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_index(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = index_pb2.Index(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - name = client.index_path( - "[PROJECT]", "[DATABASE]", "[COLLECTION_ID]", "[INDEX_ID]" - ) - - response = client.get_index(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.GetIndexRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_index_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - name = client.index_path( - "[PROJECT]", "[DATABASE]", "[COLLECTION_ID]", "[INDEX_ID]" - ) - - with pytest.raises(CustomException): - client.get_index(name) - - def test_delete_index(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - name = client.index_path( - "[PROJECT]", "[DATABASE]", "[COLLECTION_ID]", "[INDEX_ID]" - ) - - client.delete_index(name) - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.DeleteIndexRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_index_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - name = client.index_path( - "[PROJECT]", "[DATABASE]", "[COLLECTION_ID]", "[INDEX_ID]" - ) - - with pytest.raises(CustomException): - client.delete_index(name) - - def test_import_documents(self): - # Setup Expected Response - name_2 = "name2-1052831874" - done = True - expected_response = {"name": name_2, "done": done} - expected_response = operations_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - name = client.database_path("[PROJECT]", "[DATABASE]") - - response = client.import_documents(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.ImportDocumentsRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_import_documents_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - name = client.database_path("[PROJECT]", "[DATABASE]") - - with pytest.raises(CustomException): - client.import_documents(name) - - def test_export_documents(self): - # Setup Expected Response - name_2 = "name2-1052831874" - done = True - expected_response = {"name": name_2, "done": done} - expected_response = operations_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - name = client.database_path("[PROJECT]", "[DATABASE]") - - response = client.export_documents(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.ExportDocumentsRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_export_documents_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - name = client.database_path("[PROJECT]", "[DATABASE]") - - with pytest.raises(CustomException): - client.export_documents(name) - - def test_get_field(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = field_pb2.Field(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - name = client.field_path( - "[PROJECT]", "[DATABASE]", "[COLLECTION_ID]", "[FIELD_ID]" - ) - - response = client.get_field(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.GetFieldRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_field_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - name = client.field_path( - "[PROJECT]", "[DATABASE]", "[COLLECTION_ID]", "[FIELD_ID]" - ) - - 
with pytest.raises(CustomException): - client.get_field(name) - - def test_list_fields(self): - # Setup Expected Response - next_page_token = "" - fields_element = {} - fields = [fields_element] - expected_response = {"next_page_token": next_page_token, "fields": fields} - expected_response = firestore_admin_pb2.ListFieldsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - parent = client.parent_path("[PROJECT]", "[DATABASE]", "[COLLECTION_ID]") - - paged_list_response = client.list_fields(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.fields[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.ListFieldsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_fields_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - parent = client.parent_path("[PROJECT]", "[DATABASE]", "[COLLECTION_ID]") - - paged_list_response = client.list_fields(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_field(self): - # Setup Expected Response - name = "name3373707" - done = True - expected_response = {"name": name, "done": done} - expected_response = operations_pb2.Operation(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup Request - field = {} - - response = client.update_field(field) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_admin_pb2.UpdateFieldRequest(field=field) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_field_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_admin_v1.FirestoreAdminClient() - - # Setup request - field = {} - - with pytest.raises(CustomException): - client.update_field(field) diff --git a/firestore/tests/unit/gapic/v1/test_firestore_client_v1.py b/firestore/tests/unit/gapic/v1/test_firestore_client_v1.py deleted file mode 100644 index 8e345da1aff9..000000000000 --- a/firestore/tests/unit/gapic/v1/test_firestore_client_v1.py +++ /dev/null @@ -1,646 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud.firestore_v1.gapic import firestore_client -from google.cloud.firestore_v1.proto import common_pb2 -from google.cloud.firestore_v1.proto import document_pb2 -from google.cloud.firestore_v1.proto import firestore_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def stream_stream( - self, method, request_serializer=None, response_deserializer=None - ): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestFirestoreClient(object): - def test_get_document(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = document_pb2.Document(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - name = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - response = client.get_document(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.GetDocumentRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_document_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - name = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - with pytest.raises(CustomException): - client.get_document(name) - - def test_list_documents(self): - # Setup Expected Response - next_page_token = "" - documents_element = {} - documents = [documents_element] - expected_response = {"next_page_token": next_page_token, "documents": documents} - expected_response = 
firestore_pb2.ListDocumentsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - collection_id = "collectionId-821242276" - - paged_list_response = client.list_documents(parent, collection_id) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.documents[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.ListDocumentsRequest( - parent=parent, collection_id=collection_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_documents_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - collection_id = "collectionId-821242276" - - paged_list_response = client.list_documents(parent, collection_id) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_create_document(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = document_pb2.Document(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - collection_id = "collectionId-821242276" - document_id = "documentId506676927" - document = {} - - response = client.create_document(parent, collection_id, document_id, document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.CreateDocumentRequest( - parent=parent, - collection_id=collection_id, - document_id=document_id, - document=document, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_document_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - collection_id = "collectionId-821242276" - document_id = "documentId506676927" - document = {} - - with pytest.raises(CustomException): - client.create_document(parent, collection_id, document_id, document) - - def test_update_document(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = document_pb2.Document(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - document = {} - update_mask = {} - - response = client.update_document(document, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.UpdateDocumentRequest( - document=document, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_document_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - document = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_document(document, update_mask) - - def test_delete_document(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - name = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - client.delete_document(name) - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.DeleteDocumentRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_document_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - name = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - with pytest.raises(CustomException): - client.delete_document(name) - - def test_batch_get_documents(self): - # Setup Expected Response - missing = "missing1069449574" - transaction = b"-34" - expected_response = {"missing": missing, "transaction": transaction} - expected_response = firestore_pb2.BatchGetDocumentsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - documents = [] - - response = client.batch_get_documents(database, documents) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.BatchGetDocumentsRequest( - database=database, documents=documents - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_batch_get_documents_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - documents = [] - - with pytest.raises(CustomException): - 
client.batch_get_documents(database, documents) - - def test_begin_transaction(self): - # Setup Expected Response - transaction = b"-34" - expected_response = {"transaction": transaction} - expected_response = firestore_pb2.BeginTransactionResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - - response = client.begin_transaction(database) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.BeginTransactionRequest(database=database) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_begin_transaction_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - - with pytest.raises(CustomException): - client.begin_transaction(database) - - def test_commit(self): - # Setup Expected Response - expected_response = {} - expected_response = firestore_pb2.CommitResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - writes = [] - - response = client.commit(database, writes) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.CommitRequest(database=database, writes=writes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_commit_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - writes = [] - - with pytest.raises(CustomException): - client.commit(database, writes) - - def test_rollback(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - transaction = b"-34" - - client.rollback(database, transaction) - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.RollbackRequest( - database=database, transaction=transaction - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_rollback_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - transaction = b"-34" - - with pytest.raises(CustomException): - client.rollback(database, transaction) - - def test_run_query(self): - # Setup Expected Response - transaction = b"-34" - skipped_results = 880286183 - expected_response = { - "transaction": transaction, - "skipped_results": skipped_results, - } - expected_response = firestore_pb2.RunQueryResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - response = client.run_query(parent) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.RunQueryRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_run_query_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - with pytest.raises(CustomException): - client.run_query(parent) - - def test_write(self): - # Setup Expected Response - stream_id = "streamId-315624902" - stream_token = b"122" - expected_response = {"stream_id": stream_id, "stream_token": stream_token} - expected_response = firestore_pb2.WriteResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - request = {"database": database} - request = firestore_pb2.WriteRequest(**request) - requests = [request] - - response = client.write(requests) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - actual_requests = channel.requests[0][1] - assert len(actual_requests) == 1 - actual_request = list(actual_requests)[0] - assert request == actual_request - - def test_write_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - request = {"database": database} - - request = firestore_pb2.WriteRequest(**request) - requests = [request] - - with pytest.raises(CustomException): - client.write(requests) - - def test_listen(self): - # Setup Expected Response - expected_response = {} - expected_response = firestore_pb2.ListenResponse(**expected_response) - - # 
Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - request = {"database": database} - request = firestore_pb2.ListenRequest(**request) - requests = [request] - - response = client.listen(requests) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - actual_requests = channel.requests[0][1] - assert len(actual_requests) == 1 - actual_request = list(actual_requests)[0] - assert request == actual_request - - def test_listen_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - request = {"database": database} - - request = firestore_pb2.ListenRequest(**request) - requests = [request] - - with pytest.raises(CustomException): - client.listen(requests) - - def test_list_collection_ids(self): - # Setup Expected Response - next_page_token = "" - collection_ids_element = "collectionIdsElement1368994900" - collection_ids = [collection_ids_element] - expected_response = { - "next_page_token": next_page_token, - "collection_ids": collection_ids, - } - expected_response = firestore_pb2.ListCollectionIdsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - paged_list_response = client.list_collection_ids(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.collection_ids[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.ListCollectionIdsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_collection_ids_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - paged_list_response = client.list_collection_ids(parent) - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/firestore/tests/unit/gapic/v1beta1/test_firestore_client_v1beta1.py b/firestore/tests/unit/gapic/v1beta1/test_firestore_client_v1beta1.py deleted file mode 100644 index f7bf05814d54..000000000000 --- a/firestore/tests/unit/gapic/v1beta1/test_firestore_client_v1beta1.py +++ /dev/null @@ -1,646 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with 
the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud.firestore_v1beta1.gapic import firestore_client -from google.cloud.firestore_v1beta1.proto import common_pb2 -from google.cloud.firestore_v1beta1.proto import document_pb2 -from google.cloud.firestore_v1beta1.proto import firestore_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def stream_stream( - self, method, request_serializer=None, response_deserializer=None - ): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestFirestoreClient(object): - def test_get_document(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = document_pb2.Document(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - name = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - response = client.get_document(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.GetDocumentRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_document_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - name = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - with pytest.raises(CustomException): - client.get_document(name) - - def test_list_documents(self): - # Setup Expected Response - next_page_token = "" - documents_element = {} - documents = [documents_element] - expected_response = {"next_page_token": next_page_token, "documents": documents} - expected_response = 
firestore_pb2.ListDocumentsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - collection_id = "collectionId-821242276" - - paged_list_response = client.list_documents(parent, collection_id) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.documents[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.ListDocumentsRequest( - parent=parent, collection_id=collection_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_documents_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - collection_id = "collectionId-821242276" - - paged_list_response = client.list_documents(parent, collection_id) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_create_document(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = document_pb2.Document(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - collection_id = "collectionId-821242276" - document_id = "documentId506676927" - document = {} - - response = client.create_document(parent, collection_id, document_id, document) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.CreateDocumentRequest( - parent=parent, - collection_id=collection_id, - document_id=document_id, - document=document, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_document_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - collection_id = "collectionId-821242276" - document_id = "documentId506676927" - document = {} - - with pytest.raises(CustomException): - client.create_document(parent, collection_id, document_id, document) - - def test_update_document(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = document_pb2.Document(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - document = {} - update_mask = {} - - response = client.update_document(document, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.UpdateDocumentRequest( - document=document, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_document_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - document = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_document(document, update_mask) - - def test_delete_document(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - name = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - client.delete_document(name) - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.DeleteDocumentRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_document_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - name = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - with pytest.raises(CustomException): - client.delete_document(name) - - def test_batch_get_documents(self): - # Setup Expected Response - missing = "missing1069449574" - transaction = b"-34" - expected_response = {"missing": missing, "transaction": transaction} - expected_response = firestore_pb2.BatchGetDocumentsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - documents = [] - - response = client.batch_get_documents(database, documents) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.BatchGetDocumentsRequest( - database=database, documents=documents - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_batch_get_documents_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - documents = [] - - with pytest.raises(CustomException): - 
client.batch_get_documents(database, documents) - - def test_begin_transaction(self): - # Setup Expected Response - transaction = b"-34" - expected_response = {"transaction": transaction} - expected_response = firestore_pb2.BeginTransactionResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - - response = client.begin_transaction(database) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.BeginTransactionRequest(database=database) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_begin_transaction_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - - with pytest.raises(CustomException): - client.begin_transaction(database) - - def test_commit(self): - # Setup Expected Response - expected_response = {} - expected_response = firestore_pb2.CommitResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - writes = [] - - response = client.commit(database, writes) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.CommitRequest(database=database, writes=writes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_commit_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - writes = [] - - with pytest.raises(CustomException): - client.commit(database, writes) - - def test_rollback(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - transaction = b"-34" - - client.rollback(database, transaction) - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.RollbackRequest( - database=database, transaction=transaction - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_rollback_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - transaction = b"-34" - - with pytest.raises(CustomException): - client.rollback(database, transaction) - - def test_run_query(self): - # Setup Expected Response - transaction = b"-34" - skipped_results = 880286183 - expected_response = { - "transaction": transaction, - "skipped_results": skipped_results, - } - expected_response = firestore_pb2.RunQueryResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - response = client.run_query(parent) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.RunQueryRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_run_query_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - with pytest.raises(CustomException): - client.run_query(parent) - - def test_write(self): - # Setup Expected Response - stream_id = "streamId-315624902" - stream_token = b"122" - expected_response = {"stream_id": stream_id, "stream_token": stream_token} - expected_response = firestore_pb2.WriteResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - request = {"database": database} - request = firestore_pb2.WriteRequest(**request) - requests = [request] - - response = client.write(requests) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - actual_requests = channel.requests[0][1] - assert len(actual_requests) == 1 - actual_request = list(actual_requests)[0] - assert request == actual_request - - def test_write_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - request = {"database": database} - - request = firestore_pb2.WriteRequest(**request) - requests = [request] - - with pytest.raises(CustomException): - client.write(requests) - - def test_listen(self): - # Setup Expected Response - expected_response = {} - expected_response = firestore_pb2.ListenResponse(**expected_response) - - # 
Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - request = {"database": database} - request = firestore_pb2.ListenRequest(**request) - requests = [request] - - response = client.listen(requests) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - actual_requests = channel.requests[0][1] - assert len(actual_requests) == 1 - actual_request = list(actual_requests)[0] - assert request == actual_request - - def test_listen_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - database = client.database_root_path("[PROJECT]", "[DATABASE]") - request = {"database": database} - - request = firestore_pb2.ListenRequest(**request) - requests = [request] - - with pytest.raises(CustomException): - client.listen(requests) - - def test_list_collection_ids(self): - # Setup Expected Response - next_page_token = "" - collection_ids_element = "collectionIdsElement1368994900" - collection_ids = [collection_ids_element] - expected_response = { - "next_page_token": next_page_token, - "collection_ids": collection_ids, - } - expected_response = firestore_pb2.ListCollectionIdsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup Request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - paged_list_response = client.list_collection_ids(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.collection_ids[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = firestore_pb2.ListCollectionIdsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_collection_ids_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = firestore_client.FirestoreClient() - - # Setup request - parent = client.any_path_path( - "[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]" - ) - - paged_list_response = client.list_collection_ids(parent) - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/firestore/tests/unit/test_firestore_shim.py b/firestore/tests/unit/test_firestore_shim.py deleted file mode 100644 index 001e45354916..000000000000 --- a/firestore/tests/unit/test_firestore_shim.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - - -class TestFirestoreShim(unittest.TestCase): - def test_shim_matches_firestore_v1(self): - from google.cloud import firestore - from google.cloud import firestore_v1 - - self.assertEqual(firestore.__all__, firestore_v1.__all__) - - for name in firestore.__all__: - found = getattr(firestore, name) - expected = getattr(firestore_v1, name) - self.assertIs(found, expected) diff --git a/firestore/tests/unit/v1/__init__.py b/firestore/tests/unit/v1/__init__.py deleted file mode 100644 index ab6729095248..000000000000 --- a/firestore/tests/unit/v1/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/firestore/tests/unit/v1/test__helpers.py b/firestore/tests/unit/v1/test__helpers.py deleted file mode 100644 index e804d9bfcb6f..000000000000 --- a/firestore/tests/unit/v1/test__helpers.py +++ /dev/null @@ -1,2395 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import sys -import unittest - -import mock - - -class TestGeoPoint(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1._helpers import GeoPoint - - return GeoPoint - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - lat = 81.25 - lng = 359.984375 - geo_pt = self._make_one(lat, lng) - self.assertEqual(geo_pt.latitude, lat) - self.assertEqual(geo_pt.longitude, lng) - - def test_to_protobuf(self): - from google.type import latlng_pb2 - - lat = 0.015625 - lng = 20.03125 - geo_pt = self._make_one(lat, lng) - result = geo_pt.to_protobuf() - geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng) - self.assertEqual(result, geo_pt_pb) - - def test___eq__(self): - lat = 0.015625 - lng = 20.03125 - geo_pt1 = self._make_one(lat, lng) - geo_pt2 = self._make_one(lat, lng) - self.assertEqual(geo_pt1, geo_pt2) - - def test___eq__type_differ(self): - lat = 0.015625 - lng = 20.03125 - geo_pt1 = self._make_one(lat, lng) - geo_pt2 = object() - self.assertNotEqual(geo_pt1, geo_pt2) - self.assertIs(geo_pt1.__eq__(geo_pt2), NotImplemented) - - def test___ne__same_value(self): - lat = 0.015625 - lng = 20.03125 - geo_pt1 = self._make_one(lat, lng) - geo_pt2 = self._make_one(lat, lng) - comparison_val = geo_pt1 != geo_pt2 - self.assertFalse(comparison_val) - - def test___ne__(self): - geo_pt1 = self._make_one(0.0, 1.0) - geo_pt2 = self._make_one(2.0, 3.0) - self.assertNotEqual(geo_pt1, geo_pt2) - - def test___ne__type_differ(self): - lat = 0.015625 - lng = 20.03125 - geo_pt1 = self._make_one(lat, lng) - geo_pt2 = object() - self.assertNotEqual(geo_pt1, geo_pt2) - self.assertIs(geo_pt1.__ne__(geo_pt2), NotImplemented) - - -class Test_verify_path(unittest.TestCase): - @staticmethod - def _call_fut(path, is_collection): - from google.cloud.firestore_v1._helpers import verify_path - - return verify_path(path, is_collection) - - def test_empty(self): - path = () - with self.assertRaises(ValueError): - self._call_fut(path, True) - with self.assertRaises(ValueError): - self._call_fut(path, False) - - def test_wrong_length_collection(self): - path = ("foo", "bar") - with self.assertRaises(ValueError): - self._call_fut(path, True) - - def test_wrong_length_document(self): - path = ("Kind",) - with self.assertRaises(ValueError): - self._call_fut(path, False) - - def test_wrong_type_collection(self): - path = (99, "ninety-nine", "zap") - with self.assertRaises(ValueError): - self._call_fut(path, True) - - def test_wrong_type_document(self): - path = ("Users", "Ada", "Candy", {}) - with self.assertRaises(ValueError): - self._call_fut(path, False) - - def test_success_collection(self): - path = ("Computer", "Magic", "Win") - ret_val = self._call_fut(path, True) - # NOTE: We are just checking that it didn't fail. - self.assertIsNone(ret_val) - - def test_success_document(self): - path = ("Tokenizer", "Seventeen", "Cheese", "Burger") - ret_val = self._call_fut(path, False) - # NOTE: We are just checking that it didn't fail. 
- self.assertIsNone(ret_val) - - -class Test_encode_value(unittest.TestCase): - @staticmethod - def _call_fut(value): - from google.cloud.firestore_v1._helpers import encode_value - - return encode_value(value) - - def test_none(self): - from google.protobuf import struct_pb2 - - result = self._call_fut(None) - expected = _value_pb(null_value=struct_pb2.NULL_VALUE) - self.assertEqual(result, expected) - - def test_boolean(self): - result = self._call_fut(True) - expected = _value_pb(boolean_value=True) - self.assertEqual(result, expected) - - def test_integer(self): - value = 425178 - result = self._call_fut(value) - expected = _value_pb(integer_value=value) - self.assertEqual(result, expected) - - def test_float(self): - value = 123.4453125 - result = self._call_fut(value) - expected = _value_pb(double_value=value) - self.assertEqual(result, expected) - - def test_datetime_with_nanos(self): - from google.api_core.datetime_helpers import DatetimeWithNanoseconds - from google.protobuf import timestamp_pb2 - - dt_seconds = 1488768504 - dt_nanos = 458816991 - timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos) - dt_val = DatetimeWithNanoseconds.from_timestamp_pb(timestamp_pb) - - result = self._call_fut(dt_val) - expected = _value_pb(timestamp_value=timestamp_pb) - self.assertEqual(result, expected) - - def test_datetime_wo_nanos(self): - from google.protobuf import timestamp_pb2 - - dt_seconds = 1488768504 - dt_nanos = 458816000 - # Make sure precision is valid in microseconds too. - self.assertEqual(dt_nanos % 1000, 0) - dt_val = datetime.datetime.utcfromtimestamp(dt_seconds + 1e-9 * dt_nanos) - - result = self._call_fut(dt_val) - timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos) - expected = _value_pb(timestamp_value=timestamp_pb) - self.assertEqual(result, expected) - - def test_string(self): - value = u"\u2018left quote, right quote\u2019" - result = self._call_fut(value) - expected = _value_pb(string_value=value) - self.assertEqual(result, expected) - - def test_bytes(self): - value = b"\xe3\xf2\xff\x00" - result = self._call_fut(value) - expected = _value_pb(bytes_value=value) - self.assertEqual(result, expected) - - def test_reference_value(self): - client = _make_client() - - value = client.document("my", "friend") - result = self._call_fut(value) - expected = _value_pb(reference_value=value._document_path) - self.assertEqual(result, expected) - - def test_geo_point(self): - from google.cloud.firestore_v1._helpers import GeoPoint - - value = GeoPoint(50.5, 88.75) - result = self._call_fut(value) - expected = _value_pb(geo_point_value=value.to_protobuf()) - self.assertEqual(result, expected) - - def test_array(self): - from google.cloud.firestore_v1.proto.document_pb2 import ArrayValue - - result = self._call_fut([99, True, 118.5]) - - array_pb = ArrayValue( - values=[ - _value_pb(integer_value=99), - _value_pb(boolean_value=True), - _value_pb(double_value=118.5), - ] - ) - expected = _value_pb(array_value=array_pb) - self.assertEqual(result, expected) - - def test_map(self): - from google.cloud.firestore_v1.proto.document_pb2 import MapValue - - result = self._call_fut({"abc": 285, "def": b"piglatin"}) - - map_pb = MapValue( - fields={ - "abc": _value_pb(integer_value=285), - "def": _value_pb(bytes_value=b"piglatin"), - } - ) - expected = _value_pb(map_value=map_pb) - self.assertEqual(result, expected) - - def test_bad_type(self): - value = object() - with self.assertRaises(TypeError): - self._call_fut(value) - - -class 
Test_encode_dict(unittest.TestCase): - @staticmethod - def _call_fut(values_dict): - from google.cloud.firestore_v1._helpers import encode_dict - - return encode_dict(values_dict) - - def test_many_types(self): - from google.protobuf import struct_pb2 - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1.proto.document_pb2 import ArrayValue - from google.cloud.firestore_v1.proto.document_pb2 import MapValue - - dt_seconds = 1497397225 - dt_nanos = 465964000 - # Make sure precision is valid in microseconds too. - self.assertEqual(dt_nanos % 1000, 0) - dt_val = datetime.datetime.utcfromtimestamp(dt_seconds + 1e-9 * dt_nanos) - - client = _make_client() - document = client.document("most", "adjective", "thing", "here") - - values_dict = { - "foo": None, - "bar": True, - "baz": 981, - "quux": 2.875, - "quuz": dt_val, - "corge": u"\N{snowman}", - "grault": b"\xe2\x98\x83", - "wibble": document, - "garply": [u"fork", 4.0], - "waldo": {"fred": u"zap", "thud": False}, - } - encoded_dict = self._call_fut(values_dict) - expected_dict = { - "foo": _value_pb(null_value=struct_pb2.NULL_VALUE), - "bar": _value_pb(boolean_value=True), - "baz": _value_pb(integer_value=981), - "quux": _value_pb(double_value=2.875), - "quuz": _value_pb( - timestamp_value=timestamp_pb2.Timestamp( - seconds=dt_seconds, nanos=dt_nanos - ) - ), - "corge": _value_pb(string_value=u"\N{snowman}"), - "grault": _value_pb(bytes_value=b"\xe2\x98\x83"), - "wibble": _value_pb(reference_value=document._document_path), - "garply": _value_pb( - array_value=ArrayValue( - values=[ - _value_pb(string_value=u"fork"), - _value_pb(double_value=4.0), - ] - ) - ), - "waldo": _value_pb( - map_value=MapValue( - fields={ - "fred": _value_pb(string_value=u"zap"), - "thud": _value_pb(boolean_value=False), - } - ) - ), - } - self.assertEqual(encoded_dict, expected_dict) - - -class Test_reference_value_to_document(unittest.TestCase): - @staticmethod - def _call_fut(reference_value, client): - from google.cloud.firestore_v1._helpers import reference_value_to_document - - return reference_value_to_document(reference_value, client) - - def test_bad_format(self): - from google.cloud.firestore_v1._helpers import BAD_REFERENCE_ERROR - - reference_value = "not/the/right/format" - with self.assertRaises(ValueError) as exc_info: - self._call_fut(reference_value, None) - - err_msg = BAD_REFERENCE_ERROR.format(reference_value) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - def test_same_client(self): - from google.cloud.firestore_v1.document import DocumentReference - - client = _make_client() - document = client.document("that", "this") - reference_value = document._document_path - - new_document = self._call_fut(reference_value, client) - self.assertIsNot(new_document, document) - - self.assertIsInstance(new_document, DocumentReference) - self.assertIs(new_document._client, client) - self.assertEqual(new_document._path, document._path) - - def test_different_client(self): - from google.cloud.firestore_v1._helpers import WRONG_APP_REFERENCE - - client1 = _make_client(project="kirk") - document = client1.document("tin", "foil") - reference_value = document._document_path - - client2 = _make_client(project="spock") - with self.assertRaises(ValueError) as exc_info: - self._call_fut(reference_value, client2) - - err_msg = WRONG_APP_REFERENCE.format(reference_value, client2._database_string) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - -class Test_decode_value(unittest.TestCase): - @staticmethod - def _call_fut(value, 
client=mock.sentinel.client): - from google.cloud.firestore_v1._helpers import decode_value - - return decode_value(value, client) - - def test_none(self): - from google.protobuf import struct_pb2 - - value = _value_pb(null_value=struct_pb2.NULL_VALUE) - self.assertIsNone(self._call_fut(value)) - - def test_bool(self): - value1 = _value_pb(boolean_value=True) - self.assertTrue(self._call_fut(value1)) - value2 = _value_pb(boolean_value=False) - self.assertFalse(self._call_fut(value2)) - - def test_int(self): - int_val = 29871 - value = _value_pb(integer_value=int_val) - self.assertEqual(self._call_fut(value), int_val) - - def test_float(self): - float_val = 85.9296875 - value = _value_pb(double_value=float_val) - self.assertEqual(self._call_fut(value), float_val) - - @unittest.skipIf( - (3,) <= sys.version_info < (3, 4, 4), "known datetime bug (bpo-23517) in Python" - ) - def test_datetime(self): - from google.api_core.datetime_helpers import DatetimeWithNanoseconds - from google.protobuf import timestamp_pb2 - - dt_seconds = 552855006 - dt_nanos = 766961828 - - timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos) - value = _value_pb(timestamp_value=timestamp_pb) - - expected_dt_val = DatetimeWithNanoseconds.from_timestamp_pb(timestamp_pb) - self.assertEqual(self._call_fut(value), expected_dt_val) - - def test_unicode(self): - unicode_val = u"zorgon" - value = _value_pb(string_value=unicode_val) - self.assertEqual(self._call_fut(value), unicode_val) - - def test_bytes(self): - bytes_val = b"abc\x80" - value = _value_pb(bytes_value=bytes_val) - self.assertEqual(self._call_fut(value), bytes_val) - - def test_reference(self): - from google.cloud.firestore_v1.document import DocumentReference - - client = _make_client() - path = (u"then", u"there-was-one") - document = client.document(*path) - ref_string = document._document_path - value = _value_pb(reference_value=ref_string) - - result = self._call_fut(value, client) - self.assertIsInstance(result, DocumentReference) - self.assertIs(result._client, client) - self.assertEqual(result._path, path) - - def test_geo_point(self): - from google.cloud.firestore_v1._helpers import GeoPoint - - geo_pt = GeoPoint(latitude=42.5, longitude=99.0625) - value = _value_pb(geo_point_value=geo_pt.to_protobuf()) - self.assertEqual(self._call_fut(value), geo_pt) - - def test_array(self): - from google.cloud.firestore_v1.proto import document_pb2 - - sub_value1 = _value_pb(boolean_value=True) - sub_value2 = _value_pb(double_value=14.1396484375) - sub_value3 = _value_pb(bytes_value=b"\xde\xad\xbe\xef") - array_pb = document_pb2.ArrayValue(values=[sub_value1, sub_value2, sub_value3]) - value = _value_pb(array_value=array_pb) - - expected = [ - sub_value1.boolean_value, - sub_value2.double_value, - sub_value3.bytes_value, - ] - self.assertEqual(self._call_fut(value), expected) - - def test_map(self): - from google.cloud.firestore_v1.proto import document_pb2 - - sub_value1 = _value_pb(integer_value=187680) - sub_value2 = _value_pb(string_value=u"how low can you go?") - map_pb = document_pb2.MapValue( - fields={"first": sub_value1, "second": sub_value2} - ) - value = _value_pb(map_value=map_pb) - - expected = { - "first": sub_value1.integer_value, - "second": sub_value2.string_value, - } - self.assertEqual(self._call_fut(value), expected) - - def test_nested_map(self): - from google.cloud.firestore_v1.proto import document_pb2 - - actual_value1 = 1009876 - actual_value2 = u"hey you guys" - actual_value3 = 90.875 - map_pb1 = document_pb2.MapValue( 
- fields={ - "lowest": _value_pb(integer_value=actual_value1), - "aside": _value_pb(string_value=actual_value2), - } - ) - map_pb2 = document_pb2.MapValue( - fields={ - "middle": _value_pb(map_value=map_pb1), - "aside": _value_pb(boolean_value=True), - } - ) - map_pb3 = document_pb2.MapValue( - fields={ - "highest": _value_pb(map_value=map_pb2), - "aside": _value_pb(double_value=actual_value3), - } - ) - value = _value_pb(map_value=map_pb3) - - expected = { - "highest": { - "middle": {"lowest": actual_value1, "aside": actual_value2}, - "aside": True, - }, - "aside": actual_value3, - } - self.assertEqual(self._call_fut(value), expected) - - def test_unset_value_type(self): - with self.assertRaises(ValueError): - self._call_fut(_value_pb()) - - def test_unknown_value_type(self): - value_pb = mock.Mock(spec=["WhichOneof"]) - value_pb.WhichOneof.return_value = "zoob_value" - - with self.assertRaises(ValueError): - self._call_fut(value_pb) - - value_pb.WhichOneof.assert_called_once_with("value_type") - - -class Test_decode_dict(unittest.TestCase): - @staticmethod - def _call_fut(value_fields, client=mock.sentinel.client): - from google.cloud.firestore_v1._helpers import decode_dict - - return decode_dict(value_fields, client) - - @unittest.skipIf( - (3,) <= sys.version_info < (3, 4, 4), "known datetime bug (bpo-23517) in Python" - ) - def test_many_types(self): - from google.protobuf import struct_pb2 - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1.proto.document_pb2 import ArrayValue - from google.cloud.firestore_v1.proto.document_pb2 import MapValue - from google.cloud._helpers import UTC - from google.cloud.firestore_v1.field_path import FieldPath - - dt_seconds = 1394037350 - dt_nanos = 667285000 - # Make sure precision is valid in microseconds too. 
- self.assertEqual(dt_nanos % 1000, 0) - dt_val = datetime.datetime.utcfromtimestamp( - dt_seconds + 1e-9 * dt_nanos - ).replace(tzinfo=UTC) - - value_fields = { - "foo": _value_pb(null_value=struct_pb2.NULL_VALUE), - "bar": _value_pb(boolean_value=True), - "baz": _value_pb(integer_value=981), - "quux": _value_pb(double_value=2.875), - "quuz": _value_pb( - timestamp_value=timestamp_pb2.Timestamp( - seconds=dt_seconds, nanos=dt_nanos - ) - ), - "corge": _value_pb(string_value=u"\N{snowman}"), - "grault": _value_pb(bytes_value=b"\xe2\x98\x83"), - "garply": _value_pb( - array_value=ArrayValue( - values=[ - _value_pb(string_value=u"fork"), - _value_pb(double_value=4.0), - ] - ) - ), - "waldo": _value_pb( - map_value=MapValue( - fields={ - "fred": _value_pb(string_value=u"zap"), - "thud": _value_pb(boolean_value=False), - } - ) - ), - FieldPath("a", "b", "c").to_api_repr(): _value_pb(boolean_value=False), - } - expected = { - "foo": None, - "bar": True, - "baz": 981, - "quux": 2.875, - "quuz": dt_val, - "corge": u"\N{snowman}", - "grault": b"\xe2\x98\x83", - "garply": [u"fork", 4.0], - "waldo": {"fred": u"zap", "thud": False}, - "a.b.c": False, - } - self.assertEqual(self._call_fut(value_fields), expected) - - -class Test_get_doc_id(unittest.TestCase): - @staticmethod - def _call_fut(document_pb, expected_prefix): - from google.cloud.firestore_v1._helpers import get_doc_id - - return get_doc_id(document_pb, expected_prefix) - - @staticmethod - def _dummy_ref_string(collection_id): - from google.cloud.firestore_v1.client import DEFAULT_DATABASE - - project = u"bazzzz" - return u"projects/{}/databases/{}/documents/{}".format( - project, DEFAULT_DATABASE, collection_id - ) - - def test_success(self): - from google.cloud.firestore_v1.proto import document_pb2 - - prefix = self._dummy_ref_string("sub-collection") - actual_id = "this-is-the-one" - name = "{}/{}".format(prefix, actual_id) - - document_pb = document_pb2.Document(name=name) - document_id = self._call_fut(document_pb, prefix) - self.assertEqual(document_id, actual_id) - - def test_failure(self): - from google.cloud.firestore_v1.proto import document_pb2 - - actual_prefix = self._dummy_ref_string("the-right-one") - wrong_prefix = self._dummy_ref_string("the-wrong-one") - name = "{}/{}".format(actual_prefix, "sorry-wont-works") - - document_pb = document_pb2.Document(name=name) - with self.assertRaises(ValueError) as exc_info: - self._call_fut(document_pb, wrong_prefix) - - exc_args = exc_info.exception.args - self.assertEqual(len(exc_args), 4) - self.assertEqual(exc_args[1], name) - self.assertEqual(exc_args[3], wrong_prefix) - - -class Test_extract_fields(unittest.TestCase): - @staticmethod - def _call_fut(document_data, prefix_path, expand_dots=False): - from google.cloud.firestore_v1 import _helpers - - return _helpers.extract_fields( - document_data, prefix_path, expand_dots=expand_dots - ) - - def test_w_empty_document(self): - from google.cloud.firestore_v1._helpers import _EmptyDict - - document_data = {} - prefix_path = _make_field_path() - expected = [(_make_field_path(), _EmptyDict)] - - iterator = self._call_fut(document_data, prefix_path) - self.assertEqual(list(iterator), expected) - - def test_w_invalid_key_and_expand_dots(self): - document_data = {"b": 1, "a~d": 2, "c": 3} - prefix_path = _make_field_path() - - with self.assertRaises(ValueError): - list(self._call_fut(document_data, prefix_path, expand_dots=True)) - - def test_w_shallow_keys(self): - document_data = {"b": 1, "a": 2, "c": 3} - prefix_path = 
_make_field_path() - expected = [ - (_make_field_path("a"), 2), - (_make_field_path("b"), 1), - (_make_field_path("c"), 3), - ] - - iterator = self._call_fut(document_data, prefix_path) - self.assertEqual(list(iterator), expected) - - def test_w_nested(self): - from google.cloud.firestore_v1._helpers import _EmptyDict - - document_data = {"b": {"a": {"d": 4, "c": 3, "g": {}}, "e": 7}, "f": 5} - prefix_path = _make_field_path() - expected = [ - (_make_field_path("b", "a", "c"), 3), - (_make_field_path("b", "a", "d"), 4), - (_make_field_path("b", "a", "g"), _EmptyDict), - (_make_field_path("b", "e"), 7), - (_make_field_path("f"), 5), - ] - - iterator = self._call_fut(document_data, prefix_path) - self.assertEqual(list(iterator), expected) - - def test_w_expand_dotted(self): - from google.cloud.firestore_v1._helpers import _EmptyDict - - document_data = { - "b": {"a": {"d": 4, "c": 3, "g": {}, "k.l.m": 17}, "e": 7}, - "f": 5, - "h.i.j": 9, - } - prefix_path = _make_field_path() - expected = [ - (_make_field_path("b", "a", "c"), 3), - (_make_field_path("b", "a", "d"), 4), - (_make_field_path("b", "a", "g"), _EmptyDict), - (_make_field_path("b", "a", "k.l.m"), 17), - (_make_field_path("b", "e"), 7), - (_make_field_path("f"), 5), - (_make_field_path("h", "i", "j"), 9), - ] - - iterator = self._call_fut(document_data, prefix_path, expand_dots=True) - self.assertEqual(list(iterator), expected) - - -class Test_set_field_value(unittest.TestCase): - @staticmethod - def _call_fut(document_data, field_path, value): - from google.cloud.firestore_v1 import _helpers - - return _helpers.set_field_value(document_data, field_path, value) - - def test_normal_value_w_shallow(self): - document = {} - field_path = _make_field_path("a") - value = 3 - - self._call_fut(document, field_path, value) - - self.assertEqual(document, {"a": 3}) - - def test_normal_value_w_nested(self): - document = {} - field_path = _make_field_path("a", "b", "c") - value = 3 - - self._call_fut(document, field_path, value) - - self.assertEqual(document, {"a": {"b": {"c": 3}}}) - - def test_empty_dict_w_shallow(self): - from google.cloud.firestore_v1._helpers import _EmptyDict - - document = {} - field_path = _make_field_path("a") - value = _EmptyDict - - self._call_fut(document, field_path, value) - - self.assertEqual(document, {"a": {}}) - - def test_empty_dict_w_nested(self): - from google.cloud.firestore_v1._helpers import _EmptyDict - - document = {} - field_path = _make_field_path("a", "b", "c") - value = _EmptyDict - - self._call_fut(document, field_path, value) - - self.assertEqual(document, {"a": {"b": {"c": {}}}}) - - -class Test_get_field_value(unittest.TestCase): - @staticmethod - def _call_fut(document_data, field_path): - from google.cloud.firestore_v1 import _helpers - - return _helpers.get_field_value(document_data, field_path) - - def test_w_empty_path(self): - document = {} - - with self.assertRaises(ValueError): - self._call_fut(document, _make_field_path()) - - def test_miss_shallow(self): - document = {} - - with self.assertRaises(KeyError): - self._call_fut(document, _make_field_path("nonesuch")) - - def test_miss_nested(self): - document = {"a": {"b": {}}} - - with self.assertRaises(KeyError): - self._call_fut(document, _make_field_path("a", "b", "c")) - - def test_hit_shallow(self): - document = {"a": 1} - - self.assertEqual(self._call_fut(document, _make_field_path("a")), 1) - - def test_hit_nested(self): - document = {"a": {"b": {"c": 1}}} - - self.assertEqual(self._call_fut(document, _make_field_path("a", "b", 
"c")), 1) - - -class TestDocumentExtractor(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1 import _helpers - - return _helpers.DocumentExtractor - - def _make_one(self, document_data): - return self._get_target_class()(document_data) - - def test_ctor_w_empty_document(self): - document_data = {} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertTrue(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_delete_field_shallow(self): - from google.cloud.firestore_v1.transforms import DELETE_FIELD - - document_data = {"a": DELETE_FIELD} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, [_make_field_path("a")]) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_delete_field_nested(self): - from google.cloud.firestore_v1.transforms import DELETE_FIELD - - document_data = {"a": {"b": {"c": DELETE_FIELD}}} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, [_make_field_path("a", "b", "c")]) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_server_timestamp_shallow(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_data = {"a": SERVER_TIMESTAMP} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, [_make_field_path("a")]) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_server_timestamp_nested(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_data = {"a": {"b": {"c": SERVER_TIMESTAMP}}} - - inst = self._make_one(document_data) - - 
self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, [_make_field_path("a", "b", "c")]) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_array_remove_shallow(self): - from google.cloud.firestore_v1.transforms import ArrayRemove - - values = [1, 3, 5] - document_data = {"a": ArrayRemove(values)} - - inst = self._make_one(document_data) - - expected_array_removes = {_make_field_path("a"): values} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, expected_array_removes) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_array_remove_nested(self): - from google.cloud.firestore_v1.transforms import ArrayRemove - - values = [2, 4, 8] - document_data = {"a": {"b": {"c": ArrayRemove(values)}}} - - inst = self._make_one(document_data) - - expected_array_removes = {_make_field_path("a", "b", "c"): values} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, expected_array_removes) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_array_union_shallow(self): - from google.cloud.firestore_v1.transforms import ArrayUnion - - values = [1, 3, 5] - document_data = {"a": ArrayUnion(values)} - - inst = self._make_one(document_data) - - expected_array_unions = {_make_field_path("a"): values} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, expected_array_unions) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_array_union_nested(self): - from google.cloud.firestore_v1.transforms import ArrayUnion - - values = [2, 4, 8] - document_data = {"a": {"b": {"c": ArrayUnion(values)}}} - - inst = self._make_one(document_data) - - expected_array_unions = {_make_field_path("a", "b", "c"): values} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - 
self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, expected_array_unions) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_increment_shallow(self): - from google.cloud.firestore_v1.transforms import Increment - - value = 1 - document_data = {"a": Increment(value)} - - inst = self._make_one(document_data) - - expected_increments = {_make_field_path("a"): value} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, expected_increments) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_increment_nested(self): - from google.cloud.firestore_v1.transforms import Increment - - value = 2 - document_data = {"a": {"b": {"c": Increment(value)}}} - - inst = self._make_one(document_data) - - expected_increments = {_make_field_path("a", "b", "c"): value} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, expected_increments) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_maximum_shallow(self): - from google.cloud.firestore_v1.transforms import Maximum - - value = 1 - document_data = {"a": Maximum(value)} - - inst = self._make_one(document_data) - - expected_maximums = {_make_field_path("a"): value} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, expected_maximums) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_maximum_nested(self): - from google.cloud.firestore_v1.transforms import Maximum - - value = 2 - document_data = {"a": {"b": {"c": Maximum(value)}}} - - inst = self._make_one(document_data) - - expected_maximums = {_make_field_path("a", "b", "c"): value} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - 
self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, expected_maximums) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_minimum_shallow(self): - from google.cloud.firestore_v1.transforms import Minimum - - value = 1 - document_data = {"a": Minimum(value)} - - inst = self._make_one(document_data) - - expected_minimums = {_make_field_path("a"): value} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, expected_minimums) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_minimum_nested(self): - from google.cloud.firestore_v1.transforms import Minimum - - value = 2 - document_data = {"a": {"b": {"c": Minimum(value)}}} - - inst = self._make_one(document_data) - - expected_minimums = {_make_field_path("a", "b", "c"): value} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, expected_minimums) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_empty_dict_shallow(self): - document_data = {"a": {}} - - inst = self._make_one(document_data) - - expected_field_paths = [_make_field_path("a")] - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, expected_field_paths) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, document_data) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_empty_dict_nested(self): - document_data = {"a": {"b": {"c": {}}}} - - inst = self._make_one(document_data) - - expected_field_paths = [_make_field_path("a", "b", "c")] - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, expected_field_paths) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, document_data) - self.assertFalse(inst.empty_document) - 
self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_normal_value_shallow(self): - document_data = {"b": 1, "a": 2, "c": 3} - - inst = self._make_one(document_data) - - expected_field_paths = [ - _make_field_path("a"), - _make_field_path("b"), - _make_field_path("c"), - ] - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, expected_field_paths) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, document_data) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - - def test_ctor_w_normal_value_nested(self): - document_data = {"b": {"a": {"d": 4, "c": 3}, "e": 7}, "f": 5} - - inst = self._make_one(document_data) - - expected_field_paths = [ - _make_field_path("b", "a", "c"), - _make_field_path("b", "a", "d"), - _make_field_path("b", "e"), - _make_field_path("f"), - ] - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, expected_field_paths) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.increments, {}) - self.assertEqual(inst.maximums, {}) - self.assertEqual(inst.minimums, {}) - self.assertEqual(inst.set_fields, document_data) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - - def test_get_update_pb_w_exists_precondition(self): - from google.cloud.firestore_v1.proto import write_pb2 - - document_data = {} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - update_pb = inst.get_update_pb(document_path, exists=False) - - self.assertIsInstance(update_pb, write_pb2.Write) - self.assertEqual(update_pb.update.name, document_path) - self.assertEqual(update_pb.update.fields, document_data) - self.assertTrue(update_pb.HasField("current_document")) - self.assertFalse(update_pb.current_document.exists) - - def test_get_update_pb_wo_exists_precondition(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1._helpers import encode_dict - - document_data = {"a": 1} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - update_pb = inst.get_update_pb(document_path) - - self.assertIsInstance(update_pb, write_pb2.Write) - self.assertEqual(update_pb.update.name, document_path) - self.assertEqual(update_pb.update.fields, encode_dict(document_data)) - self.assertFalse(update_pb.HasField("current_document")) - - def test_get_transform_pb_w_server_timestamp_w_exists_precondition(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - from google.cloud.firestore_v1._helpers import REQUEST_TIME_ENUM - - document_data = {"a": SERVER_TIMESTAMP} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path, exists=False) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - 
self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a") - self.assertEqual(transform.set_to_server_value, REQUEST_TIME_ENUM) - self.assertTrue(transform_pb.HasField("current_document")) - self.assertFalse(transform_pb.current_document.exists) - - def test_get_transform_pb_w_server_timestamp_wo_exists_precondition(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - from google.cloud.firestore_v1._helpers import REQUEST_TIME_ENUM - - document_data = {"a": {"b": {"c": SERVER_TIMESTAMP}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - self.assertEqual(transform.set_to_server_value, REQUEST_TIME_ENUM) - self.assertFalse(transform_pb.HasField("current_document")) - - @staticmethod - def _array_value_to_list(array_value): - from google.cloud.firestore_v1._helpers import decode_value - - return [decode_value(element, client=None) for element in array_value.values] - - def test_get_transform_pb_w_array_remove(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import ArrayRemove - - values = [2, 4, 8] - document_data = {"a": {"b": {"c": ArrayRemove(values)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - removed = self._array_value_to_list(transform.remove_all_from_array) - self.assertEqual(removed, values) - self.assertFalse(transform_pb.HasField("current_document")) - - def test_get_transform_pb_w_array_union(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import ArrayUnion - - values = [1, 3, 5] - document_data = {"a": {"b": {"c": ArrayUnion(values)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - added = self._array_value_to_list(transform.append_missing_elements) - self.assertEqual(added, values) - self.assertFalse(transform_pb.HasField("current_document")) - - def test_get_transform_pb_w_increment_int(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import Increment - - value = 1 - document_data = {"a": {"b": {"c": Increment(value)}}} - inst = self._make_one(document_data) - document_path = ( - 
"projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - added = transform.increment.integer_value - self.assertEqual(added, value) - self.assertFalse(transform_pb.HasField("current_document")) - - def test_get_transform_pb_w_increment_float(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import Increment - - value = 3.1415926 - document_data = {"a": {"b": {"c": Increment(value)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - added = transform.increment.double_value - self.assertEqual(added, value) - self.assertFalse(transform_pb.HasField("current_document")) - - def test_get_transform_pb_w_maximum_int(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import Maximum - - value = 1 - document_data = {"a": {"b": {"c": Maximum(value)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - added = transform.maximum.integer_value - self.assertEqual(added, value) - self.assertFalse(transform_pb.HasField("current_document")) - - def test_get_transform_pb_w_maximum_float(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import Maximum - - value = 3.1415926 - document_data = {"a": {"b": {"c": Maximum(value)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - added = transform.maximum.double_value - self.assertEqual(added, value) - self.assertFalse(transform_pb.HasField("current_document")) - - def test_get_transform_pb_w_minimum_int(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import Minimum - - value = 1 - document_data = {"a": {"b": {"c": Minimum(value)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = 
inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - added = transform.minimum.integer_value - self.assertEqual(added, value) - self.assertFalse(transform_pb.HasField("current_document")) - - def test_get_transform_pb_w_minimum_float(self): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transforms import Minimum - - value = 3.1415926 - document_data = {"a": {"b": {"c": Minimum(value)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - added = transform.minimum.double_value - self.assertEqual(added, value) - self.assertFalse(transform_pb.HasField("current_document")) - - -class Test_pbs_for_create(unittest.TestCase): - @staticmethod - def _call_fut(document_path, document_data): - from google.cloud.firestore_v1._helpers import pbs_for_create - - return pbs_for_create(document_path, document_data) - - @staticmethod - def _make_write_w_document(document_path, **data): - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1._helpers import encode_dict - from google.cloud.firestore_v1.proto import common_pb2 - - return write_pb2.Write( - update=document_pb2.Document(name=document_path, fields=encode_dict(data)), - current_document=common_pb2.Precondition(exists=False), - ) - - @staticmethod - def _make_write_w_transform(document_path, fields): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.gapic import enums - - server_val = enums.DocumentTransform.FieldTransform.ServerValue - transforms = [ - write_pb2.DocumentTransform.FieldTransform( - field_path=field, set_to_server_value=server_val.REQUEST_TIME - ) - for field in fields - ] - - return write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, field_transforms=transforms - ) - ) - - def _helper(self, do_transform=False, empty_val=False): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"cheese": 1.5, "crackers": True} - - if do_transform: - document_data["butter"] = SERVER_TIMESTAMP - - if empty_val: - document_data["mustard"] = {} - - write_pbs = self._call_fut(document_path, document_data) - - if empty_val: - update_pb = self._make_write_w_document( - document_path, cheese=1.5, crackers=True, mustard={} - ) - else: - update_pb = self._make_write_w_document( - document_path, cheese=1.5, crackers=True - ) - expected_pbs = [update_pb] - - if do_transform: - expected_pbs.append( - self._make_write_w_transform(document_path, fields=["butter"]) - ) - - self.assertEqual(write_pbs, expected_pbs) - - def test_without_transform(self): - self._helper() - - def test_w_transform(self): - self._helper(do_transform=True) - - def 
test_w_transform_and_empty_value(self): - self._helper(do_transform=True, empty_val=True) - - -class Test_pbs_for_set_no_merge(unittest.TestCase): - @staticmethod - def _call_fut(document_path, document_data): - from google.cloud.firestore_v1 import _helpers - - return _helpers.pbs_for_set_no_merge(document_path, document_data) - - @staticmethod - def _make_write_w_document(document_path, **data): - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1._helpers import encode_dict - - return write_pb2.Write( - update=document_pb2.Document(name=document_path, fields=encode_dict(data)) - ) - - @staticmethod - def _make_write_w_transform(document_path, fields): - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.gapic import enums - - server_val = enums.DocumentTransform.FieldTransform.ServerValue - transforms = [ - write_pb2.DocumentTransform.FieldTransform( - field_path=field, set_to_server_value=server_val.REQUEST_TIME - ) - for field in fields - ] - - return write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, field_transforms=transforms - ) - ) - - def test_w_empty_document(self): - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {} - - write_pbs = self._call_fut(document_path, document_data) - - update_pb = self._make_write_w_document(document_path) - expected_pbs = [update_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_w_only_server_timestamp(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"butter": SERVER_TIMESTAMP} - - write_pbs = self._call_fut(document_path, document_data) - - update_pb = self._make_write_w_document(document_path) - transform_pb = self._make_write_w_transform(document_path, ["butter"]) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - def _helper(self, do_transform=False, empty_val=False): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"cheese": 1.5, "crackers": True} - - if do_transform: - document_data["butter"] = SERVER_TIMESTAMP - - if empty_val: - document_data["mustard"] = {} - - write_pbs = self._call_fut(document_path, document_data) - - if empty_val: - update_pb = self._make_write_w_document( - document_path, cheese=1.5, crackers=True, mustard={} - ) - else: - update_pb = self._make_write_w_document( - document_path, cheese=1.5, crackers=True - ) - expected_pbs = [update_pb] - - if do_transform: - expected_pbs.append( - self._make_write_w_transform(document_path, fields=["butter"]) - ) - - self.assertEqual(write_pbs, expected_pbs) - - def test_defaults(self): - self._helper() - - def test_w_transform(self): - self._helper(do_transform=True) - - def test_w_transform_and_empty_value(self): - # Exercise #5944 - self._helper(do_transform=True, empty_val=True) - - -class TestDocumentExtractorForMerge(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1 import _helpers - - return _helpers.DocumentExtractorForMerge - - def _make_one(self, document_data): - return self._get_target_class()(document_data) - - def test_ctor_w_empty_document(self): - document_data = {} - - inst = self._make_one(document_data) - - self.assertEqual(inst.data_merge, []) - 
self.assertEqual(inst.transform_merge, []) - self.assertEqual(inst.merge, []) - - def test_apply_merge_all_w_empty_document(self): - document_data = {} - inst = self._make_one(document_data) - - inst.apply_merge(True) - - self.assertEqual(inst.data_merge, []) - self.assertEqual(inst.transform_merge, []) - self.assertEqual(inst.merge, []) - self.assertFalse(inst.has_updates) - - def test_apply_merge_all_w_delete(self): - from google.cloud.firestore_v1.transforms import DELETE_FIELD - - document_data = {"write_me": "value", "delete_me": DELETE_FIELD} - inst = self._make_one(document_data) - - inst.apply_merge(True) - - expected_data_merge = [ - _make_field_path("delete_me"), - _make_field_path("write_me"), - ] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, []) - self.assertEqual(inst.merge, expected_data_merge) - self.assertTrue(inst.has_updates) - - def test_apply_merge_all_w_server_timestamp(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_data = {"write_me": "value", "timestamp": SERVER_TIMESTAMP} - inst = self._make_one(document_data) - - inst.apply_merge(True) - - expected_data_merge = [_make_field_path("write_me")] - expected_transform_merge = [_make_field_path("timestamp")] - expected_merge = [_make_field_path("timestamp"), _make_field_path("write_me")] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, expected_transform_merge) - self.assertEqual(inst.merge, expected_merge) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_empty_document(self): - document_data = {} - inst = self._make_one(document_data) - - with self.assertRaises(ValueError): - inst.apply_merge(["nonesuch", "or.this"]) - - def test_apply_merge_list_fields_w_unmerged_delete(self): - from google.cloud.firestore_v1.transforms import DELETE_FIELD - - document_data = { - "write_me": "value", - "delete_me": DELETE_FIELD, - "ignore_me": 123, - "unmerged_delete": DELETE_FIELD, - } - inst = self._make_one(document_data) - - with self.assertRaises(ValueError): - inst.apply_merge(["write_me", "delete_me"]) - - def test_apply_merge_list_fields_w_delete(self): - from google.cloud.firestore_v1.transforms import DELETE_FIELD - - document_data = { - "write_me": "value", - "delete_me": DELETE_FIELD, - "ignore_me": 123, - } - inst = self._make_one(document_data) - - inst.apply_merge(["write_me", "delete_me"]) - - expected_set_fields = {"write_me": "value"} - expected_deleted_fields = [_make_field_path("delete_me")] - self.assertEqual(inst.set_fields, expected_set_fields) - self.assertEqual(inst.deleted_fields, expected_deleted_fields) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_prefixes(self): - - document_data = {"a": {"b": {"c": 123}}} - inst = self._make_one(document_data) - - with self.assertRaises(ValueError): - inst.apply_merge(["a", "a.b"]) - - def test_apply_merge_list_fields_w_missing_data_string_paths(self): - - document_data = {"write_me": "value", "ignore_me": 123} - inst = self._make_one(document_data) - - with self.assertRaises(ValueError): - inst.apply_merge(["write_me", "nonesuch"]) - - def test_apply_merge_list_fields_w_non_merge_field(self): - - document_data = {"write_me": "value", "ignore_me": 123} - inst = self._make_one(document_data) - - inst.apply_merge([_make_field_path("write_me")]) - - expected_set_fields = {"write_me": "value"} - self.assertEqual(inst.set_fields, expected_set_fields) - 
self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_server_timestamp(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_data = { - "write_me": "value", - "timestamp": SERVER_TIMESTAMP, - "ignored_stamp": SERVER_TIMESTAMP, - } - inst = self._make_one(document_data) - - inst.apply_merge([_make_field_path("write_me"), _make_field_path("timestamp")]) - - expected_data_merge = [_make_field_path("write_me")] - expected_transform_merge = [_make_field_path("timestamp")] - expected_merge = [_make_field_path("timestamp"), _make_field_path("write_me")] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, expected_transform_merge) - self.assertEqual(inst.merge, expected_merge) - expected_server_timestamps = [_make_field_path("timestamp")] - self.assertEqual(inst.server_timestamps, expected_server_timestamps) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_array_remove(self): - from google.cloud.firestore_v1.transforms import ArrayRemove - - values = [2, 4, 8] - document_data = { - "write_me": "value", - "remove_me": ArrayRemove(values), - "ignored_remove_me": ArrayRemove((1, 3, 5)), - } - inst = self._make_one(document_data) - - inst.apply_merge([_make_field_path("write_me"), _make_field_path("remove_me")]) - - expected_data_merge = [_make_field_path("write_me")] - expected_transform_merge = [_make_field_path("remove_me")] - expected_merge = [_make_field_path("remove_me"), _make_field_path("write_me")] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, expected_transform_merge) - self.assertEqual(inst.merge, expected_merge) - expected_array_removes = {_make_field_path("remove_me"): values} - self.assertEqual(inst.array_removes, expected_array_removes) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_array_union(self): - from google.cloud.firestore_v1.transforms import ArrayUnion - - values = [1, 3, 5] - document_data = { - "write_me": "value", - "union_me": ArrayUnion(values), - "ignored_union_me": ArrayUnion((2, 4, 8)), - } - inst = self._make_one(document_data) - - inst.apply_merge([_make_field_path("write_me"), _make_field_path("union_me")]) - - expected_data_merge = [_make_field_path("write_me")] - expected_transform_merge = [_make_field_path("union_me")] - expected_merge = [_make_field_path("union_me"), _make_field_path("write_me")] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, expected_transform_merge) - self.assertEqual(inst.merge, expected_merge) - expected_array_unions = {_make_field_path("union_me"): values} - self.assertEqual(inst.array_unions, expected_array_unions) - self.assertTrue(inst.has_updates) - - -class Test_pbs_for_set_with_merge(unittest.TestCase): - @staticmethod - def _call_fut(document_path, document_data, merge): - from google.cloud.firestore_v1 import _helpers - - return _helpers.pbs_for_set_with_merge( - document_path, document_data, merge=merge - ) - - @staticmethod - def _make_write_w_document(document_path, **data): - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1._helpers import encode_dict - - return write_pb2.Write( - update=document_pb2.Document(name=document_path, fields=encode_dict(data)) - ) - - @staticmethod - def _make_write_w_transform(document_path, fields): - from google.cloud.firestore_v1.proto import 
write_pb2 - from google.cloud.firestore_v1.gapic import enums - - server_val = enums.DocumentTransform.FieldTransform.ServerValue - transforms = [ - write_pb2.DocumentTransform.FieldTransform( - field_path=field, set_to_server_value=server_val.REQUEST_TIME - ) - for field in fields - ] - - return write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, field_transforms=transforms - ) - ) - - @staticmethod - def _update_document_mask(update_pb, field_paths): - from google.cloud.firestore_v1.proto import common_pb2 - - update_pb.update_mask.CopyFrom( - common_pb2.DocumentMask(field_paths=sorted(field_paths)) - ) - - def test_with_merge_true_wo_transform(self): - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"cheese": 1.5, "crackers": True} - - write_pbs = self._call_fut(document_path, document_data, merge=True) - - update_pb = self._make_write_w_document(document_path, **document_data) - self._update_document_mask(update_pb, field_paths=sorted(document_data)) - expected_pbs = [update_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_field_wo_transform(self): - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"cheese": 1.5, "crackers": True} - - write_pbs = self._call_fut(document_path, document_data, merge=["cheese"]) - - update_pb = self._make_write_w_document( - document_path, cheese=document_data["cheese"] - ) - self._update_document_mask(update_pb, field_paths=["cheese"]) - expected_pbs = [update_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_true_w_transform(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - update_data = {"cheese": 1.5, "crackers": True} - document_data = update_data.copy() - document_data["butter"] = SERVER_TIMESTAMP - - write_pbs = self._call_fut(document_path, document_data, merge=True) - - update_pb = self._make_write_w_document(document_path, **update_data) - self._update_document_mask(update_pb, field_paths=sorted(update_data)) - transform_pb = self._make_write_w_transform(document_path, fields=["butter"]) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_field_w_transform(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - update_data = {"cheese": 1.5, "crackers": True} - document_data = update_data.copy() - document_data["butter"] = SERVER_TIMESTAMP - - write_pbs = self._call_fut( - document_path, document_data, merge=["cheese", "butter"] - ) - - update_pb = self._make_write_w_document( - document_path, cheese=document_data["cheese"] - ) - self._update_document_mask(update_pb, ["cheese"]) - transform_pb = self._make_write_w_transform(document_path, fields=["butter"]) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_field_w_transform_masking_simple(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - update_data = {"cheese": 1.5, "crackers": True} - document_data = update_data.copy() - document_data["butter"] = {"pecan": SERVER_TIMESTAMP} - - write_pbs = self._call_fut(document_path, document_data, merge=["butter.pecan"]) - - update_pb = self._make_write_w_document(document_path) - transform_pb = 
self._make_write_w_transform( - document_path, fields=["butter.pecan"] - ) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_field_w_transform_parent(self): - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - update_data = {"cheese": 1.5, "crackers": True} - document_data = update_data.copy() - document_data["butter"] = {"popcorn": "yum", "pecan": SERVER_TIMESTAMP} - - write_pbs = self._call_fut( - document_path, document_data, merge=["cheese", "butter"] - ) - - update_pb = self._make_write_w_document( - document_path, cheese=update_data["cheese"], butter={"popcorn": "yum"} - ) - self._update_document_mask(update_pb, ["cheese", "butter"]) - transform_pb = self._make_write_w_transform( - document_path, fields=["butter.pecan"] - ) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - -class TestDocumentExtractorForUpdate(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1 import _helpers - - return _helpers.DocumentExtractorForUpdate - - def _make_one(self, document_data): - return self._get_target_class()(document_data) - - def test_ctor_w_empty_document(self): - document_data = {} - - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, []) - - def test_ctor_w_simple_keys(self): - document_data = {"a": 1, "b": 2, "c": 3} - - expected_paths = [ - _make_field_path("a"), - _make_field_path("b"), - _make_field_path("c"), - ] - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, expected_paths) - - def test_ctor_w_nested_keys(self): - document_data = {"a": {"d": {"e": 1}}, "b": {"f": 7}, "c": 3} - - expected_paths = [ - _make_field_path("a"), - _make_field_path("b"), - _make_field_path("c"), - ] - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, expected_paths) - - def test_ctor_w_dotted_keys(self): - document_data = {"a.d.e": 1, "b.f": 7, "c": 3} - - expected_paths = [ - _make_field_path("a", "d", "e"), - _make_field_path("b", "f"), - _make_field_path("c"), - ] - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, expected_paths) - - def test_ctor_w_nested_dotted_keys(self): - document_data = {"a.d.e": 1, "b.f": {"h.i": 9}, "c": 3} - - expected_paths = [ - _make_field_path("a", "d", "e"), - _make_field_path("b", "f"), - _make_field_path("c"), - ] - expected_set_fields = {"a": {"d": {"e": 1}}, "b": {"f": {"h.i": 9}}, "c": 3} - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, expected_paths) - self.assertEqual(inst.set_fields, expected_set_fields) - - -class Test_pbs_for_update(unittest.TestCase): - @staticmethod - def _call_fut(document_path, field_updates, option): - from google.cloud.firestore_v1._helpers import pbs_for_update - - return pbs_for_update(document_path, field_updates, option) - - def _helper(self, option=None, do_transform=False, **write_kwargs): - from google.cloud.firestore_v1 import _helpers - from google.cloud.firestore_v1.field_path import FieldPath - from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - document_path = _make_ref_string(u"toy", u"car", u"onion", u"garlic") - 
field_path1 = "bitez.yum" - value = b"\x00\x01" - field_path2 = "blog.internet" - - field_updates = {field_path1: value} - if do_transform: - field_updates[field_path2] = SERVER_TIMESTAMP - - write_pbs = self._call_fut(document_path, field_updates, option) - - map_pb = document_pb2.MapValue(fields={"yum": _value_pb(bytes_value=value)}) - - field_paths = [field_path1] - - expected_update_pb = write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields={"bitez": _value_pb(map_value=map_pb)} - ), - update_mask=common_pb2.DocumentMask(field_paths=field_paths), - **write_kwargs - ) - if isinstance(option, _helpers.ExistsOption): - precondition = common_pb2.Precondition(exists=False) - expected_update_pb.current_document.CopyFrom(precondition) - expected_pbs = [expected_update_pb] - if do_transform: - transform_paths = FieldPath.from_string(field_path2) - server_val = enums.DocumentTransform.FieldTransform.ServerValue - expected_transform_pb = write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, - field_transforms=[ - write_pb2.DocumentTransform.FieldTransform( - field_path=transform_paths.to_api_repr(), - set_to_server_value=server_val.REQUEST_TIME, - ) - ], - ) - ) - expected_pbs.append(expected_transform_pb) - self.assertEqual(write_pbs, expected_pbs) - - def test_without_option(self): - from google.cloud.firestore_v1.proto import common_pb2 - - precondition = common_pb2.Precondition(exists=True) - self._helper(current_document=precondition) - - def test_with_exists_option(self): - from google.cloud.firestore_v1.client import _helpers - - option = _helpers.ExistsOption(False) - self._helper(option=option) - - def test_update_and_transform(self): - from google.cloud.firestore_v1.proto import common_pb2 - - precondition = common_pb2.Precondition(exists=True) - self._helper(current_document=precondition, do_transform=True) - - -class Test_pb_for_delete(unittest.TestCase): - @staticmethod - def _call_fut(document_path, option): - from google.cloud.firestore_v1._helpers import pb_for_delete - - return pb_for_delete(document_path, option) - - def _helper(self, option=None, **write_kwargs): - from google.cloud.firestore_v1.proto import write_pb2 - - document_path = _make_ref_string(u"chicken", u"philly", u"one", u"two") - write_pb = self._call_fut(document_path, option) - - expected_pb = write_pb2.Write(delete=document_path, **write_kwargs) - self.assertEqual(write_pb, expected_pb) - - def test_without_option(self): - self._helper() - - def test_with_option(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1 import _helpers - - update_time = timestamp_pb2.Timestamp(seconds=1309700594, nanos=822211297) - option = _helpers.LastUpdateOption(update_time) - precondition = common_pb2.Precondition(update_time=update_time) - self._helper(option=option, current_document=precondition) - - -class Test_get_transaction_id(unittest.TestCase): - @staticmethod - def _call_fut(transaction, **kwargs): - from google.cloud.firestore_v1._helpers import get_transaction_id - - return get_transaction_id(transaction, **kwargs) - - def test_no_transaction(self): - ret_val = self._call_fut(None) - self.assertIsNone(ret_val) - - def test_invalid_transaction(self): - from google.cloud.firestore_v1.transaction import Transaction - - transaction = Transaction(mock.sentinel.client) - self.assertFalse(transaction.in_progress) - with self.assertRaises(ValueError): - self._call_fut(transaction) - - 
def test_after_writes_not_allowed(self): - from google.cloud.firestore_v1._helpers import ReadAfterWriteError - from google.cloud.firestore_v1.transaction import Transaction - - transaction = Transaction(mock.sentinel.client) - transaction._id = b"under-hook" - transaction._write_pbs.append(mock.sentinel.write) - - with self.assertRaises(ReadAfterWriteError): - self._call_fut(transaction) - - def test_after_writes_allowed(self): - from google.cloud.firestore_v1.transaction import Transaction - - transaction = Transaction(mock.sentinel.client) - txn_id = b"we-are-0fine" - transaction._id = txn_id - transaction._write_pbs.append(mock.sentinel.write) - - ret_val = self._call_fut(transaction, read_operation=False) - self.assertEqual(ret_val, txn_id) - - def test_good_transaction(self): - from google.cloud.firestore_v1.transaction import Transaction - - transaction = Transaction(mock.sentinel.client) - txn_id = b"doubt-it" - transaction._id = txn_id - self.assertTrue(transaction.in_progress) - - self.assertEqual(self._call_fut(transaction), txn_id) - - -class Test_metadata_with_prefix(unittest.TestCase): - @staticmethod - def _call_fut(database_string): - from google.cloud.firestore_v1._helpers import metadata_with_prefix - - return metadata_with_prefix(database_string) - - def test_it(self): - database_string = u"projects/prahj/databases/dee-bee" - metadata = self._call_fut(database_string) - - self.assertEqual(metadata, [("google-cloud-resource-prefix", database_string)]) - - -class TestWriteOption(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1._helpers import WriteOption - - return WriteOption - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_modify_write(self): - option = self._make_one() - with self.assertRaises(NotImplementedError): - option.modify_write(None) - - -class TestLastUpdateOption(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1._helpers import LastUpdateOption - - return LastUpdateOption - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - option = self._make_one(mock.sentinel.timestamp) - self.assertIs(option._last_update_time, mock.sentinel.timestamp) - - def test___eq___different_type(self): - option = self._make_one(mock.sentinel.timestamp) - other = object() - self.assertFalse(option == other) - - def test___eq___different_timestamp(self): - option = self._make_one(mock.sentinel.timestamp) - other = self._make_one(mock.sentinel.other_timestamp) - self.assertFalse(option == other) - - def test___eq___same_timestamp(self): - option = self._make_one(mock.sentinel.timestamp) - other = self._make_one(mock.sentinel.timestamp) - self.assertTrue(option == other) - - def test_modify_write_update_time(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - timestamp_pb = timestamp_pb2.Timestamp(seconds=683893592, nanos=229362000) - option = self._make_one(timestamp_pb) - write_pb = write_pb2.Write() - ret_val = option.modify_write(write_pb) - - self.assertIsNone(ret_val) - expected_doc = common_pb2.Precondition(update_time=timestamp_pb) - self.assertEqual(write_pb.current_document, expected_doc) - - -class TestExistsOption(unittest.TestCase): - @staticmethod - def _get_target_class(): - from 
google.cloud.firestore_v1._helpers import ExistsOption - - return ExistsOption - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - option = self._make_one(mock.sentinel.totes_bool) - self.assertIs(option._exists, mock.sentinel.totes_bool) - - def test___eq___different_type(self): - option = self._make_one(mock.sentinel.timestamp) - other = object() - self.assertFalse(option == other) - - def test___eq___different_exists(self): - option = self._make_one(True) - other = self._make_one(False) - self.assertFalse(option == other) - - def test___eq___same_exists(self): - option = self._make_one(True) - other = self._make_one(True) - self.assertTrue(option == other) - - def test_modify_write(self): - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - for exists in (True, False): - option = self._make_one(exists) - write_pb = write_pb2.Write() - ret_val = option.modify_write(write_pb) - - self.assertIsNone(ret_val) - expected_doc = common_pb2.Precondition(exists=exists) - self.assertEqual(write_pb.current_document, expected_doc) - - -def _value_pb(**kwargs): - from google.cloud.firestore_v1.proto.document_pb2 import Value - - return Value(**kwargs) - - -def _make_ref_string(project, database, *path): - from google.cloud.firestore_v1 import _helpers - - doc_rel_path = _helpers.DOCUMENT_PATH_DELIMITER.join(path) - return u"projects/{}/databases/{}/documents/{}".format( - project, database, doc_rel_path - ) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="quark"): - from google.cloud.firestore_v1.client import Client - - credentials = _make_credentials() - return Client(project=project, credentials=credentials) - - -def _make_field_path(*fields): - from google.cloud.firestore_v1 import field_path - - return field_path.FieldPath(*fields) diff --git a/firestore/tests/unit/v1/test_batch.py b/firestore/tests/unit/v1/test_batch.py deleted file mode 100644 index 08421d6039dd..000000000000 --- a/firestore/tests/unit/v1/test_batch.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import mock - - -class TestWriteBatch(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.batch import WriteBatch - - return WriteBatch - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - batch = self._make_one(mock.sentinel.client) - self.assertIs(batch._client, mock.sentinel.client) - self.assertEqual(batch._write_pbs, []) - self.assertIsNone(batch.write_results) - self.assertIsNone(batch.commit_time) - - def test__add_write_pbs(self): - batch = self._make_one(mock.sentinel.client) - self.assertEqual(batch._write_pbs, []) - batch._add_write_pbs([mock.sentinel.write1, mock.sentinel.write2]) - self.assertEqual(batch._write_pbs, [mock.sentinel.write1, mock.sentinel.write2]) - - def test_create(self): - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("this", "one") - document_data = {"a": 10, "b": 2.5} - ret_val = batch.create(reference, document_data) - self.assertIsNone(ret_val) - new_write_pb = write_pb2.Write( - update=document_pb2.Document( - name=reference._document_path, - fields={ - "a": _value_pb(integer_value=document_data["a"]), - "b": _value_pb(double_value=document_data["b"]), - }, - ), - current_document=common_pb2.Precondition(exists=False), - ) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_set(self): - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("another", "one") - field = "zapzap" - value = u"meadows and flowers" - document_data = {field: value} - ret_val = batch.set(reference, document_data) - self.assertIsNone(ret_val) - new_write_pb = write_pb2.Write( - update=document_pb2.Document( - name=reference._document_path, - fields={field: _value_pb(string_value=value)}, - ) - ) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_set_merge(self): - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("another", "one") - field = "zapzap" - value = u"meadows and flowers" - document_data = {field: value} - ret_val = batch.set(reference, document_data, merge=True) - self.assertIsNone(ret_val) - new_write_pb = write_pb2.Write( - update=document_pb2.Document( - name=reference._document_path, - fields={field: _value_pb(string_value=value)}, - ), - update_mask={"field_paths": [field]}, - ) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_update(self): - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("cats", "cradle") - field_path = "head.foot" - value = u"knees toes shoulders" - field_updates = {field_path: value} - - ret_val = batch.update(reference, field_updates) - 
self.assertIsNone(ret_val) - - map_pb = document_pb2.MapValue(fields={"foot": _value_pb(string_value=value)}) - new_write_pb = write_pb2.Write( - update=document_pb2.Document( - name=reference._document_path, - fields={"head": _value_pb(map_value=map_pb)}, - ), - update_mask=common_pb2.DocumentMask(field_paths=[field_path]), - current_document=common_pb2.Precondition(exists=True), - ) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_delete(self): - from google.cloud.firestore_v1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("early", "mornin", "dawn", "now") - ret_val = batch.delete(reference) - self.assertIsNone(ret_val) - new_write_pb = write_pb2.Write(delete=reference._document_path) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_commit(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1.proto import firestore_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.Mock(spec=["commit"]) - timestamp = timestamp_pb2.Timestamp(seconds=1234567, nanos=123456798) - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult(), write_pb2.WriteResult()], - commit_time=timestamp, - ) - firestore_api.commit.return_value = commit_response - - # Attach the fake GAPIC to a real client. - client = _make_client("grand") - client._firestore_api_internal = firestore_api - - # Actually make a batch with some mutations and call commit(). - batch = self._make_one(client) - document1 = client.document("a", "b") - batch.create(document1, {"ten": 10, "buck": u"ets"}) - document2 = client.document("c", "d", "e", "f") - batch.delete(document2) - write_pbs = batch._write_pbs[::] - - write_results = batch.commit() - self.assertEqual(write_results, list(commit_response.write_results)) - self.assertEqual(batch.write_results, write_results) - self.assertEqual(batch.commit_time, timestamp) - # Make sure batch has no more "changes". - self.assertEqual(batch._write_pbs, []) - - # Verify the mocks. - firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_as_context_mgr_wo_error(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1.proto import firestore_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - firestore_api = mock.Mock(spec=["commit"]) - timestamp = timestamp_pb2.Timestamp(seconds=1234567, nanos=123456798) - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult(), write_pb2.WriteResult()], - commit_time=timestamp, - ) - firestore_api.commit.return_value = commit_response - client = _make_client() - client._firestore_api_internal = firestore_api - batch = self._make_one(client) - document1 = client.document("a", "b") - document2 = client.document("c", "d", "e", "f") - - with batch as ctx_mgr: - self.assertIs(ctx_mgr, batch) - ctx_mgr.create(document1, {"ten": 10, "buck": u"ets"}) - ctx_mgr.delete(document2) - write_pbs = batch._write_pbs[::] - - self.assertEqual(batch.write_results, list(commit_response.write_results)) - self.assertEqual(batch.commit_time, timestamp) - # Make sure batch has no more "changes". - self.assertEqual(batch._write_pbs, []) - - # Verify the mocks. 
- firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_as_context_mgr_w_error(self): - firestore_api = mock.Mock(spec=["commit"]) - client = _make_client() - client._firestore_api_internal = firestore_api - batch = self._make_one(client) - document1 = client.document("a", "b") - document2 = client.document("c", "d", "e", "f") - - with self.assertRaises(RuntimeError): - with batch as ctx_mgr: - ctx_mgr.create(document1, {"ten": 10, "buck": u"ets"}) - ctx_mgr.delete(document2) - raise RuntimeError("testing") - - self.assertIsNone(batch.write_results) - self.assertIsNone(batch.commit_time) - # batch still has its changes - self.assertEqual(len(batch._write_pbs), 2) - - firestore_api.commit.assert_not_called() - - -def _value_pb(**kwargs): - from google.cloud.firestore_v1.proto.document_pb2 import Value - - return Value(**kwargs) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="seventy-nine"): - from google.cloud.firestore_v1.client import Client - - credentials = _make_credentials() - return Client(project=project, credentials=credentials) diff --git a/firestore/tests/unit/v1/test_client.py b/firestore/tests/unit/v1/test_client.py deleted file mode 100644 index 7ec062422a6c..000000000000 --- a/firestore/tests/unit/v1/test_client.py +++ /dev/null @@ -1,745 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import types -import unittest - -import mock - - -class TestClient(unittest.TestCase): - - PROJECT = "my-prahjekt" - - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.client import Client - - return Client - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def _make_default_one(self): - credentials = _make_credentials() - return self._make_one(project=self.PROJECT, credentials=credentials) - - def test_constructor(self): - from google.cloud.firestore_v1.client import _CLIENT_INFO - from google.cloud.firestore_v1.client import DEFAULT_DATABASE - - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - self.assertEqual(client.project, self.PROJECT) - self.assertEqual(client._credentials, credentials) - self.assertEqual(client._database, DEFAULT_DATABASE) - self.assertIs(client._client_info, _CLIENT_INFO) - self.assertIsNone(client._emulator_host) - - def test_constructor_with_emulator_host(self): - from google.cloud.firestore_v1.client import _FIRESTORE_EMULATOR_HOST - - credentials = _make_credentials() - emulator_host = "localhost:8081" - with mock.patch("os.getenv") as getenv: - getenv.return_value = emulator_host - client = self._make_one(project=self.PROJECT, credentials=credentials) - self.assertEqual(client._emulator_host, emulator_host) - getenv.assert_called_once_with(_FIRESTORE_EMULATOR_HOST) - - def test_constructor_explicit(self): - credentials = _make_credentials() - database = "now-db" - client_info = mock.Mock() - client_options = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - database=database, - client_info=client_info, - client_options=client_options, - ) - self.assertEqual(client.project, self.PROJECT) - self.assertEqual(client._credentials, credentials) - self.assertEqual(client._database, database) - self.assertIs(client._client_info, client_info) - self.assertIs(client._client_options, client_options) - - def test_constructor_w_client_options(self): - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - client_options={"api_endpoint": "foo-firestore.googleapis.com"}, - ) - self.assertEqual(client._target, "foo-firestore.googleapis.com") - - @mock.patch( - "google.cloud.firestore_v1.gapic.firestore_client.FirestoreClient", - autospec=True, - return_value=mock.sentinel.firestore_api, - ) - def test__firestore_api_property(self, mock_client): - mock_client.SERVICE_ADDRESS = "endpoint" - client = self._make_default_one() - client_info = client._client_info = mock.Mock() - self.assertIsNone(client._firestore_api_internal) - firestore_api = client._firestore_api - self.assertIs(firestore_api, mock_client.return_value) - self.assertIs(firestore_api, client._firestore_api_internal) - mock_client.assert_called_once_with( - transport=client._transport, client_info=client_info - ) - - # Call again to show that it is cached, but call count is still 1. 
- self.assertIs(client._firestore_api, mock_client.return_value) - self.assertEqual(mock_client.call_count, 1) - - @mock.patch( - "google.cloud.firestore_v1.gapic.firestore_client.FirestoreClient", - autospec=True, - return_value=mock.sentinel.firestore_api, - ) - @mock.patch( - "google.cloud.firestore_v1.gapic.transports.firestore_grpc_transport.firestore_pb2_grpc.grpc.insecure_channel", - autospec=True, - ) - def test__firestore_api_property_with_emulator( - self, mock_insecure_channel, mock_client - ): - emulator_host = "localhost:8081" - with mock.patch("os.getenv") as getenv: - getenv.return_value = emulator_host - client = self._make_default_one() - - self.assertIsNone(client._firestore_api_internal) - firestore_api = client._firestore_api - self.assertIs(firestore_api, mock_client.return_value) - self.assertIs(firestore_api, client._firestore_api_internal) - - mock_insecure_channel.assert_called_once_with(emulator_host) - - # Call again to show that it is cached, but call count is still 1. - self.assertIs(client._firestore_api, mock_client.return_value) - self.assertEqual(mock_client.call_count, 1) - - def test___database_string_property(self): - credentials = _make_credentials() - database = "cheeeeez" - client = self._make_one( - project=self.PROJECT, credentials=credentials, database=database - ) - self.assertIsNone(client._database_string_internal) - database_string = client._database_string - expected = "projects/{}/databases/{}".format(client.project, client._database) - self.assertEqual(database_string, expected) - self.assertIs(database_string, client._database_string_internal) - - # Swap it out with a unique value to verify it is cached. - client._database_string_internal = mock.sentinel.cached - self.assertIs(client._database_string, mock.sentinel.cached) - - def test___rpc_metadata_property(self): - credentials = _make_credentials() - database = "quanta" - client = self._make_one( - project=self.PROJECT, credentials=credentials, database=database - ) - - self.assertEqual( - client._rpc_metadata, - [("google-cloud-resource-prefix", client._database_string)], - ) - - def test__rpc_metadata_property_with_emulator(self): - emulator_host = "localhost:8081" - with mock.patch("os.getenv") as getenv: - getenv.return_value = emulator_host - - credentials = _make_credentials() - database = "quanta" - client = self._make_one( - project=self.PROJECT, credentials=credentials, database=database - ) - - self.assertEqual( - client._rpc_metadata, - [ - ("google-cloud-resource-prefix", client._database_string), - ("authorization", "Bearer owner"), - ], - ) - - def test_collection_factory(self): - from google.cloud.firestore_v1.collection import CollectionReference - - collection_id = "users" - client = self._make_default_one() - collection = client.collection(collection_id) - - self.assertEqual(collection._path, (collection_id,)) - self.assertIs(collection._client, client) - self.assertIsInstance(collection, CollectionReference) - - def test_collection_factory_nested(self): - from google.cloud.firestore_v1.collection import CollectionReference - - client = self._make_default_one() - parts = ("users", "alovelace", "beep") - collection_path = "/".join(parts) - collection1 = client.collection(collection_path) - - self.assertEqual(collection1._path, parts) - self.assertIs(collection1._client, client) - self.assertIsInstance(collection1, CollectionReference) - - # Make sure using segments gives the same result. 
- collection2 = client.collection(*parts) - self.assertEqual(collection2._path, parts) - self.assertIs(collection2._client, client) - self.assertIsInstance(collection2, CollectionReference) - - def test_collection_group(self): - client = self._make_default_one() - query = client.collection_group("collectionId").where("foo", "==", u"bar") - - assert query._all_descendants - assert query._field_filters[0].field.field_path == "foo" - assert query._field_filters[0].value.string_value == u"bar" - assert query._field_filters[0].op == query._field_filters[0].EQUAL - assert query._parent.id == "collectionId" - - def test_collection_group_no_slashes(self): - client = self._make_default_one() - with self.assertRaises(ValueError): - client.collection_group("foo/bar") - - def test_document_factory(self): - from google.cloud.firestore_v1.document import DocumentReference - - parts = ("rooms", "roomA") - client = self._make_default_one() - doc_path = "/".join(parts) - document1 = client.document(doc_path) - - self.assertEqual(document1._path, parts) - self.assertIs(document1._client, client) - self.assertIsInstance(document1, DocumentReference) - - # Make sure using segments gives the same result. - document2 = client.document(*parts) - self.assertEqual(document2._path, parts) - self.assertIs(document2._client, client) - self.assertIsInstance(document2, DocumentReference) - - def test_document_factory_w_absolute_path(self): - from google.cloud.firestore_v1.document import DocumentReference - - parts = ("rooms", "roomA") - client = self._make_default_one() - doc_path = "/".join(parts) - to_match = client.document(doc_path) - document1 = client.document(to_match._document_path) - - self.assertEqual(document1._path, parts) - self.assertIs(document1._client, client) - self.assertIsInstance(document1, DocumentReference) - - def test_document_factory_w_nested_path(self): - from google.cloud.firestore_v1.document import DocumentReference - - client = self._make_default_one() - parts = ("rooms", "roomA", "shoes", "dressy") - doc_path = "/".join(parts) - document1 = client.document(doc_path) - - self.assertEqual(document1._path, parts) - self.assertIs(document1._client, client) - self.assertIsInstance(document1, DocumentReference) - - # Make sure using segments gives the same result. 
- document2 = client.document(*parts) - self.assertEqual(document2._path, parts) - self.assertIs(document2._client, client) - self.assertIsInstance(document2, DocumentReference) - - def test_field_path(self): - klass = self._get_target_class() - self.assertEqual(klass.field_path("a", "b", "c"), "a.b.c") - - def test_write_option_last_update(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1._helpers import LastUpdateOption - - timestamp = timestamp_pb2.Timestamp(seconds=1299767599, nanos=811111097) - - klass = self._get_target_class() - option = klass.write_option(last_update_time=timestamp) - self.assertIsInstance(option, LastUpdateOption) - self.assertEqual(option._last_update_time, timestamp) - - def test_write_option_exists(self): - from google.cloud.firestore_v1._helpers import ExistsOption - - klass = self._get_target_class() - - option1 = klass.write_option(exists=False) - self.assertIsInstance(option1, ExistsOption) - self.assertFalse(option1._exists) - - option2 = klass.write_option(exists=True) - self.assertIsInstance(option2, ExistsOption) - self.assertTrue(option2._exists) - - def test_write_open_neither_arg(self): - from google.cloud.firestore_v1.client import _BAD_OPTION_ERR - - klass = self._get_target_class() - with self.assertRaises(TypeError) as exc_info: - klass.write_option() - - self.assertEqual(exc_info.exception.args, (_BAD_OPTION_ERR,)) - - def test_write_multiple_args(self): - from google.cloud.firestore_v1.client import _BAD_OPTION_ERR - - klass = self._get_target_class() - with self.assertRaises(TypeError) as exc_info: - klass.write_option(exists=False, last_update_time=mock.sentinel.timestamp) - - self.assertEqual(exc_info.exception.args, (_BAD_OPTION_ERR,)) - - def test_write_bad_arg(self): - from google.cloud.firestore_v1.client import _BAD_OPTION_ERR - - klass = self._get_target_class() - with self.assertRaises(TypeError) as exc_info: - klass.write_option(spinach="popeye") - - extra = "{!r} was provided".format("spinach") - self.assertEqual(exc_info.exception.args, (_BAD_OPTION_ERR, extra)) - - def test_collections(self): - from google.api_core.page_iterator import Iterator - from google.api_core.page_iterator import Page - from google.cloud.firestore_v1.collection import CollectionReference - - collection_ids = ["users", "projects"] - client = self._make_default_one() - firestore_api = mock.Mock(spec=["list_collection_ids"]) - client._firestore_api_internal = firestore_api - - class _Iterator(Iterator): - def __init__(self, pages): - super(_Iterator, self).__init__(client=None) - self._pages = pages - - def _next_page(self): - if self._pages: - page, self._pages = self._pages[0], self._pages[1:] - return Page(self, page, self.item_to_value) - - iterator = _Iterator(pages=[collection_ids]) - firestore_api.list_collection_ids.return_value = iterator - - collections = list(client.collections()) - - self.assertEqual(len(collections), len(collection_ids)) - for collection, collection_id in zip(collections, collection_ids): - self.assertIsInstance(collection, CollectionReference) - self.assertEqual(collection.parent, None) - self.assertEqual(collection.id, collection_id) - - base_path = client._database_string + "/documents" - firestore_api.list_collection_ids.assert_called_once_with( - base_path, metadata=client._rpc_metadata - ) - - def _get_all_helper(self, client, references, document_pbs, **kwargs): - # Create a minimal fake GAPIC with a dummy response. 
- firestore_api = mock.Mock(spec=["batch_get_documents"]) - response_iterator = iter(document_pbs) - firestore_api.batch_get_documents.return_value = response_iterator - - # Attach the fake GAPIC to a real client. - client._firestore_api_internal = firestore_api - - # Actually call get_all(). - snapshots = client.get_all(references, **kwargs) - self.assertIsInstance(snapshots, types.GeneratorType) - - return list(snapshots) - - def _info_for_get_all(self, data1, data2): - client = self._make_default_one() - document1 = client.document("pineapple", "lamp1") - document2 = client.document("pineapple", "lamp2") - - # Make response protobufs. - document_pb1, read_time = _doc_get_info(document1._document_path, data1) - response1 = _make_batch_response(found=document_pb1, read_time=read_time) - - document_pb2, read_time = _doc_get_info(document2._document_path, data2) - response2 = _make_batch_response(found=document_pb2, read_time=read_time) - - return client, document1, document2, response1, response2 - - def test_get_all(self): - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.document import DocumentSnapshot - - data1 = {"a": u"cheese"} - data2 = {"b": True, "c": 18} - info = self._info_for_get_all(data1, data2) - client, document1, document2, response1, response2 = info - - # Exercise the mocked ``batch_get_documents``. - field_paths = ["a", "b"] - snapshots = self._get_all_helper( - client, - [document1, document2], - [response1, response2], - field_paths=field_paths, - ) - self.assertEqual(len(snapshots), 2) - - snapshot1 = snapshots[0] - self.assertIsInstance(snapshot1, DocumentSnapshot) - self.assertIs(snapshot1._reference, document1) - self.assertEqual(snapshot1._data, data1) - - snapshot2 = snapshots[1] - self.assertIsInstance(snapshot2, DocumentSnapshot) - self.assertIs(snapshot2._reference, document2) - self.assertEqual(snapshot2._data, data2) - - # Verify the call to the mock. - doc_paths = [document1._document_path, document2._document_path] - mask = common_pb2.DocumentMask(field_paths=field_paths) - client._firestore_api.batch_get_documents.assert_called_once_with( - client._database_string, - doc_paths, - mask, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_get_all_with_transaction(self): - from google.cloud.firestore_v1.document import DocumentSnapshot - - data = {"so-much": 484} - info = self._info_for_get_all(data, {}) - client, document, _, response, _ = info - transaction = client.transaction() - txn_id = b"the-man-is-non-stop" - transaction._id = txn_id - - # Exercise the mocked ``batch_get_documents``. - snapshots = self._get_all_helper( - client, [document], [response], transaction=transaction - ) - self.assertEqual(len(snapshots), 1) - - snapshot = snapshots[0] - self.assertIsInstance(snapshot, DocumentSnapshot) - self.assertIs(snapshot._reference, document) - self.assertEqual(snapshot._data, data) - - # Verify the call to the mock. - doc_paths = [document._document_path] - client._firestore_api.batch_get_documents.assert_called_once_with( - client._database_string, - doc_paths, - None, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - def test_get_all_unknown_result(self): - from google.cloud.firestore_v1.client import _BAD_DOC_TEMPLATE - - info = self._info_for_get_all({"z": 28.5}, {}) - client, document, _, _, response = info - - # Exercise the mocked ``batch_get_documents``. 
- with self.assertRaises(ValueError) as exc_info: - self._get_all_helper(client, [document], [response]) - - err_msg = _BAD_DOC_TEMPLATE.format(response.found.name) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - # Verify the call to the mock. - doc_paths = [document._document_path] - client._firestore_api.batch_get_documents.assert_called_once_with( - client._database_string, - doc_paths, - None, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_get_all_wrong_order(self): - from google.cloud.firestore_v1.document import DocumentSnapshot - - data1 = {"up": 10} - data2 = {"down": -10} - info = self._info_for_get_all(data1, data2) - client, document1, document2, response1, response2 = info - document3 = client.document("pineapple", "lamp3") - response3 = _make_batch_response(missing=document3._document_path) - - # Exercise the mocked ``batch_get_documents``. - snapshots = self._get_all_helper( - client, [document1, document2, document3], [response2, response1, response3] - ) - - self.assertEqual(len(snapshots), 3) - - snapshot1 = snapshots[0] - self.assertIsInstance(snapshot1, DocumentSnapshot) - self.assertIs(snapshot1._reference, document2) - self.assertEqual(snapshot1._data, data2) - - snapshot2 = snapshots[1] - self.assertIsInstance(snapshot2, DocumentSnapshot) - self.assertIs(snapshot2._reference, document1) - self.assertEqual(snapshot2._data, data1) - - self.assertFalse(snapshots[2].exists) - - # Verify the call to the mock. - doc_paths = [ - document1._document_path, - document2._document_path, - document3._document_path, - ] - client._firestore_api.batch_get_documents.assert_called_once_with( - client._database_string, - doc_paths, - None, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_batch(self): - from google.cloud.firestore_v1.batch import WriteBatch - - client = self._make_default_one() - batch = client.batch() - self.assertIsInstance(batch, WriteBatch) - self.assertIs(batch._client, client) - self.assertEqual(batch._write_pbs, []) - - def test_transaction(self): - from google.cloud.firestore_v1.transaction import Transaction - - client = self._make_default_one() - transaction = client.transaction(max_attempts=3, read_only=True) - self.assertIsInstance(transaction, Transaction) - self.assertEqual(transaction._write_pbs, []) - self.assertEqual(transaction._max_attempts, 3) - self.assertTrue(transaction._read_only) - self.assertIsNone(transaction._id) - - -class Test__reference_info(unittest.TestCase): - @staticmethod - def _call_fut(references): - from google.cloud.firestore_v1.client import _reference_info - - return _reference_info(references) - - def test_it(self): - from google.cloud.firestore_v1.client import Client - - credentials = _make_credentials() - client = Client(project="hi-projject", credentials=credentials) - - reference1 = client.document("a", "b") - reference2 = client.document("a", "b", "c", "d") - reference3 = client.document("a", "b") - reference4 = client.document("f", "g") - - doc_path1 = reference1._document_path - doc_path2 = reference2._document_path - doc_path3 = reference3._document_path - doc_path4 = reference4._document_path - self.assertEqual(doc_path1, doc_path3) - - document_paths, reference_map = self._call_fut( - [reference1, reference2, reference3, reference4] - ) - self.assertEqual(document_paths, [doc_path1, doc_path2, doc_path3, doc_path4]) - # reference3 over-rides reference1. 
- expected_map = { - doc_path2: reference2, - doc_path3: reference3, - doc_path4: reference4, - } - self.assertEqual(reference_map, expected_map) - - -class Test__get_reference(unittest.TestCase): - @staticmethod - def _call_fut(document_path, reference_map): - from google.cloud.firestore_v1.client import _get_reference - - return _get_reference(document_path, reference_map) - - def test_success(self): - doc_path = "a/b/c" - reference_map = {doc_path: mock.sentinel.reference} - self.assertIs(self._call_fut(doc_path, reference_map), mock.sentinel.reference) - - def test_failure(self): - from google.cloud.firestore_v1.client import _BAD_DOC_TEMPLATE - - doc_path = "1/888/call-now" - with self.assertRaises(ValueError) as exc_info: - self._call_fut(doc_path, {}) - - err_msg = _BAD_DOC_TEMPLATE.format(doc_path) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - -class Test__parse_batch_get(unittest.TestCase): - @staticmethod - def _call_fut(get_doc_response, reference_map, client=mock.sentinel.client): - from google.cloud.firestore_v1.client import _parse_batch_get - - return _parse_batch_get(get_doc_response, reference_map, client) - - @staticmethod - def _dummy_ref_string(): - from google.cloud.firestore_v1.client import DEFAULT_DATABASE - - project = u"bazzzz" - collection_id = u"fizz" - document_id = u"buzz" - return u"projects/{}/databases/{}/documents/{}/{}".format( - project, DEFAULT_DATABASE, collection_id, document_id - ) - - def test_found(self): - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.firestore_v1.document import DocumentSnapshot - - now = datetime.datetime.utcnow() - read_time = _datetime_to_pb_timestamp(now) - delta = datetime.timedelta(seconds=100) - update_time = _datetime_to_pb_timestamp(now - delta) - create_time = _datetime_to_pb_timestamp(now - 2 * delta) - - ref_string = self._dummy_ref_string() - document_pb = document_pb2.Document( - name=ref_string, - fields={ - "foo": document_pb2.Value(double_value=1.5), - "bar": document_pb2.Value(string_value=u"skillz"), - }, - create_time=create_time, - update_time=update_time, - ) - response_pb = _make_batch_response(found=document_pb, read_time=read_time) - - reference_map = {ref_string: mock.sentinel.reference} - snapshot = self._call_fut(response_pb, reference_map) - self.assertIsInstance(snapshot, DocumentSnapshot) - self.assertIs(snapshot._reference, mock.sentinel.reference) - self.assertEqual(snapshot._data, {"foo": 1.5, "bar": u"skillz"}) - self.assertTrue(snapshot._exists) - self.assertEqual(snapshot.read_time, read_time) - self.assertEqual(snapshot.create_time, create_time) - self.assertEqual(snapshot.update_time, update_time) - - def test_missing(self): - from google.cloud.firestore_v1.document import DocumentReference - - ref_string = self._dummy_ref_string() - response_pb = _make_batch_response(missing=ref_string) - document = DocumentReference("fizz", "bazz", client=mock.sentinel.client) - reference_map = {ref_string: document} - snapshot = self._call_fut(response_pb, reference_map) - self.assertFalse(snapshot.exists) - self.assertEqual(snapshot.id, "bazz") - self.assertIsNone(snapshot._data) - - def test_unset_result_type(self): - response_pb = _make_batch_response() - with self.assertRaises(ValueError): - self._call_fut(response_pb, {}) - - def test_unknown_result_type(self): - response_pb = mock.Mock(spec=["WhichOneof"]) - response_pb.WhichOneof.return_value = "zoob_value" - - with 
self.assertRaises(ValueError): - self._call_fut(response_pb, {}) - - response_pb.WhichOneof.assert_called_once_with("result") - - -class Test__get_doc_mask(unittest.TestCase): - @staticmethod - def _call_fut(field_paths): - from google.cloud.firestore_v1.client import _get_doc_mask - - return _get_doc_mask(field_paths) - - def test_none(self): - self.assertIsNone(self._call_fut(None)) - - def test_paths(self): - from google.cloud.firestore_v1.proto import common_pb2 - - field_paths = ["a.b", "c"] - result = self._call_fut(field_paths) - expected = common_pb2.DocumentMask(field_paths=field_paths) - self.assertEqual(result, expected) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_batch_response(**kwargs): - from google.cloud.firestore_v1.proto import firestore_pb2 - - return firestore_pb2.BatchGetDocumentsResponse(**kwargs) - - -def _doc_get_info(ref_string, values): - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.firestore_v1 import _helpers - - now = datetime.datetime.utcnow() - read_time = _datetime_to_pb_timestamp(now) - delta = datetime.timedelta(seconds=100) - update_time = _datetime_to_pb_timestamp(now - delta) - create_time = _datetime_to_pb_timestamp(now - 2 * delta) - - document_pb = document_pb2.Document( - name=ref_string, - fields=_helpers.encode_dict(values), - create_time=create_time, - update_time=update_time, - ) - - return document_pb, read_time diff --git a/firestore/tests/unit/v1/test_collection.py b/firestore/tests/unit/v1/test_collection.py deleted file mode 100644 index fde538b9db9c..000000000000 --- a/firestore/tests/unit/v1/test_collection.py +++ /dev/null @@ -1,578 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import types -import unittest - -import mock -import six - - -class TestCollectionReference(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.collection import CollectionReference - - return CollectionReference - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - @staticmethod - def _get_public_methods(klass): - return set( - name - for name, value in six.iteritems(klass.__dict__) - if (not name.startswith("_") and isinstance(value, types.FunctionType)) - ) - - def test_query_method_matching(self): - from google.cloud.firestore_v1.query import Query - - query_methods = self._get_public_methods(Query) - klass = self._get_target_class() - collection_methods = self._get_public_methods(klass) - # Make sure every query method is present on - # ``CollectionReference``. 
- self.assertLessEqual(query_methods, collection_methods) - - def test_constructor(self): - collection_id1 = "rooms" - document_id = "roomA" - collection_id2 = "messages" - client = mock.sentinel.client - - collection = self._make_one( - collection_id1, document_id, collection_id2, client=client - ) - self.assertIs(collection._client, client) - expected_path = (collection_id1, document_id, collection_id2) - self.assertEqual(collection._path, expected_path) - - def test_constructor_invalid_path(self): - with self.assertRaises(ValueError): - self._make_one() - with self.assertRaises(ValueError): - self._make_one(99, "doc", "bad-collection-id") - with self.assertRaises(ValueError): - self._make_one("bad-document-ID", None, "sub-collection") - with self.assertRaises(ValueError): - self._make_one("Just", "A-Document") - - def test_constructor_invalid_kwarg(self): - with self.assertRaises(TypeError): - self._make_one("Coh-lek-shun", donut=True) - - def test___eq___other_type(self): - client = mock.sentinel.client - collection = self._make_one("name", client=client) - other = object() - self.assertFalse(collection == other) - - def test___eq___different_path_same_client(self): - client = mock.sentinel.client - collection = self._make_one("name", client=client) - other = self._make_one("other", client=client) - self.assertFalse(collection == other) - - def test___eq___same_path_different_client(self): - client = mock.sentinel.client - other_client = mock.sentinel.other_client - collection = self._make_one("name", client=client) - other = self._make_one("name", client=other_client) - self.assertFalse(collection == other) - - def test___eq___same_path_same_client(self): - client = mock.sentinel.client - collection = self._make_one("name", client=client) - other = self._make_one("name", client=client) - self.assertTrue(collection == other) - - def test_id_property(self): - collection_id = "hi-bob" - collection = self._make_one(collection_id) - self.assertEqual(collection.id, collection_id) - - def test_parent_property(self): - from google.cloud.firestore_v1.document import DocumentReference - - collection_id1 = "grocery-store" - document_id = "market" - collection_id2 = "darth" - client = _make_client() - collection = self._make_one( - collection_id1, document_id, collection_id2, client=client - ) - - parent = collection.parent - self.assertIsInstance(parent, DocumentReference) - self.assertIs(parent._client, client) - self.assertEqual(parent._path, (collection_id1, document_id)) - - def test_parent_property_top_level(self): - collection = self._make_one("tahp-leh-vull") - self.assertIsNone(collection.parent) - - def test_document_factory_explicit_id(self): - from google.cloud.firestore_v1.document import DocumentReference - - collection_id = "grocery-store" - document_id = "market" - client = _make_client() - collection = self._make_one(collection_id, client=client) - - child = collection.document(document_id) - self.assertIsInstance(child, DocumentReference) - self.assertIs(child._client, client) - self.assertEqual(child._path, (collection_id, document_id)) - - @mock.patch( - "google.cloud.firestore_v1.collection._auto_id", - return_value="zorpzorpthreezorp012", - ) - def test_document_factory_auto_id(self, mock_auto_id): - from google.cloud.firestore_v1.document import DocumentReference - - collection_name = "space-town" - client = _make_client() - collection = self._make_one(collection_name, client=client) - - child = collection.document() - self.assertIsInstance(child, DocumentReference) - 
self.assertIs(child._client, client) - self.assertEqual(child._path, (collection_name, mock_auto_id.return_value)) - - mock_auto_id.assert_called_once_with() - - def test__parent_info_top_level(self): - client = _make_client() - collection_id = "soap" - collection = self._make_one(collection_id, client=client) - - parent_path, expected_prefix = collection._parent_info() - - expected_path = "projects/{}/databases/{}/documents".format( - client.project, client._database - ) - self.assertEqual(parent_path, expected_path) - prefix = "{}/{}".format(expected_path, collection_id) - self.assertEqual(expected_prefix, prefix) - - def test__parent_info_nested(self): - collection_id1 = "bar" - document_id = "baz" - collection_id2 = "chunk" - client = _make_client() - collection = self._make_one( - collection_id1, document_id, collection_id2, client=client - ) - - parent_path, expected_prefix = collection._parent_info() - - expected_path = "projects/{}/databases/{}/documents/{}/{}".format( - client.project, client._database, collection_id1, document_id - ) - self.assertEqual(parent_path, expected_path) - prefix = "{}/{}".format(expected_path, collection_id2) - self.assertEqual(expected_prefix, prefix) - - def test_add_auto_assigned(self): - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.document import DocumentReference - from google.cloud.firestore_v1 import SERVER_TIMESTAMP - from google.cloud.firestore_v1._helpers import pbs_for_create - - # Create a minimal fake GAPIC and attach it to a real client. - firestore_api = mock.Mock(spec=["create_document", "commit"]) - write_result = mock.Mock( - update_time=mock.sentinel.update_time, spec=["update_time"] - ) - commit_response = mock.Mock( - write_results=[write_result], - spec=["write_results", "commit_time"], - commit_time=mock.sentinel.commit_time, - ) - firestore_api.commit.return_value = commit_response - create_doc_response = document_pb2.Document() - firestore_api.create_document.return_value = create_doc_response - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a collection. - collection = self._make_one("grand-parent", "parent", "child", client=client) - - # Actually call add() on our collection; include a transform to make - # sure transforms during adds work. - document_data = {"been": "here", "now": SERVER_TIMESTAMP} - - patch = mock.patch("google.cloud.firestore_v1.collection._auto_id") - random_doc_id = "DEADBEEF" - with patch as patched: - patched.return_value = random_doc_id - update_time, document_ref = collection.add(document_data) - - # Verify the response and the mocks. - self.assertIs(update_time, mock.sentinel.update_time) - self.assertIsInstance(document_ref, DocumentReference) - self.assertIs(document_ref._client, client) - expected_path = collection._path + (random_doc_id,) - self.assertEqual(document_ref._path, expected_path) - - write_pbs = pbs_for_create(document_ref._document_path, document_data) - firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=None, - metadata=client._rpc_metadata, - ) - # Since we generate the ID locally, we don't call 'create_document'. 
- firestore_api.create_document.assert_not_called() - - @staticmethod - def _write_pb_for_create(document_path, document_data): - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1 import _helpers - - return write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=_helpers.encode_dict(document_data) - ), - current_document=common_pb2.Precondition(exists=False), - ) - - def test_add_explicit_id(self): - from google.cloud.firestore_v1.document import DocumentReference - - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - write_result = mock.Mock( - update_time=mock.sentinel.update_time, spec=["update_time"] - ) - commit_response = mock.Mock( - write_results=[write_result], - spec=["write_results", "commit_time"], - commit_time=mock.sentinel.commit_time, - ) - firestore_api.commit.return_value = commit_response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a collection and call add(). - collection = self._make_one("parent", client=client) - document_data = {"zorp": 208.75, "i-did-not": b"know that"} - doc_id = "child" - update_time, document_ref = collection.add(document_data, document_id=doc_id) - - # Verify the response and the mocks. - self.assertIs(update_time, mock.sentinel.update_time) - self.assertIsInstance(document_ref, DocumentReference) - self.assertIs(document_ref._client, client) - self.assertEqual(document_ref._path, (collection.id, doc_id)) - - write_pb = self._write_pb_for_create(document_ref._document_path, document_data) - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_select(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - field_paths = ["a", "b"] - query = collection.select(field_paths) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - projection_paths = [ - field_ref.field_path for field_ref in query._projection.fields - ] - self.assertEqual(projection_paths, field_paths) - - @staticmethod - def _make_field_filter_pb(field_path, op_string, value): - from google.cloud.firestore_v1.proto import query_pb2 - from google.cloud.firestore_v1 import _helpers - from google.cloud.firestore_v1.query import _enum_from_op_string - - return query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=_enum_from_op_string(op_string), - value=_helpers.encode_value(value), - ) - - def test_where(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - field_path = "foo" - op_string = "==" - value = 45 - query = collection.where(field_path, op_string, value) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(len(query._field_filters), 1) - field_filter_pb = query._field_filters[0] - self.assertEqual( - field_filter_pb, self._make_field_filter_pb(field_path, op_string, value) - ) - - @staticmethod - def _make_order_pb(field_path, direction): - from google.cloud.firestore_v1.proto import query_pb2 - from google.cloud.firestore_v1.query import _enum_from_direction - - return query_pb2.StructuredQuery.Order( - 
field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - direction=_enum_from_direction(direction), - ) - - def test_order_by(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - field_path = "foo" - direction = Query.DESCENDING - query = collection.order_by(field_path, direction=direction) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(len(query._orders), 1) - order_pb = query._orders[0] - self.assertEqual(order_pb, self._make_order_pb(field_path, direction)) - - def test_limit(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - limit = 15 - query = collection.limit(limit) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._limit, limit) - - def test_offset(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - offset = 113 - query = collection.offset(offset) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._offset, offset) - - def test_start_at(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - doc_fields = {"a": "b"} - query = collection.start_at(doc_fields) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._start_at, (doc_fields, True)) - - def test_start_after(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - doc_fields = {"d": "foo", "e": 10} - query = collection.start_after(doc_fields) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._start_at, (doc_fields, False)) - - def test_end_before(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - doc_fields = {"bar": 10.5} - query = collection.end_before(doc_fields) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._end_at, (doc_fields, True)) - - def test_end_at(self): - from google.cloud.firestore_v1.query import Query - - collection = self._make_one("collection") - doc_fields = {"opportunity": True, "reason": 9} - query = collection.end_at(doc_fields) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._end_at, (doc_fields, False)) - - def _list_documents_helper(self, page_size=None): - from google.api_core.page_iterator import Iterator - from google.api_core.page_iterator import Page - from google.cloud.firestore_v1.document import DocumentReference - from google.cloud.firestore_v1.gapic.firestore_client import FirestoreClient - from google.cloud.firestore_v1.proto.document_pb2 import Document - - class _Iterator(Iterator): - def __init__(self, pages): - super(_Iterator, self).__init__(client=None) - self._pages = pages - - def _next_page(self): - if self._pages: - page, self._pages = self._pages[0], self._pages[1:] - return Page(self, page, self.item_to_value) - - client = _make_client() - template = client._database_string + "/documents/{}" - document_ids = ["doc-1", "doc-2"] - documents = [ - Document(name=template.format(document_id)) for document_id in document_ids - ] - iterator = _Iterator(pages=[documents]) - api_client = mock.create_autospec(FirestoreClient) - 
api_client.list_documents.return_value = iterator - client._firestore_api_internal = api_client - collection = self._make_one("collection", client=client) - - if page_size is not None: - documents = list(collection.list_documents(page_size=page_size)) - else: - documents = list(collection.list_documents()) - - # Verify the response and the mocks. - self.assertEqual(len(documents), len(document_ids)) - for document, document_id in zip(documents, document_ids): - self.assertIsInstance(document, DocumentReference) - self.assertEqual(document.parent, collection) - self.assertEqual(document.id, document_id) - - parent, _ = collection._parent_info() - api_client.list_documents.assert_called_once_with( - parent, - collection.id, - page_size=page_size, - show_missing=True, - metadata=client._rpc_metadata, - ) - - def test_list_documents_wo_page_size(self): - self._list_documents_helper() - - def test_list_documents_w_page_size(self): - self._list_documents_helper(page_size=25) - - @mock.patch("google.cloud.firestore_v1.query.Query", autospec=True) - def test_get(self, query_class): - import warnings - - collection = self._make_one("collection") - with warnings.catch_warnings(record=True) as warned: - get_response = collection.get() - - query_class.assert_called_once_with(collection) - query_instance = query_class.return_value - self.assertIs(get_response, query_instance.stream.return_value) - query_instance.stream.assert_called_once_with(transaction=None) - - # Verify the deprecation - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - @mock.patch("google.cloud.firestore_v1.query.Query", autospec=True) - def test_get_with_transaction(self, query_class): - import warnings - - collection = self._make_one("collection") - transaction = mock.sentinel.txn - with warnings.catch_warnings(record=True) as warned: - get_response = collection.get(transaction=transaction) - - query_class.assert_called_once_with(collection) - query_instance = query_class.return_value - self.assertIs(get_response, query_instance.stream.return_value) - query_instance.stream.assert_called_once_with(transaction=transaction) - - # Verify the deprecation - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - @mock.patch("google.cloud.firestore_v1.query.Query", autospec=True) - def test_stream(self, query_class): - collection = self._make_one("collection") - stream_response = collection.stream() - - query_class.assert_called_once_with(collection) - query_instance = query_class.return_value - self.assertIs(stream_response, query_instance.stream.return_value) - query_instance.stream.assert_called_once_with(transaction=None) - - @mock.patch("google.cloud.firestore_v1.query.Query", autospec=True) - def test_stream_with_transaction(self, query_class): - collection = self._make_one("collection") - transaction = mock.sentinel.txn - stream_response = collection.stream(transaction=transaction) - - query_class.assert_called_once_with(collection) - query_instance = query_class.return_value - self.assertIs(stream_response, query_instance.stream.return_value) - query_instance.stream.assert_called_once_with(transaction=transaction) - - @mock.patch("google.cloud.firestore_v1.collection.Watch", autospec=True) - def test_on_snapshot(self, watch): - collection = self._make_one("collection") - collection.on_snapshot(None) - watch.for_query.assert_called_once() - - -class Test__auto_id(unittest.TestCase): - @staticmethod - def _call_fut(): - from 
google.cloud.firestore_v1.collection import _auto_id - - return _auto_id() - - @mock.patch("random.choice") - def test_it(self, mock_rand_choice): - from google.cloud.firestore_v1.collection import _AUTO_ID_CHARS - - mock_result = "0123456789abcdefghij" - mock_rand_choice.side_effect = list(mock_result) - result = self._call_fut() - self.assertEqual(result, mock_result) - - mock_calls = [mock.call(_AUTO_ID_CHARS)] * 20 - self.assertEqual(mock_rand_choice.mock_calls, mock_calls) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(): - from google.cloud.firestore_v1.client import Client - - credentials = _make_credentials() - return Client(project="project-project", credentials=credentials) diff --git a/firestore/tests/unit/v1/test_cross_language.py b/firestore/tests/unit/v1/test_cross_language.py deleted file mode 100644 index 3e0983cd411d..000000000000 --- a/firestore/tests/unit/v1/test_cross_language.py +++ /dev/null @@ -1,521 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -import glob -import json -import os - -import mock -import pytest - -from google.protobuf import json_format -from google.cloud.firestore_v1.proto import document_pb2 -from google.cloud.firestore_v1.proto import firestore_pb2 -from google.cloud.firestore_v1.proto import tests_pb2 -from google.cloud.firestore_v1.proto import write_pb2 - - -def _load_test_json(filename): - with open(filename, "r") as tp_file: - tp_json = json.load(tp_file) - test_file = tests_pb2.TestFile() - json_format.ParseDict(tp_json, test_file) - shortname = os.path.split(filename)[-1] - for test_proto in test_file.tests: - test_proto.description = test_proto.description + " (%s)" % shortname - yield test_proto - - -_here = os.path.dirname(__file__) -_glob_expr = "{}/testdata/*.json".format(_here) -_globs = glob.glob(_glob_expr) -ALL_TESTPROTOS = [] -for filename in sorted(_globs): - ALL_TESTPROTOS.extend(_load_test_json(filename)) - -_CREATE_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "create" -] - -_GET_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "get" -] - -_SET_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "set" -] - -_UPDATE_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "update" -] - -_UPDATE_PATHS_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "update_paths" -] - -_DELETE_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "delete" -] - -_LISTEN_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "listen" -] - -_QUERY_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if 
test_proto.WhichOneof("test") == "query" -] - - -def _mock_firestore_api(): - firestore_api = mock.Mock(spec=["commit"]) - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult()] - ) - firestore_api.commit.return_value = commit_response - return firestore_api - - -def _make_client_document(firestore_api, testcase): - from google.cloud.firestore_v1 import Client - from google.cloud.firestore_v1.client import DEFAULT_DATABASE - import google.auth.credentials - - _, project, _, database, _, doc_path = testcase.doc_ref_path.split("/", 5) - assert database == DEFAULT_DATABASE - - # Attach the fake GAPIC to a real client. - credentials = mock.Mock(spec=google.auth.credentials.Credentials) - client = Client(project=project, credentials=credentials) - client._firestore_api_internal = firestore_api - return client, client.document(doc_path) - - -def _run_testcase(testcase, call, firestore_api, client): - if getattr(testcase, "is_error", False): - # TODO: is there a subclass of Exception we can check for? - with pytest.raises(Exception): - call() - else: - call() - firestore_api.commit.assert_called_once_with( - client._database_string, - list(testcase.request.writes), - transaction=None, - metadata=client._rpc_metadata, - ) - - -@pytest.mark.parametrize("test_proto", _CREATE_TESTPROTOS) -def test_create_testprotos(test_proto): - testcase = test_proto.create - firestore_api = _mock_firestore_api() - client, document = _make_client_document(firestore_api, testcase) - data = convert_data(json.loads(testcase.json_data)) - call = functools.partial(document.create, data) - _run_testcase(testcase, call, firestore_api, client) - - -@pytest.mark.parametrize("test_proto", _GET_TESTPROTOS) -def test_get_testprotos(test_proto): - testcase = test_proto.get - firestore_api = mock.Mock(spec=["get_document"]) - response = document_pb2.Document() - firestore_api.get_document.return_value = response - client, document = _make_client_document(firestore_api, testcase) - - document.get() # No '.textprotos' for errors, field_paths. 
- - firestore_api.get_document.assert_called_once_with( - document._document_path, - mask=None, - transaction=None, - metadata=client._rpc_metadata, - ) - - -@pytest.mark.parametrize("test_proto", _SET_TESTPROTOS) -def test_set_testprotos(test_proto): - testcase = test_proto.set - firestore_api = _mock_firestore_api() - client, document = _make_client_document(firestore_api, testcase) - data = convert_data(json.loads(testcase.json_data)) - if testcase.HasField("option"): - merge = convert_set_option(testcase.option) - else: - merge = False - call = functools.partial(document.set, data, merge=merge) - _run_testcase(testcase, call, firestore_api, client) - - -@pytest.mark.parametrize("test_proto", _UPDATE_TESTPROTOS) -def test_update_testprotos(test_proto): - testcase = test_proto.update - firestore_api = _mock_firestore_api() - client, document = _make_client_document(firestore_api, testcase) - data = convert_data(json.loads(testcase.json_data)) - if testcase.HasField("precondition"): - option = convert_precondition(testcase.precondition) - else: - option = None - call = functools.partial(document.update, data, option) - _run_testcase(testcase, call, firestore_api, client) - - -@pytest.mark.skip(reason="Python has no way to call update with a list of field paths.") -@pytest.mark.parametrize("test_proto", _UPDATE_PATHS_TESTPROTOS) -def test_update_paths_testprotos(test_proto): # pragma: NO COVER - pass - - -@pytest.mark.parametrize("test_proto", _DELETE_TESTPROTOS) -def test_delete_testprotos(test_proto): - testcase = test_proto.delete - firestore_api = _mock_firestore_api() - client, document = _make_client_document(firestore_api, testcase) - if testcase.HasField("precondition"): - option = convert_precondition(testcase.precondition) - else: - option = None - call = functools.partial(document.delete, option) - _run_testcase(testcase, call, firestore_api, client) - - -@pytest.mark.parametrize("test_proto", _LISTEN_TESTPROTOS) -def test_listen_testprotos(test_proto): # pragma: NO COVER - # test_proto.listen has 'responses' messages, - # 'google.firestore_v1.ListenResponse' - # and then an expected list of 'snapshots' (local 'Snapshot'), containing - # 'docs' (list of 'google.firestore_v1.Document'), - # 'changes' (list of local 'DocChange'), and 'read_time' timestamp.
- from google.cloud.firestore_v1 import Client - from google.cloud.firestore_v1 import DocumentReference - from google.cloud.firestore_v1 import DocumentSnapshot - from google.cloud.firestore_v1 import Watch - import google.auth.credentials - - testcase = test_proto.listen - testname = test_proto.description - - credentials = mock.Mock(spec=google.auth.credentials.Credentials) - client = Client(project="project", credentials=credentials) - modulename = "google.cloud.firestore_v1.watch" - with mock.patch("%s.Watch.ResumableBidiRpc" % modulename, DummyRpc): - with mock.patch( - "%s.Watch.BackgroundConsumer" % modulename, DummyBackgroundConsumer - ): - with mock.patch( # conformance data sets WATCH_TARGET_ID to 1 - "%s.WATCH_TARGET_ID" % modulename, 1 - ): - snapshots = [] - - def callback(keys, applied_changes, read_time): - snapshots.append((keys, applied_changes, read_time)) - - collection = DummyCollection(client=client) - query = DummyQuery(parent=collection) - watch = Watch.for_query( - query, callback, DocumentSnapshot, DocumentReference - ) - # conformance data has db string as this - db_str = "projects/projectID/databases/(default)" - watch._firestore._database_string_internal = db_str - - if testcase.is_error: - try: - for proto in testcase.responses: - watch.on_snapshot(proto) - except RuntimeError: - # listen-target-add-wrong-id.textpro - # listen-target-remove.textpro - pass - - else: - for proto in testcase.responses: - watch.on_snapshot(proto) - - assert len(snapshots) == len(testcase.snapshots) - for i, (expected_snapshot, actual_snapshot) in enumerate( - zip(testcase.snapshots, snapshots) - ): - expected_changes = expected_snapshot.changes - actual_changes = actual_snapshot[1] - if len(expected_changes) != len(actual_changes): - raise AssertionError( - "change length mismatch in %s (snapshot #%s)" - % (testname, i) - ) - for y, (expected_change, actual_change) in enumerate( - zip(expected_changes, actual_changes) - ): - expected_change_kind = expected_change.kind - actual_change_kind = actual_change.type.value - if expected_change_kind != actual_change_kind: - raise AssertionError( - "change type mismatch in %s (snapshot #%s, change #%s')" - % (testname, i, y) - ) - - -@pytest.mark.parametrize("test_proto", _QUERY_TESTPROTOS) -def test_query_testprotos(test_proto): # pragma: NO COVER - testcase = test_proto.query - if testcase.is_error: - with pytest.raises(Exception): - query = parse_query(testcase) - query._to_protobuf() - else: - query = parse_query(testcase) - found = query._to_protobuf() - assert found == testcase.query - - -def convert_data(v): - # Replace the strings 'ServerTimestamp' and 'Delete' with the corresponding - # sentinels. 
- from google.cloud.firestore_v1 import ArrayRemove - from google.cloud.firestore_v1 import ArrayUnion - from google.cloud.firestore_v1 import DELETE_FIELD - from google.cloud.firestore_v1 import SERVER_TIMESTAMP - - if v == "ServerTimestamp": - return SERVER_TIMESTAMP - elif v == "Delete": - return DELETE_FIELD - elif isinstance(v, list): - if v[0] == "ArrayRemove": - return ArrayRemove([convert_data(e) for e in v[1:]]) - if v[0] == "ArrayUnion": - return ArrayUnion([convert_data(e) for e in v[1:]]) - return [convert_data(e) for e in v] - elif isinstance(v, dict): - return {k: convert_data(v2) for k, v2 in v.items()} - elif v == "NaN": - return float(v) - else: - return v - - -def convert_set_option(option): - from google.cloud.firestore_v1 import _helpers - - if option.fields: - return [ - _helpers.FieldPath(*field.field).to_api_repr() for field in option.fields - ] - - assert option.all - return True - - -def convert_precondition(precond): - from google.cloud.firestore_v1 import Client - - if precond.HasField("exists"): - return Client.write_option(exists=precond.exists) - - assert precond.HasField("update_time") - return Client.write_option(last_update_time=precond.update_time) - - -class DummyRpc(object): # pragma: NO COVER - def __init__( - self, - listen, - should_recover, - should_terminate=None, - initial_request=None, - metadata=None, - ): - self.listen = listen - self.initial_request = initial_request - self.should_recover = should_recover - self.should_terminate = should_terminate - self.closed = False - self.callbacks = [] - self._metadata = metadata - - def add_done_callback(self, callback): - self.callbacks.append(callback) - - def close(self): - self.closed = True - - -class DummyBackgroundConsumer(object): # pragma: NO COVER - started = False - stopped = False - is_active = True - - def __init__(self, rpc, on_snapshot): - self._rpc = rpc - self.on_snapshot = on_snapshot - - def start(self): - self.started = True - - def stop(self): - self.stopped = True - self.is_active = False - - -class DummyCollection(object): - def __init__(self, client, parent=None): - self._client = client - self._parent = parent - - def _parent_info(self): - return "{}/documents".format(self._client._database_string), None - - -class DummyQuery(object): # pragma: NO COVER - def __init__(self, parent): - self._parent = parent - self._comparator = lambda x, y: 1 - - @property - def _client(self): - return self._parent._client - - def _to_protobuf(self): - from google.cloud.firestore_v1.proto import query_pb2 - - query_kwargs = { - "select": None, - "from": None, - "where": None, - "order_by": None, - "start_at": None, - "end_at": None, - } - return query_pb2.StructuredQuery(**query_kwargs) - - -def parse_query(testcase): - # 'query' testcase contains: - # - 'coll_path': collection ref path. - # - 'clauses': array of one or more 'Clause' elements - # - 'query': the actual google.firestore_v1.StructuredQuery message - # to be constructed. - # - 'is_error' (as other testcases). 
- # - # 'Clause' elements are unions of: - # - 'select': [field paths] - # - 'where': (field_path, op, json_value) - # - 'order_by': (field_path, direction) - # - 'offset': int - # - 'limit': int - # - 'start_at': 'Cursor' - # - 'start_after': 'Cursor' - # - 'end_at': 'Cursor' - # - 'end_before': 'Cursor' - # - # 'Cursor' contains either: - # - 'doc_snapshot': 'DocSnapshot' - # - 'json_values': [string] - # - # 'DocSnapshot' contains: - # 'path': str - # 'json_data': str - from google.auth.credentials import Credentials - from google.cloud.firestore_v1 import Client - from google.cloud.firestore_v1 import Query - - _directions = {"asc": Query.ASCENDING, "desc": Query.DESCENDING} - - credentials = mock.create_autospec(Credentials) - client = Client("projectID", credentials) - path = parse_path(testcase.coll_path) - collection = client.collection(*path) - query = collection - - for clause in testcase.clauses: - kind = clause.WhichOneof("clause") - - if kind == "select": - field_paths = [ - ".".join(field_path.field) for field_path in clause.select.fields - ] - query = query.select(field_paths) - elif kind == "where": - path = ".".join(clause.where.path.field) - value = convert_data(json.loads(clause.where.json_value)) - query = query.where(path, clause.where.op, value) - elif kind == "order_by": - path = ".".join(clause.order_by.path.field) - direction = clause.order_by.direction - direction = _directions.get(direction, direction) - query = query.order_by(path, direction=direction) - elif kind == "offset": - query = query.offset(clause.offset) - elif kind == "limit": - query = query.limit(clause.limit) - elif kind == "start_at": - cursor = parse_cursor(clause.start_at, client) - query = query.start_at(cursor) - elif kind == "start_after": - cursor = parse_cursor(clause.start_after, client) - query = query.start_after(cursor) - elif kind == "end_at": - cursor = parse_cursor(clause.end_at, client) - query = query.end_at(cursor) - elif kind == "end_before": - cursor = parse_cursor(clause.end_before, client) - query = query.end_before(cursor) - else: # pragma: NO COVER - raise ValueError("Unknown query clause: {}".format(kind)) - - return query - - -def parse_path(path): - _, relative = path.split("documents/") - return relative.split("/") - - -def parse_cursor(cursor, client): - from google.cloud.firestore_v1 import DocumentReference - from google.cloud.firestore_v1 import DocumentSnapshot - - if cursor.HasField("doc_snapshot"): - path = parse_path(cursor.doc_snapshot.path) - doc_ref = DocumentReference(*path, client=client) - - return DocumentSnapshot( - reference=doc_ref, - data=json.loads(cursor.doc_snapshot.json_data), - exists=True, - read_time=None, - create_time=None, - update_time=None, - ) - - values = [json.loads(value) for value in cursor.json_values] - return convert_data(values) diff --git a/firestore/tests/unit/v1/test_document.py b/firestore/tests/unit/v1/test_document.py deleted file mode 100644 index 89a19df674dd..000000000000 --- a/firestore/tests/unit/v1/test_document.py +++ /dev/null @@ -1,825 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import unittest - -import mock - - -class TestDocumentReference(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.document import DocumentReference - - return DocumentReference - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - collection_id1 = "users" - document_id1 = "alovelace" - collection_id2 = "platform" - document_id2 = "*nix" - client = mock.MagicMock() - client.__hash__.return_value = 1234 - - document = self._make_one( - collection_id1, document_id1, collection_id2, document_id2, client=client - ) - self.assertIs(document._client, client) - expected_path = "/".join( - (collection_id1, document_id1, collection_id2, document_id2) - ) - self.assertEqual(document.path, expected_path) - - def test_constructor_invalid_path(self): - with self.assertRaises(ValueError): - self._make_one() - with self.assertRaises(ValueError): - self._make_one(None, "before", "bad-collection-id", "fifteen") - with self.assertRaises(ValueError): - self._make_one("bad-document-ID", None) - with self.assertRaises(ValueError): - self._make_one("Just", "A-Collection", "Sub") - - def test_constructor_invalid_kwarg(self): - with self.assertRaises(TypeError): - self._make_one("Coh-lek-shun", "Dahk-yu-mehnt", burger=18.75) - - def test___copy__(self): - client = _make_client("rain") - document = self._make_one("a", "b", client=client) - # Access the document path so it is copied. - doc_path = document._document_path - self.assertEqual(doc_path, document._document_path_internal) - - new_document = document.__copy__() - self.assertIsNot(new_document, document) - self.assertIs(new_document._client, document._client) - self.assertEqual(new_document._path, document._path) - self.assertEqual( - new_document._document_path_internal, document._document_path_internal - ) - - def test___deepcopy__calls_copy(self): - client = mock.sentinel.client - document = self._make_one("a", "b", client=client) - document.__copy__ = mock.Mock(return_value=mock.sentinel.new_doc, spec=[]) - - unused_memo = {} - new_document = document.__deepcopy__(unused_memo) - self.assertIs(new_document, mock.sentinel.new_doc) - document.__copy__.assert_called_once_with() - - def test__eq__same_type(self): - document1 = self._make_one("X", "YY", client=mock.sentinel.client) - document2 = self._make_one("X", "ZZ", client=mock.sentinel.client) - document3 = self._make_one("X", "YY", client=mock.sentinel.client2) - document4 = self._make_one("X", "YY", client=mock.sentinel.client) - - pairs = ((document1, document2), (document1, document3), (document2, document3)) - for candidate1, candidate2 in pairs: - # We use == explicitly since assertNotEqual would use !=. - equality_val = candidate1 == candidate2 - self.assertFalse(equality_val) - - # Check the only equal one. 
- self.assertEqual(document1, document4) - self.assertIsNot(document1, document4) - - def test__eq__other_type(self): - document = self._make_one("X", "YY", client=mock.sentinel.client) - other = object() - equality_val = document == other - self.assertFalse(equality_val) - self.assertIs(document.__eq__(other), NotImplemented) - - def test___hash__(self): - client = mock.MagicMock() - client.__hash__.return_value = 234566789 - document = self._make_one("X", "YY", client=client) - self.assertEqual(hash(document), hash(("X", "YY")) + hash(client)) - - def test__ne__same_type(self): - document1 = self._make_one("X", "YY", client=mock.sentinel.client) - document2 = self._make_one("X", "ZZ", client=mock.sentinel.client) - document3 = self._make_one("X", "YY", client=mock.sentinel.client2) - document4 = self._make_one("X", "YY", client=mock.sentinel.client) - - self.assertNotEqual(document1, document2) - self.assertNotEqual(document1, document3) - self.assertNotEqual(document2, document3) - - # We use != explicitly since assertEqual would use ==. - inequality_val = document1 != document4 - self.assertFalse(inequality_val) - self.assertIsNot(document1, document4) - - def test__ne__other_type(self): - document = self._make_one("X", "YY", client=mock.sentinel.client) - other = object() - self.assertNotEqual(document, other) - self.assertIs(document.__ne__(other), NotImplemented) - - def test__document_path_property(self): - project = "hi-its-me-ok-bye" - client = _make_client(project=project) - - collection_id = "then" - document_id = "090909iii" - document = self._make_one(collection_id, document_id, client=client) - doc_path = document._document_path - expected = "projects/{}/databases/{}/documents/{}/{}".format( - project, client._database, collection_id, document_id - ) - self.assertEqual(doc_path, expected) - self.assertIs(document._document_path_internal, doc_path) - - # Make sure value is cached. 
- document._document_path_internal = mock.sentinel.cached - self.assertIs(document._document_path, mock.sentinel.cached) - - def test__document_path_property_no_client(self): - document = self._make_one("hi", "bye") - self.assertIsNone(document._client) - with self.assertRaises(ValueError): - getattr(document, "_document_path") - - self.assertIsNone(document._document_path_internal) - - def test_id_property(self): - document_id = "867-5309" - document = self._make_one("Co-lek-shun", document_id) - self.assertEqual(document.id, document_id) - - def test_parent_property(self): - from google.cloud.firestore_v1.collection import CollectionReference - - collection_id = "grocery-store" - document_id = "market" - client = _make_client() - document = self._make_one(collection_id, document_id, client=client) - - parent = document.parent - self.assertIsInstance(parent, CollectionReference) - self.assertIs(parent._client, client) - self.assertEqual(parent._path, (collection_id,)) - - def test_collection_factory(self): - from google.cloud.firestore_v1.collection import CollectionReference - - collection_id = "grocery-store" - document_id = "market" - new_collection = "fruits" - client = _make_client() - document = self._make_one(collection_id, document_id, client=client) - - child = document.collection(new_collection) - self.assertIsInstance(child, CollectionReference) - self.assertIs(child._client, client) - self.assertEqual(child._path, (collection_id, document_id, new_collection)) - - @staticmethod - def _write_pb_for_create(document_path, document_data): - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1 import _helpers - - return write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=_helpers.encode_dict(document_data) - ), - current_document=common_pb2.Precondition(exists=False), - ) - - @staticmethod - def _make_commit_repsonse(write_results=None): - from google.cloud.firestore_v1.proto import firestore_pb2 - - response = mock.create_autospec(firestore_pb2.CommitResponse) - response.write_results = write_results or [mock.sentinel.write_result] - response.commit_time = mock.sentinel.commit_time - return response - - def test_create(self): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("dignity") - client._firestore_api_internal = firestore_api - - # Actually make a document and call create(). - document = self._make_one("foo", "twelve", client=client) - document_data = {"hello": "goodbye", "count": 99} - write_result = document.create(document_data) - - # Verify the response and the mocks. - self.assertIs(write_result, mock.sentinel.write_result) - write_pb = self._write_pb_for_create(document._document_path, document_data) - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_create_empty(self): - # Create a minimal fake GAPIC with a dummy response. 
- from google.cloud.firestore_v1.document import DocumentReference - from google.cloud.firestore_v1.document import DocumentSnapshot - - firestore_api = mock.Mock(spec=["commit"]) - document_reference = mock.create_autospec(DocumentReference) - snapshot = mock.create_autospec(DocumentSnapshot) - snapshot.exists = True - document_reference.get.return_value = snapshot - firestore_api.commit.return_value = self._make_commit_repsonse( - write_results=[document_reference] - ) - - # Attach the fake GAPIC to a real client. - client = _make_client("dignity") - client._firestore_api_internal = firestore_api - client.get_all = mock.MagicMock() - client.get_all.exists.return_value = True - - # Actually make a document and call create(). - document = self._make_one("foo", "twelve", client=client) - document_data = {} - write_result = document.create(document_data) - self.assertTrue(write_result.get().exists) - - @staticmethod - def _write_pb_for_set(document_path, document_data, merge): - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1 import _helpers - - write_pbs = write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=_helpers.encode_dict(document_data) - ) - ) - if merge: - field_paths = [ - field_path - for field_path, value in _helpers.extract_fields( - document_data, _helpers.FieldPath() - ) - ] - field_paths = [ - field_path.to_api_repr() for field_path in sorted(field_paths) - ] - mask = common_pb2.DocumentMask(field_paths=sorted(field_paths)) - write_pbs.update_mask.CopyFrom(mask) - return write_pbs - - def _set_helper(self, merge=False, **option_kwargs): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("db-dee-bee") - client._firestore_api_internal = firestore_api - - # Actually make a document and call set(). - document = self._make_one("User", "Interface", client=client) - document_data = {"And": 500, "Now": b"\xba\xaa\xaa \xba\xaa\xaa"} - write_result = document.set(document_data, merge) - - # Verify the response and the mocks. - self.assertIs(write_result, mock.sentinel.write_result) - write_pb = self._write_pb_for_set(document._document_path, document_data, merge) - - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_set(self): - self._set_helper() - - def test_set_merge(self): - self._set_helper(merge=True) - - @staticmethod - def _write_pb_for_update(document_path, update_values, field_paths): - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1 import _helpers - - return write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=_helpers.encode_dict(update_values) - ), - update_mask=common_pb2.DocumentMask(field_paths=field_paths), - current_document=common_pb2.Precondition(exists=True), - ) - - def _update_helper(self, **option_kwargs): - from google.cloud.firestore_v1.transforms import DELETE_FIELD - - # Create a minimal fake GAPIC with a dummy response.
- firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("potato-chip") - client._firestore_api_internal = firestore_api - - # Actually make a document and call update(). - document = self._make_one("baked", "Alaska", client=client) - # "Cheat" and use OrderedDict-s so that iteritems() is deterministic. - field_updates = collections.OrderedDict( - (("hello", 1), ("then.do", False), ("goodbye", DELETE_FIELD)) - ) - if option_kwargs: - option = client.write_option(**option_kwargs) - write_result = document.update(field_updates, option=option) - else: - option = None - write_result = document.update(field_updates) - - # Verify the response and the mocks. - self.assertIs(write_result, mock.sentinel.write_result) - update_values = { - "hello": field_updates["hello"], - "then": {"do": field_updates["then.do"]}, - } - field_paths = list(field_updates.keys()) - write_pb = self._write_pb_for_update( - document._document_path, update_values, sorted(field_paths) - ) - if option is not None: - option.modify_write(write_pb) - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_update_with_exists(self): - with self.assertRaises(ValueError): - self._update_helper(exists=True) - - def test_update(self): - self._update_helper() - - def test_update_with_precondition(self): - from google.protobuf import timestamp_pb2 - - timestamp = timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244) - self._update_helper(last_update_time=timestamp) - - def test_empty_update(self): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("potato-chip") - client._firestore_api_internal = firestore_api - - # Actually make a document and call update(). - document = self._make_one("baked", "Alaska", client=client) - # An empty update should raise ValueError. - field_updates = {} - with self.assertRaises(ValueError): - document.update(field_updates) - - def _delete_helper(self, **option_kwargs): - from google.cloud.firestore_v1.proto import write_pb2 - - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("donut-base") - client._firestore_api_internal = firestore_api - - # Actually make a document and call delete(). - document = self._make_one("where", "we-are", client=client) - if option_kwargs: - option = client.write_option(**option_kwargs) - delete_time = document.delete(option=option) - else: - option = None - delete_time = document.delete() - - # Verify the response and the mocks.
- self.assertIs(delete_time, mock.sentinel.commit_time) - write_pb = write_pb2.Write(delete=document._document_path) - if option is not None: - option.modify_write(write_pb) - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_delete(self): - self._delete_helper() - - def test_delete_with_option(self): - from google.protobuf import timestamp_pb2 - - timestamp_pb = timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244) - self._delete_helper(last_update_time=timestamp_pb) - - def _get_helper(self, field_paths=None, use_transaction=False, not_found=False): - from google.api_core.exceptions import NotFound - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.transaction import Transaction - - # Create a minimal fake GAPIC with a dummy response. - create_time = 123 - update_time = 234 - firestore_api = mock.Mock(spec=["get_document"]) - response = mock.create_autospec(document_pb2.Document) - response.fields = {} - response.create_time = create_time - response.update_time = update_time - - if not_found: - firestore_api.get_document.side_effect = NotFound("testing") - else: - firestore_api.get_document.return_value = response - - client = _make_client("donut-base") - client._firestore_api_internal = firestore_api - - document = self._make_one("where", "we-are", client=client) - - if use_transaction: - transaction = Transaction(client) - transaction_id = transaction._id = b"asking-me-2" - else: - transaction = None - - snapshot = document.get(field_paths=field_paths, transaction=transaction) - - self.assertIs(snapshot.reference, document) - if not_found: - self.assertIsNone(snapshot._data) - self.assertFalse(snapshot.exists) - self.assertIsNone(snapshot.read_time) - self.assertIsNone(snapshot.create_time) - self.assertIsNone(snapshot.update_time) - else: - self.assertEqual(snapshot.to_dict(), {}) - self.assertTrue(snapshot.exists) - self.assertIsNone(snapshot.read_time) - self.assertIs(snapshot.create_time, create_time) - self.assertIs(snapshot.update_time, update_time) - - # Verify the request made to the API - if field_paths is not None: - mask = common_pb2.DocumentMask(field_paths=sorted(field_paths)) - else: - mask = None - - if use_transaction: - expected_transaction_id = transaction_id - else: - expected_transaction_id = None - - firestore_api.get_document.assert_called_once_with( - document._document_path, - mask=mask, - transaction=expected_transaction_id, - metadata=client._rpc_metadata, - ) - - def test_get_not_found(self): - self._get_helper(not_found=True) - - def test_get_default(self): - self._get_helper() - - def test_get_w_string_field_path(self): - with self.assertRaises(ValueError): - self._get_helper(field_paths="foo") - - def test_get_with_field_path(self): - self._get_helper(field_paths=["foo"]) - - def test_get_with_multiple_field_paths(self): - self._get_helper(field_paths=["foo", "bar.baz"]) - - def test_get_with_transaction(self): - self._get_helper(use_transaction=True) - - def _collections_helper(self, page_size=None): - from google.api_core.page_iterator import Iterator - from google.api_core.page_iterator import Page - from google.cloud.firestore_v1.collection import CollectionReference - from google.cloud.firestore_v1.gapic.firestore_client import FirestoreClient - - class _Iterator(Iterator): - def __init__(self, pages): - super(_Iterator, 
self).__init__(client=None) - self._pages = pages - - def _next_page(self): - if self._pages: - page, self._pages = self._pages[0], self._pages[1:] - return Page(self, page, self.item_to_value) - - collection_ids = ["coll-1", "coll-2"] - iterator = _Iterator(pages=[collection_ids]) - api_client = mock.create_autospec(FirestoreClient) - api_client.list_collection_ids.return_value = iterator - - client = _make_client() - client._firestore_api_internal = api_client - - # Actually make a document and call collections(). - document = self._make_one("where", "we-are", client=client) - if page_size is not None: - collections = list(document.collections(page_size=page_size)) - else: - collections = list(document.collections()) - - # Verify the response and the mocks. - self.assertEqual(len(collections), len(collection_ids)) - for collection, collection_id in zip(collections, collection_ids): - self.assertIsInstance(collection, CollectionReference) - self.assertEqual(collection.parent, document) - self.assertEqual(collection.id, collection_id) - - api_client.list_collection_ids.assert_called_once_with( - document._document_path, page_size=page_size, metadata=client._rpc_metadata - ) - - def test_collections_wo_page_size(self): - self._collections_helper() - - def test_collections_w_page_size(self): - self._collections_helper(page_size=10) - - @mock.patch("google.cloud.firestore_v1.document.Watch", autospec=True) - def test_on_snapshot(self, watch): - client = mock.Mock(_database_string="sprinklez", spec=["_database_string"]) - document = self._make_one("yellow", "mellow", client=client) - document.on_snapshot(None) - watch.for_document.assert_called_once() - - -class TestDocumentSnapshot(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.document import DocumentSnapshot - - return DocumentSnapshot - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def _make_reference(self, *args, **kwargs): - from google.cloud.firestore_v1.document import DocumentReference - - return DocumentReference(*args, **kwargs) - - def _make_w_ref(self, ref_path=("a", "b"), data={}, exists=True): - client = mock.sentinel.client - reference = self._make_reference(*ref_path, client=client) - return self._make_one( - reference, - data, - exists, - mock.sentinel.read_time, - mock.sentinel.create_time, - mock.sentinel.update_time, - ) - - def test_constructor(self): - client = mock.sentinel.client - reference = self._make_reference("hi", "bye", client=client) - data = {"zoop": 83} - snapshot = self._make_one( - reference, - data, - True, - mock.sentinel.read_time, - mock.sentinel.create_time, - mock.sentinel.update_time, - ) - self.assertIs(snapshot._reference, reference) - self.assertEqual(snapshot._data, data) - self.assertIsNot(snapshot._data, data) # Make sure copied.
- self.assertTrue(snapshot._exists) - self.assertIs(snapshot.read_time, mock.sentinel.read_time) - self.assertIs(snapshot.create_time, mock.sentinel.create_time) - self.assertIs(snapshot.update_time, mock.sentinel.update_time) - - def test___eq___other_type(self): - snapshot = self._make_w_ref() - other = object() - self.assertFalse(snapshot == other) - - def test___eq___different_reference_same_data(self): - snapshot = self._make_w_ref(("a", "b")) - other = self._make_w_ref(("c", "d")) - self.assertFalse(snapshot == other) - - def test___eq___same_reference_different_data(self): - snapshot = self._make_w_ref(("a", "b")) - other = self._make_w_ref(("a", "b"), {"foo": "bar"}) - self.assertFalse(snapshot == other) - - def test___eq___same_reference_same_data(self): - snapshot = self._make_w_ref(("a", "b"), {"foo": "bar"}) - other = self._make_w_ref(("a", "b"), {"foo": "bar"}) - self.assertTrue(snapshot == other) - - def test___hash__(self): - from google.protobuf import timestamp_pb2 - - client = mock.MagicMock() - client.__hash__.return_value = 234566789 - reference = self._make_reference("hi", "bye", client=client) - data = {"zoop": 83} - update_time = timestamp_pb2.Timestamp(seconds=123456, nanos=123456789) - snapshot = self._make_one( - reference, data, True, None, mock.sentinel.create_time, update_time - ) - self.assertEqual( - hash(snapshot), hash(reference) + hash(123456) + hash(123456789) - ) - - def test__client_property(self): - reference = self._make_reference( - "ok", "fine", "now", "fore", client=mock.sentinel.client - ) - snapshot = self._make_one(reference, {}, False, None, None, None) - self.assertIs(snapshot._client, mock.sentinel.client) - - def test_exists_property(self): - reference = mock.sentinel.reference - - snapshot1 = self._make_one(reference, {}, False, None, None, None) - self.assertFalse(snapshot1.exists) - snapshot2 = self._make_one(reference, {}, True, None, None, None) - self.assertTrue(snapshot2.exists) - - def test_id_property(self): - document_id = "around" - reference = self._make_reference( - "look", document_id, client=mock.sentinel.client - ) - snapshot = self._make_one(reference, {}, True, None, None, None) - self.assertEqual(snapshot.id, document_id) - self.assertEqual(reference.id, document_id) - - def test_reference_property(self): - snapshot = self._make_one(mock.sentinel.reference, {}, True, None, None, None) - self.assertIs(snapshot.reference, mock.sentinel.reference) - - def test_get(self): - data = {"one": {"bold": "move"}} - snapshot = self._make_one(None, data, True, None, None, None) - - first_read = snapshot.get("one") - second_read = snapshot.get("one") - self.assertEqual(first_read, data.get("one")) - self.assertIsNot(first_read, data.get("one")) - self.assertEqual(first_read, second_read) - self.assertIsNot(first_read, second_read) - - with self.assertRaises(KeyError): - snapshot.get("two") - - def test_nonexistent_snapshot(self): - snapshot = self._make_one(None, None, False, None, None, None) - self.assertIsNone(snapshot.get("one")) - - def test_to_dict(self): - data = {"a": 10, "b": ["definitely", "mutable"], "c": {"45": 50}} - snapshot = self._make_one(None, data, True, None, None, None) - as_dict = snapshot.to_dict() - self.assertEqual(as_dict, data) - self.assertIsNot(as_dict, data) - # Check that the data remains unchanged. 
- as_dict["b"].append("hi") - self.assertEqual(data, snapshot.to_dict()) - self.assertNotEqual(data, as_dict) - - def test_non_existent(self): - snapshot = self._make_one(None, None, False, None, None, None) - as_dict = snapshot.to_dict() - self.assertIsNone(as_dict) - - -class Test__get_document_path(unittest.TestCase): - @staticmethod - def _call_fut(client, path): - from google.cloud.firestore_v1.document import _get_document_path - - return _get_document_path(client, path) - - def test_it(self): - project = "prah-jekt" - client = _make_client(project=project) - path = ("Some", "Document", "Child", "Shockument") - document_path = self._call_fut(client, path) - - expected = "projects/{}/databases/{}/documents/{}".format( - project, client._database, "/".join(path) - ) - self.assertEqual(document_path, expected) - - -class Test__consume_single_get(unittest.TestCase): - @staticmethod - def _call_fut(response_iterator): - from google.cloud.firestore_v1.document import _consume_single_get - - return _consume_single_get(response_iterator) - - def test_success(self): - response_iterator = iter([mock.sentinel.result]) - result = self._call_fut(response_iterator) - self.assertIs(result, mock.sentinel.result) - - def test_failure_not_enough(self): - response_iterator = iter([]) - with self.assertRaises(ValueError): - self._call_fut(response_iterator) - - def test_failure_too_many(self): - response_iterator = iter([None, None]) - with self.assertRaises(ValueError): - self._call_fut(response_iterator) - - -class Test__first_write_result(unittest.TestCase): - @staticmethod - def _call_fut(write_results): - from google.cloud.firestore_v1.document import _first_write_result - - return _first_write_result(write_results) - - def test_success(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - single_result = write_pb2.WriteResult( - update_time=timestamp_pb2.Timestamp(seconds=1368767504, nanos=458000123) - ) - write_results = [single_result] - result = self._call_fut(write_results) - self.assertIs(result, single_result) - - def test_failure_not_enough(self): - write_results = [] - with self.assertRaises(ValueError): - self._call_fut(write_results) - - def test_more_than_one(self): - from google.cloud.firestore_v1.proto import write_pb2 - - result1 = write_pb2.WriteResult() - result2 = write_pb2.WriteResult() - write_results = [result1, result2] - result = self._call_fut(write_results) - self.assertIs(result, result1) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="project-project"): - from google.cloud.firestore_v1.client import Client - - credentials = _make_credentials() - return Client(project=project, credentials=credentials) diff --git a/firestore/tests/unit/v1/test_field_path.py b/firestore/tests/unit/v1/test_field_path.py deleted file mode 100644 index 55aefab4c152..000000000000 --- a/firestore/tests/unit/v1/test_field_path.py +++ /dev/null @@ -1,500 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import mock - - -class Test__tokenize_field_path(unittest.TestCase): - @staticmethod - def _call_fut(path): - from google.cloud.firestore_v1 import field_path - - return field_path._tokenize_field_path(path) - - def _expect(self, path, split_path): - self.assertEqual(list(self._call_fut(path)), split_path) - - def test_w_empty(self): - self._expect("", []) - - def test_w_single_dot(self): - self._expect(".", ["."]) - - def test_w_single_simple(self): - self._expect("abc", ["abc"]) - - def test_w_single_quoted(self): - self._expect("`c*de`", ["`c*de`"]) - - def test_w_quoted_embedded_dot(self): - self._expect("`c*.de`", ["`c*.de`"]) - - def test_w_quoted_escaped_backtick(self): - self._expect(r"`c*\`de`", [r"`c*\`de`"]) - - def test_w_dotted_quoted(self): - self._expect("`*`.`~`", ["`*`", ".", "`~`"]) - - def test_w_dotted(self): - self._expect("a.b.`c*de`", ["a", ".", "b", ".", "`c*de`"]) - - def test_w_dotted_escaped(self): - self._expect("_0.`1`.`+2`", ["_0", ".", "`1`", ".", "`+2`"]) - - def test_w_unconsumed_characters(self): - path = "a~b" - with self.assertRaises(ValueError): - list(self._call_fut(path)) - - -class Test_split_field_path(unittest.TestCase): - @staticmethod - def _call_fut(path): - from google.cloud.firestore_v1 import field_path - - return field_path.split_field_path(path) - - def test_w_single_dot(self): - with self.assertRaises(ValueError): - self._call_fut(".") - - def test_w_leading_dot(self): - with self.assertRaises(ValueError): - self._call_fut(".a.b.c") - - def test_w_trailing_dot(self): - with self.assertRaises(ValueError): - self._call_fut("a.b.") - - def test_w_missing_dot(self): - with self.assertRaises(ValueError): - self._call_fut("a`c*de`f") - - def test_w_half_quoted_field(self): - with self.assertRaises(ValueError): - self._call_fut("`c*de") - - def test_w_empty(self): - self.assertEqual(self._call_fut(""), []) - - def test_w_simple_field(self): - self.assertEqual(self._call_fut("a"), ["a"]) - - def test_w_dotted_field(self): - self.assertEqual(self._call_fut("a.b.cde"), ["a", "b", "cde"]) - - def test_w_quoted_field(self): - self.assertEqual(self._call_fut("a.b.`c*de`"), ["a", "b", "`c*de`"]) - - def test_w_quoted_field_escaped_backtick(self): - self.assertEqual(self._call_fut(r"`c*\`de`"), [r"`c*\`de`"]) - - -class Test_parse_field_path(unittest.TestCase): - @staticmethod - def _call_fut(path): - from google.cloud.firestore_v1 import field_path - - return field_path.parse_field_path(path) - - def test_wo_escaped_names(self): - self.assertEqual(self._call_fut("a.b.c"), ["a", "b", "c"]) - - def test_w_escaped_backtick(self): - self.assertEqual(self._call_fut("`a\\`b`.c.d"), ["a`b", "c", "d"]) - - def test_w_escaped_backslash(self): - self.assertEqual(self._call_fut("`a\\\\b`.c.d"), ["a\\b", "c", "d"]) - - def test_w_first_name_escaped_wo_closing_backtick(self): - with self.assertRaises(ValueError): - self._call_fut("`a\\`b.c.d") - - -class Test_render_field_path(unittest.TestCase): - @staticmethod - def _call_fut(field_names): - from google.cloud.firestore_v1 import field_path - - return 
field_path.render_field_path(field_names) - - def test_w_empty(self): - self.assertEqual(self._call_fut([]), "") - - def test_w_one_simple(self): - self.assertEqual(self._call_fut(["a"]), "a") - - def test_w_one_starts_w_digit(self): - self.assertEqual(self._call_fut(["0abc"]), "`0abc`") - - def test_w_one_w_non_alphanum(self): - self.assertEqual(self._call_fut(["a b c"]), "`a b c`") - - def test_w_one_w_backtick(self): - self.assertEqual(self._call_fut(["a`b"]), "`a\\`b`") - - def test_w_one_w_backslash(self): - self.assertEqual(self._call_fut(["a\\b"]), "`a\\\\b`") - - def test_multiple(self): - self.assertEqual(self._call_fut(["a", "b", "c"]), "a.b.c") - - -class Test_get_nested_value(unittest.TestCase): - - DATA = { - "top1": {"middle2": {"bottom3": 20, "bottom4": 22}, "middle5": True}, - "top6": b"\x00\x01 foo", - } - - @staticmethod - def _call_fut(path, data): - from google.cloud.firestore_v1 import field_path - - return field_path.get_nested_value(path, data) - - def test_simple(self): - self.assertIs(self._call_fut("top1", self.DATA), self.DATA["top1"]) - - def test_nested(self): - self.assertIs( - self._call_fut("top1.middle2", self.DATA), self.DATA["top1"]["middle2"] - ) - self.assertIs( - self._call_fut("top1.middle2.bottom3", self.DATA), - self.DATA["top1"]["middle2"]["bottom3"], - ) - - def test_missing_top_level(self): - from google.cloud.firestore_v1.field_path import _FIELD_PATH_MISSING_TOP - - field_path = "top8" - with self.assertRaises(KeyError) as exc_info: - self._call_fut(field_path, self.DATA) - - err_msg = _FIELD_PATH_MISSING_TOP.format(field_path) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - def test_missing_key(self): - from google.cloud.firestore_v1.field_path import _FIELD_PATH_MISSING_KEY - - with self.assertRaises(KeyError) as exc_info: - self._call_fut("top1.middle2.nope", self.DATA) - - err_msg = _FIELD_PATH_MISSING_KEY.format("nope", "top1.middle2") - self.assertEqual(exc_info.exception.args, (err_msg,)) - - def test_bad_type(self): - from google.cloud.firestore_v1.field_path import _FIELD_PATH_WRONG_TYPE - - with self.assertRaises(KeyError) as exc_info: - self._call_fut("top6.middle7", self.DATA) - - err_msg = _FIELD_PATH_WRONG_TYPE.format("top6", "middle7") - self.assertEqual(exc_info.exception.args, (err_msg,)) - - -class TestFieldPath(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1 import field_path - - return field_path.FieldPath - - def _make_one(self, *args): - klass = self._get_target_class() - return klass(*args) - - def test_ctor_w_none_in_part(self): - with self.assertRaises(ValueError): - self._make_one("a", None, "b") - - def test_ctor_w_empty_string_in_part(self): - with self.assertRaises(ValueError): - self._make_one("a", "", "b") - - def test_ctor_w_integer_part(self): - with self.assertRaises(ValueError): - self._make_one("a", 3, "b") - - def test_ctor_w_list(self): - parts = ["a", "b", "c"] - with self.assertRaises(ValueError): - self._make_one(parts) - - def test_ctor_w_tuple(self): - parts = ("a", "b", "c") - with self.assertRaises(ValueError): - self._make_one(parts) - - def test_ctor_w_iterable_part(self): - with self.assertRaises(ValueError): - self._make_one("a", ["a"], "b") - - def test_constructor_w_single_part(self): - field_path = self._make_one("a") - self.assertEqual(field_path.parts, ("a",)) - - def test_constructor_w_multiple_parts(self): - field_path = self._make_one("a", "b", "c") - self.assertEqual(field_path.parts, ("a", "b", "c")) - - def 
test_ctor_w_invalid_chars_in_part(self): - invalid_parts = ("~", "*", "/", "[", "]", ".") - for invalid_part in invalid_parts: - field_path = self._make_one(invalid_part) - self.assertEqual(field_path.parts, (invalid_part,)) - - def test_ctor_w_double_dots(self): - field_path = self._make_one("a..b") - self.assertEqual(field_path.parts, ("a..b",)) - - def test_ctor_w_unicode(self): - field_path = self._make_one("一", "二", "三") - self.assertEqual(field_path.parts, ("一", "二", "三")) - - def test_from_api_repr_w_empty_string(self): - api_repr = "" - with self.assertRaises(ValueError): - self._get_target_class().from_api_repr(api_repr) - - def test_from_api_repr_w_empty_field_name(self): - api_repr = "a..b" - with self.assertRaises(ValueError): - self._get_target_class().from_api_repr(api_repr) - - def test_from_api_repr_w_invalid_chars(self): - invalid_parts = ("~", "*", "/", "[", "]", ".") - for invalid_part in invalid_parts: - with self.assertRaises(ValueError): - self._get_target_class().from_api_repr(invalid_part) - - def test_from_api_repr_w_ascii_single(self): - api_repr = "a" - field_path = self._get_target_class().from_api_repr(api_repr) - self.assertEqual(field_path.parts, ("a",)) - - def test_from_api_repr_w_ascii_dotted(self): - api_repr = "a.b.c" - field_path = self._get_target_class().from_api_repr(api_repr) - self.assertEqual(field_path.parts, ("a", "b", "c")) - - def test_from_api_repr_w_non_ascii_dotted_non_quoted(self): - api_repr = "a.一" - with self.assertRaises(ValueError): - self._get_target_class().from_api_repr(api_repr) - - def test_from_api_repr_w_non_ascii_dotted_quoted(self): - api_repr = "a.`一`" - field_path = self._get_target_class().from_api_repr(api_repr) - self.assertEqual(field_path.parts, ("a", "一")) - - def test_from_string_w_empty_string(self): - path_string = "" - with self.assertRaises(ValueError): - self._get_target_class().from_string(path_string) - - def test_from_string_w_empty_field_name(self): - path_string = "a..b" - with self.assertRaises(ValueError): - self._get_target_class().from_string(path_string) - - def test_from_string_w_leading_dot(self): - path_string = ".b.c" - with self.assertRaises(ValueError): - self._get_target_class().from_string(path_string) - - def test_from_string_w_trailing_dot(self): - path_string = "a.b." 
- with self.assertRaises(ValueError): - self._get_target_class().from_string(path_string) - - def test_from_string_w_leading_invalid_chars(self): - invalid_paths = ("~", "*", "/", "[", "]") - for invalid_path in invalid_paths: - field_path = self._get_target_class().from_string(invalid_path) - self.assertEqual(field_path.parts, (invalid_path,)) - - def test_from_string_w_embedded_invalid_chars(self): - invalid_paths = ("a~b", "x*y", "f/g", "h[j", "k]l") - for invalid_path in invalid_paths: - with self.assertRaises(ValueError): - self._get_target_class().from_string(invalid_path) - - def test_from_string_w_ascii_single(self): - path_string = "a" - field_path = self._get_target_class().from_string(path_string) - self.assertEqual(field_path.parts, ("a",)) - - def test_from_string_w_ascii_dotted(self): - path_string = "a.b.c" - field_path = self._get_target_class().from_string(path_string) - self.assertEqual(field_path.parts, ("a", "b", "c")) - - def test_from_string_w_non_ascii_dotted(self): - path_string = "a.一" - field_path = self._get_target_class().from_string(path_string) - self.assertEqual(field_path.parts, ("a", "一")) - - def test___hash___w_single_part(self): - field_path = self._make_one("a") - self.assertEqual(hash(field_path), hash("a")) - - def test___hash___w_multiple_parts(self): - field_path = self._make_one("a", "b") - self.assertEqual(hash(field_path), hash("a.b")) - - def test___hash___w_escaped_parts(self): - field_path = self._make_one("a", "3") - self.assertEqual(hash(field_path), hash("a.`3`")) - - def test___eq___w_matching_type(self): - field_path = self._make_one("a", "b") - string_path = self._get_target_class().from_string("a.b") - self.assertEqual(field_path, string_path) - - def test___eq___w_non_matching_type(self): - field_path = self._make_one("a", "c") - other = mock.Mock() - other.parts = "a", "b" - self.assertNotEqual(field_path, other) - - def test___lt___w_matching_type(self): - field_path = self._make_one("a", "b") - string_path = self._get_target_class().from_string("a.c") - self.assertTrue(field_path < string_path) - - def test___lt___w_non_matching_type(self): - field_path = self._make_one("a", "b") - other = object() - # Python 2 doesn't raise TypeError here, but Python3 does. - self.assertIs(field_path.__lt__(other), NotImplemented) - - def test___add__(self): - path1 = "a123", "b456" - path2 = "c789", "d012" - path3 = "c789.d012" - field_path1 = self._make_one(*path1) - field_path1_string = self._make_one(*path1) - field_path2 = self._make_one(*path2) - field_path1 += field_path2 - field_path1_string += path3 - field_path2 = field_path2 + self._make_one(*path1) - self.assertEqual(field_path1, self._make_one(*(path1 + path2))) - self.assertEqual(field_path2, self._make_one(*(path2 + path1))) - self.assertEqual(field_path1_string, field_path1) - self.assertNotEqual(field_path1, field_path2) - with self.assertRaises(TypeError): - field_path1 + 305 - - def test_to_api_repr_a(self): - parts = "a" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "a") - - def test_to_api_repr_backtick(self): - parts = "`" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), r"`\``") - - def test_to_api_repr_dot(self): - parts = "." 
- field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "`.`") - - def test_to_api_repr_slash(self): - parts = "\\" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), r"`\\`") - - def test_to_api_repr_double_slash(self): - parts = r"\\" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), r"`\\\\`") - - def test_to_api_repr_underscore(self): - parts = "_33132" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "_33132") - - def test_to_api_repr_unicode_non_simple(self): - parts = "一" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "`一`") - - def test_to_api_repr_number_non_simple(self): - parts = "03" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "`03`") - - def test_to_api_repr_simple_with_dot(self): - field_path = self._make_one("a.b") - self.assertEqual(field_path.to_api_repr(), "`a.b`") - - def test_to_api_repr_non_simple_with_dot(self): - parts = "a.一" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "`a.一`") - - def test_to_api_repr_simple(self): - parts = "a0332432" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "a0332432") - - def test_to_api_repr_chain(self): - parts = "a", "`", "\\", "_3", "03", "a03", "\\\\", "a0332432", "一" - field_path = self._make_one(*parts) - self.assertEqual( - field_path.to_api_repr(), r"a.`\``.`\\`._3.`03`.a03.`\\\\`.a0332432.`一`" - ) - - def test_eq_or_parent_same(self): - field_path = self._make_one("a", "b") - other = self._make_one("a", "b") - self.assertTrue(field_path.eq_or_parent(other)) - - def test_eq_or_parent_prefix(self): - field_path = self._make_one("a", "b") - other = self._make_one("a", "b", "c") - self.assertTrue(field_path.eq_or_parent(other)) - self.assertTrue(other.eq_or_parent(field_path)) - - def test_eq_or_parent_no_prefix(self): - field_path = self._make_one("a", "b") - other = self._make_one("d", "e", "f") - self.assertFalse(field_path.eq_or_parent(other)) - self.assertFalse(other.eq_or_parent(field_path)) - - def test_lineage_empty(self): - field_path = self._make_one() - expected = set() - self.assertEqual(field_path.lineage(), expected) - - def test_lineage_single(self): - field_path = self._make_one("a") - expected = set() - self.assertEqual(field_path.lineage(), expected) - - def test_lineage_nested(self): - field_path = self._make_one("a", "b", "c") - expected = set([self._make_one("a"), self._make_one("a", "b")]) - self.assertEqual(field_path.lineage(), expected) - - def test_document_id(self): - parts = "__name__" - field_path = self._make_one(parts) - self.assertEqual(field_path.document_id(), parts) diff --git a/firestore/tests/unit/v1/test_order.py b/firestore/tests/unit/v1/test_order.py deleted file mode 100644 index c37e2470a3ec..000000000000 --- a/firestore/tests/unit/v1/test_order.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http:#www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -import six -import unittest - -from google.cloud.firestore_v1._helpers import encode_value, GeoPoint -from google.cloud.firestore_v1.order import Order -from google.cloud.firestore_v1.order import TypeOrder - -from google.cloud.firestore_v1.proto import document_pb2 - -from google.protobuf import timestamp_pb2 - - -class TestOrder(unittest.TestCase): - - if six.PY2: - assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.order import Order - - return Order - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_order(self): - # Constants used to represent min/max values of storage types. - int_max_value = 2 ** 31 - 1 - int_min_value = -(2 ** 31) - float_min_value = 1.175494351 ** -38 - float_nan = float("nan") - inf = float("inf") - - groups = [None] * 65 - - groups[0] = [nullValue()] - - groups[1] = [_boolean_value(False)] - groups[2] = [_boolean_value(True)] - - # numbers - groups[3] = [_double_value(float_nan), _double_value(float_nan)] - groups[4] = [_double_value(-inf)] - groups[5] = [_int_value(int_min_value - 1)] - groups[6] = [_int_value(int_min_value)] - groups[7] = [_double_value(-1.1)] - # Integers and Doubles order the same. - groups[8] = [_int_value(-1), _double_value(-1.0)] - groups[9] = [_double_value(-float_min_value)] - # zeros all compare the same. - groups[10] = [ - _int_value(0), - _double_value(-0.0), - _double_value(0.0), - _double_value(+0.0), - ] - groups[11] = [_double_value(float_min_value)] - groups[12] = [_int_value(1), _double_value(1.0)] - groups[13] = [_double_value(1.1)] - groups[14] = [_int_value(int_max_value)] - groups[15] = [_int_value(int_max_value + 1)] - groups[16] = [_double_value(inf)] - - groups[17] = [_timestamp_value(123, 0)] - groups[18] = [_timestamp_value(123, 123)] - groups[19] = [_timestamp_value(345, 0)] - - # strings - groups[20] = [_string_value("")] - groups[21] = [_string_value("\u0000\ud7ff\ue000\uffff")] - groups[22] = [_string_value("(╯°□°)╯︵ ┻━┻")] - groups[23] = [_string_value("a")] - groups[24] = [_string_value("abc def")] - # latin small letter e + combining acute accent + latin small letter b - groups[25] = [_string_value("e\u0301b")] - groups[26] = [_string_value("æ")] - # latin small letter e with acute accent + latin small letter a - groups[27] = [_string_value("\u00e9a")] - - # blobs - groups[28] = [_blob_value(b"")] - groups[29] = [_blob_value(b"\x00")] - groups[30] = [_blob_value(b"\x00\x01\x02\x03\x04")] - groups[31] = [_blob_value(b"\x00\x01\x02\x04\x03")] - groups[32] = [_blob_value(b"\x7f")] - - # resource names - groups[33] = [_reference_value("projects/p1/databases/d1/documents/c1/doc1")] - groups[34] = [_reference_value("projects/p1/databases/d1/documents/c1/doc2")] - groups[35] = [ - _reference_value("projects/p1/databases/d1/documents/c1/doc2/c2/doc1") - ] - groups[36] = [ - _reference_value("projects/p1/databases/d1/documents/c1/doc2/c2/doc2") - ] - groups[37] = [_reference_value("projects/p1/databases/d1/documents/c10/doc1")] - groups[38] = [_reference_value("projects/p1/databases/d1/documents/c2/doc1")] - groups[39] = [_reference_value("projects/p2/databases/d2/documents/c1/doc1")] - groups[40] = [_reference_value("projects/p2/databases/d2/documents/c1-/doc1")] - groups[41] = [_reference_value("projects/p2/databases/d3/documents/c1-/doc1")] - - # 
geo points - groups[42] = [_geoPoint_value(-90, -180)] - groups[43] = [_geoPoint_value(-90, 0)] - groups[44] = [_geoPoint_value(-90, 180)] - groups[45] = [_geoPoint_value(0, -180)] - groups[46] = [_geoPoint_value(0, 0)] - groups[47] = [_geoPoint_value(0, 180)] - groups[48] = [_geoPoint_value(1, -180)] - groups[49] = [_geoPoint_value(1, 0)] - groups[50] = [_geoPoint_value(1, 180)] - groups[51] = [_geoPoint_value(90, -180)] - groups[52] = [_geoPoint_value(90, 0)] - groups[53] = [_geoPoint_value(90, 180)] - - # arrays - groups[54] = [_array_value()] - groups[55] = [_array_value(["bar"])] - groups[56] = [_array_value(["foo"])] - groups[57] = [_array_value(["foo", 0])] - groups[58] = [_array_value(["foo", 1])] - groups[59] = [_array_value(["foo", "0"])] - - # objects - groups[60] = [_object_value({"bar": 0})] - groups[61] = [_object_value({"bar": 0, "foo": 1})] - groups[62] = [_object_value({"bar": 1})] - groups[63] = [_object_value({"bar": 2})] - groups[64] = [_object_value({"bar": "0"})] - - target = self._make_one() - - for i in range(len(groups)): - for left in groups[i]: - for j in range(len(groups)): - for right in groups[j]: - expected = Order._compare_to(i, j) - - self.assertEqual( - target.compare(left, right), - expected, - "comparing L->R {} ({}) to {} ({})".format( - i, left, j, right - ), - ) - - expected = Order._compare_to(j, i) - self.assertEqual( - target.compare(right, left), - expected, - "comparing R->L {} ({}) to {} ({})".format( - j, right, i, left - ), - ) - - def test_typeorder_type_failure(self): - target = self._make_one() - left = mock.Mock() - left.WhichOneof.return_value = "imaginary-type" - - with self.assertRaisesRegex(ValueError, "Could not detect value"): - target.compare(left, mock.Mock()) - - def test_failure_to_find_type(self): - target = self._make_one() - left = mock.Mock() - left.WhichOneof.return_value = "imaginary-type" - right = mock.Mock() - # Patch from value to get to the deep compare. Since left is a bad type - # expect this to fail with value error. - with mock.patch.object(TypeOrder, "from_value") as to: - to.value = None - with self.assertRaisesRegex(ValueError, "'Unknown ``value_type``"): - target.compare(left, right) - - def test_compare_objects_different_keys(self): - left = _object_value({"foo": 0}) - right = _object_value({"bar": 0}) - - target = self._make_one() - target.compare(left, right) - - -def _boolean_value(b): - return encode_value(b) - - -def _double_value(d): - return encode_value(d) - - -def _int_value(l): - return encode_value(l) - - -def _string_value(s): - if not isinstance(s, six.text_type): - s = six.u(s) - return encode_value(s) - - -def _reference_value(r): - return document_pb2.Value(reference_value=r) - - -def _blob_value(b): - return encode_value(b) - - -def nullValue(): - return encode_value(None) - - -def _timestamp_value(seconds, nanos): - return document_pb2.Value( - timestamp_value=timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) - ) - - -def _geoPoint_value(latitude, longitude): - return encode_value(GeoPoint(latitude, longitude)) - - -def _array_value(values=[]): - return encode_value(values) - - -def _object_value(keysAndValues): - return encode_value(keysAndValues) diff --git a/firestore/tests/unit/v1/test_query.py b/firestore/tests/unit/v1/test_query.py deleted file mode 100644 index bdb0e922d00b..000000000000 --- a/firestore/tests/unit/v1/test_query.py +++ /dev/null @@ -1,1746 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import types -import unittest - -import mock -import six - - -class TestQuery(unittest.TestCase): - - if six.PY2: - assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.query import Query - - return Query - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor_defaults(self): - query = self._make_one(mock.sentinel.parent) - self.assertIs(query._parent, mock.sentinel.parent) - self.assertIsNone(query._projection) - self.assertEqual(query._field_filters, ()) - self.assertEqual(query._orders, ()) - self.assertIsNone(query._limit) - self.assertIsNone(query._offset) - self.assertIsNone(query._start_at) - self.assertIsNone(query._end_at) - self.assertFalse(query._all_descendants) - - def _make_one_all_fields( - self, limit=9876, offset=12, skip_fields=(), parent=None, all_descendants=True - ): - kwargs = { - "projection": mock.sentinel.projection, - "field_filters": mock.sentinel.filters, - "orders": mock.sentinel.orders, - "limit": limit, - "offset": offset, - "start_at": mock.sentinel.start_at, - "end_at": mock.sentinel.end_at, - "all_descendants": all_descendants, - } - for field in skip_fields: - kwargs.pop(field) - if parent is None: - parent = mock.sentinel.parent - return self._make_one(parent, **kwargs) - - def test_constructor_explicit(self): - limit = 234 - offset = 56 - query = self._make_one_all_fields(limit=limit, offset=offset) - self.assertIs(query._parent, mock.sentinel.parent) - self.assertIs(query._projection, mock.sentinel.projection) - self.assertIs(query._field_filters, mock.sentinel.filters) - self.assertEqual(query._orders, mock.sentinel.orders) - self.assertEqual(query._limit, limit) - self.assertEqual(query._offset, offset) - self.assertIs(query._start_at, mock.sentinel.start_at) - self.assertIs(query._end_at, mock.sentinel.end_at) - self.assertTrue(query._all_descendants) - - def test__client_property(self): - parent = mock.Mock(_client=mock.sentinel.client, spec=["_client"]) - query = self._make_one(parent) - self.assertIs(query._client, mock.sentinel.client) - - def test___eq___other_type(self): - query = self._make_one_all_fields() - other = object() - self.assertFalse(query == other) - - def test___eq___different_parent(self): - parent = mock.sentinel.parent - other_parent = mock.sentinel.other_parent - query = self._make_one_all_fields(parent=parent) - other = self._make_one_all_fields(parent=other_parent) - self.assertFalse(query == other) - - def test___eq___different_projection(self): - parent = mock.sentinel.parent - query = self._make_one_all_fields(parent=parent, skip_fields=("projection",)) - query._projection = mock.sentinel.projection - other = self._make_one_all_fields(parent=parent, skip_fields=("projection",)) - other._projection = mock.sentinel.other_projection - self.assertFalse(query == other) - - def 
test___eq___different_field_filters(self): - parent = mock.sentinel.parent - query = self._make_one_all_fields(parent=parent, skip_fields=("field_filters",)) - query._field_filters = mock.sentinel.field_filters - other = self._make_one_all_fields(parent=parent, skip_fields=("field_filters",)) - other._field_filters = mock.sentinel.other_field_filters - self.assertFalse(query == other) - - def test___eq___different_orders(self): - parent = mock.sentinel.parent - query = self._make_one_all_fields(parent=parent, skip_fields=("orders",)) - query._orders = mock.sentinel.orders - other = self._make_one_all_fields(parent=parent, skip_fields=("orders",)) - other._orders = mock.sentinel.other_orders - self.assertFalse(query == other) - - def test___eq___different_limit(self): - parent = mock.sentinel.parent - query = self._make_one_all_fields(parent=parent, limit=10) - other = self._make_one_all_fields(parent=parent, limit=20) - self.assertFalse(query == other) - - def test___eq___different_offset(self): - parent = mock.sentinel.parent - query = self._make_one_all_fields(parent=parent, offset=10) - other = self._make_one_all_fields(parent=parent, offset=20) - self.assertFalse(query == other) - - def test___eq___different_start_at(self): - parent = mock.sentinel.parent - query = self._make_one_all_fields(parent=parent, skip_fields=("start_at",)) - query._start_at = mock.sentinel.start_at - other = self._make_one_all_fields(parent=parent, skip_fields=("start_at",)) - other._start_at = mock.sentinel.other_start_at - self.assertFalse(query == other) - - def test___eq___different_end_at(self): - parent = mock.sentinel.parent - query = self._make_one_all_fields(parent=parent, skip_fields=("end_at",)) - query._end_at = mock.sentinel.end_at - other = self._make_one_all_fields(parent=parent, skip_fields=("end_at",)) - other._end_at = mock.sentinel.other_end_at - self.assertFalse(query == other) - - def test___eq___different_all_descendants(self): - parent = mock.sentinel.parent - query = self._make_one_all_fields(parent=parent, all_descendants=True) - other = self._make_one_all_fields(parent=parent, all_descendants=False) - self.assertFalse(query == other) - - def test___eq___hit(self): - query = self._make_one_all_fields() - other = self._make_one_all_fields() - self.assertTrue(query == other) - - def _compare_queries(self, query1, query2, attr_name): - attrs1 = query1.__dict__.copy() - attrs2 = query2.__dict__.copy() - - attrs1.pop(attr_name) - attrs2.pop(attr_name) - - # The only different should be in ``attr_name``. - self.assertEqual(len(attrs1), len(attrs2)) - for key, value in attrs1.items(): - self.assertIs(value, attrs2[key]) - - @staticmethod - def _make_projection_for_select(field_paths): - from google.cloud.firestore_v1.proto import query_pb2 - - return query_pb2.StructuredQuery.Projection( - fields=[ - query_pb2.StructuredQuery.FieldReference(field_path=field_path) - for field_path in field_paths - ] - ) - - def test_select_invalid_path(self): - query = self._make_one(mock.sentinel.parent) - - with self.assertRaises(ValueError): - query.select(["*"]) - - def test_select(self): - query1 = self._make_one_all_fields(all_descendants=True) - - field_paths2 = ["foo", "bar"] - query2 = query1.select(field_paths2) - self.assertIsNot(query2, query1) - self.assertIsInstance(query2, self._get_target_class()) - self.assertEqual( - query2._projection, self._make_projection_for_select(field_paths2) - ) - self._compare_queries(query1, query2, "_projection") - - # Make sure it overrides. 
- field_paths3 = ["foo.baz"] - query3 = query2.select(field_paths3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual( - query3._projection, self._make_projection_for_select(field_paths3) - ) - self._compare_queries(query2, query3, "_projection") - - def test_where_invalid_path(self): - query = self._make_one(mock.sentinel.parent) - - with self.assertRaises(ValueError): - query.where("*", "==", 1) - - def test_where(self): - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - query = self._make_one_all_fields( - skip_fields=("field_filters",), all_descendants=True - ) - new_query = query.where("power.level", ">", 9000) - - self.assertIsNot(query, new_query) - self.assertIsInstance(new_query, self._get_target_class()) - self.assertEqual(len(new_query._field_filters), 1) - - field_pb = new_query._field_filters[0] - expected_pb = query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="power.level"), - op=enums.StructuredQuery.FieldFilter.Operator.GREATER_THAN, - value=document_pb2.Value(integer_value=9000), - ) - self.assertEqual(field_pb, expected_pb) - self._compare_queries(query, new_query, "_field_filters") - - def _where_unary_helper(self, value, op_enum, op_string="=="): - from google.cloud.firestore_v1.proto import query_pb2 - - query = self._make_one_all_fields(skip_fields=("field_filters",)) - field_path = "feeeld" - new_query = query.where(field_path, op_string, value) - - self.assertIsNot(query, new_query) - self.assertIsInstance(new_query, self._get_target_class()) - self.assertEqual(len(new_query._field_filters), 1) - - field_pb = new_query._field_filters[0] - expected_pb = query_pb2.StructuredQuery.UnaryFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=op_enum, - ) - self.assertEqual(field_pb, expected_pb) - self._compare_queries(query, new_query, "_field_filters") - - def test_where_eq_null(self): - from google.cloud.firestore_v1.gapic import enums - - op_enum = enums.StructuredQuery.UnaryFilter.Operator.IS_NULL - self._where_unary_helper(None, op_enum) - - def test_where_gt_null(self): - with self.assertRaises(ValueError): - self._where_unary_helper(None, 0, op_string=">") - - def test_where_eq_nan(self): - from google.cloud.firestore_v1.gapic import enums - - op_enum = enums.StructuredQuery.UnaryFilter.Operator.IS_NAN - self._where_unary_helper(float("nan"), op_enum) - - def test_where_le_nan(self): - with self.assertRaises(ValueError): - self._where_unary_helper(float("nan"), 0, op_string="<=") - - def test_where_w_delete(self): - from google.cloud.firestore_v1 import DELETE_FIELD - - with self.assertRaises(ValueError): - self._where_unary_helper(DELETE_FIELD, 0) - - def test_where_w_server_timestamp(self): - from google.cloud.firestore_v1 import SERVER_TIMESTAMP - - with self.assertRaises(ValueError): - self._where_unary_helper(SERVER_TIMESTAMP, 0) - - def test_where_w_array_remove(self): - from google.cloud.firestore_v1 import ArrayRemove - - with self.assertRaises(ValueError): - self._where_unary_helper(ArrayRemove([1, 3, 5]), 0) - - def test_where_w_array_union(self): - from google.cloud.firestore_v1 import ArrayUnion - - with self.assertRaises(ValueError): - self._where_unary_helper(ArrayUnion([2, 4, 8]), 0) - - def test_order_by_invalid_path(self): - query = self._make_one(mock.sentinel.parent) - - with 
self.assertRaises(ValueError): - query.order_by("*") - - def test_order_by(self): - from google.cloud.firestore_v1.gapic import enums - - klass = self._get_target_class() - query1 = self._make_one_all_fields( - skip_fields=("orders",), all_descendants=True - ) - - field_path2 = "a" - query2 = query1.order_by(field_path2) - self.assertIsNot(query2, query1) - self.assertIsInstance(query2, klass) - order_pb2 = _make_order_pb( - field_path2, enums.StructuredQuery.Direction.ASCENDING - ) - self.assertEqual(query2._orders, (order_pb2,)) - self._compare_queries(query1, query2, "_orders") - - # Make sure it appends to the orders. - field_path3 = "b" - query3 = query2.order_by(field_path3, direction=klass.DESCENDING) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, klass) - order_pb3 = _make_order_pb( - field_path3, enums.StructuredQuery.Direction.DESCENDING - ) - self.assertEqual(query3._orders, (order_pb2, order_pb3)) - self._compare_queries(query2, query3, "_orders") - - def test_limit(self): - query1 = self._make_one_all_fields(all_descendants=True) - - limit2 = 100 - query2 = query1.limit(limit2) - self.assertIsNot(query2, query1) - self.assertIsInstance(query2, self._get_target_class()) - self.assertEqual(query2._limit, limit2) - self._compare_queries(query1, query2, "_limit") - - # Make sure it overrides. - limit3 = 10 - query3 = query2.limit(limit3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._limit, limit3) - self._compare_queries(query2, query3, "_limit") - - def test_offset(self): - query1 = self._make_one_all_fields(all_descendants=True) - - offset2 = 23 - query2 = query1.offset(offset2) - self.assertIsNot(query2, query1) - self.assertIsInstance(query2, self._get_target_class()) - self.assertEqual(query2._offset, offset2) - self._compare_queries(query1, query2, "_offset") - - # Make sure it overrides. 
- offset3 = 35 - query3 = query2.offset(offset3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._offset, offset3) - self._compare_queries(query2, query3, "_offset") - - @staticmethod - def _make_collection(*path, **kw): - from google.cloud.firestore_v1 import collection - - return collection.CollectionReference(*path, **kw) - - @staticmethod - def _make_docref(*path, **kw): - from google.cloud.firestore_v1 import document - - return document.DocumentReference(*path, **kw) - - @staticmethod - def _make_snapshot(docref, values): - from google.cloud.firestore_v1 import document - - return document.DocumentSnapshot(docref, values, True, None, None, None) - - def test__cursor_helper_w_dict(self): - values = {"a": 7, "b": "foo"} - query1 = self._make_one(mock.sentinel.parent) - query1._all_descendants = True - query2 = query1._cursor_helper(values, True, True) - - self.assertIs(query2._parent, mock.sentinel.parent) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, query1._orders) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - self.assertIsNone(query2._end_at) - self.assertTrue(query2._all_descendants) - - cursor, before = query2._start_at - - self.assertEqual(cursor, values) - self.assertTrue(before) - - def test__cursor_helper_w_tuple(self): - values = (7, "foo") - query1 = self._make_one(mock.sentinel.parent) - query2 = query1._cursor_helper(values, False, True) - - self.assertIs(query2._parent, mock.sentinel.parent) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, query1._orders) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - self.assertIsNone(query2._end_at) - - cursor, before = query2._start_at - - self.assertEqual(cursor, list(values)) - self.assertFalse(before) - - def test__cursor_helper_w_list(self): - values = [7, "foo"] - query1 = self._make_one(mock.sentinel.parent) - query2 = query1._cursor_helper(values, True, False) - - self.assertIs(query2._parent, mock.sentinel.parent) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, query1._orders) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - self.assertIsNone(query2._start_at) - - cursor, before = query2._end_at - - self.assertEqual(cursor, values) - self.assertIsNot(cursor, values) - self.assertTrue(before) - - def test__cursor_helper_w_snapshot_wrong_collection(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("there", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = self._make_one(collection) - - with self.assertRaises(ValueError): - query._cursor_helper(snapshot, False, False) - - def test__cursor_helper_w_snapshot_other_collection_all_descendants(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("there", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query1 = self._make_one(collection, all_descendants=True) - - query2 = query1._cursor_helper(snapshot, False, False) - - self.assertIs(query2._parent, collection) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, ()) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - 
self.assertIsNone(query2._start_at) - - cursor, before = query2._end_at - - self.assertIs(cursor, snapshot) - self.assertFalse(before) - - def test__cursor_helper_w_snapshot(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query1 = self._make_one(collection) - - query2 = query1._cursor_helper(snapshot, False, False) - - self.assertIs(query2._parent, collection) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, ()) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - self.assertIsNone(query2._start_at) - - cursor, before = query2._end_at - - self.assertIs(cursor, snapshot) - self.assertFalse(before) - - def test_start_at(self): - collection = self._make_collection("here") - query1 = self._make_one_all_fields( - parent=collection, skip_fields=("orders",), all_descendants=True - ) - query2 = query1.order_by("hi") - - document_fields3 = {"hi": "mom"} - query3 = query2.start_at(document_fields3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._start_at, (document_fields3, True)) - self._compare_queries(query2, query3, "_start_at") - - # Make sure it overrides. - query4 = query3.order_by("bye") - values5 = {"hi": "zap", "bye": 88} - docref = self._make_docref("here", "doc_id") - document_fields5 = self._make_snapshot(docref, values5) - query5 = query4.start_at(document_fields5) - self.assertIsNot(query5, query4) - self.assertIsInstance(query5, self._get_target_class()) - self.assertEqual(query5._start_at, (document_fields5, True)) - self._compare_queries(query4, query5, "_start_at") - - def test_start_after(self): - collection = self._make_collection("here") - query1 = self._make_one_all_fields(parent=collection, skip_fields=("orders",)) - query2 = query1.order_by("down") - - document_fields3 = {"down": 99.75} - query3 = query2.start_after(document_fields3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._start_at, (document_fields3, False)) - self._compare_queries(query2, query3, "_start_at") - - # Make sure it overrides. - query4 = query3.order_by("out") - values5 = {"down": 100.25, "out": b"\x00\x01"} - docref = self._make_docref("here", "doc_id") - document_fields5 = self._make_snapshot(docref, values5) - query5 = query4.start_after(document_fields5) - self.assertIsNot(query5, query4) - self.assertIsInstance(query5, self._get_target_class()) - self.assertEqual(query5._start_at, (document_fields5, False)) - self._compare_queries(query4, query5, "_start_at") - - def test_end_before(self): - collection = self._make_collection("here") - query1 = self._make_one_all_fields(parent=collection, skip_fields=("orders",)) - query2 = query1.order_by("down") - - document_fields3 = {"down": 99.75} - query3 = query2.end_before(document_fields3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._end_at, (document_fields3, True)) - self._compare_queries(query2, query3, "_end_at") - - # Make sure it overrides. 
- query4 = query3.order_by("out") - values5 = {"down": 100.25, "out": b"\x00\x01"} - docref = self._make_docref("here", "doc_id") - document_fields5 = self._make_snapshot(docref, values5) - query5 = query4.end_before(document_fields5) - self.assertIsNot(query5, query4) - self.assertIsInstance(query5, self._get_target_class()) - self.assertEqual(query5._end_at, (document_fields5, True)) - self._compare_queries(query4, query5, "_end_at") - self._compare_queries(query4, query5, "_end_at") - - def test_end_at(self): - collection = self._make_collection("here") - query1 = self._make_one_all_fields(parent=collection, skip_fields=("orders",)) - query2 = query1.order_by("hi") - - document_fields3 = {"hi": "mom"} - query3 = query2.end_at(document_fields3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._end_at, (document_fields3, False)) - self._compare_queries(query2, query3, "_end_at") - - # Make sure it overrides. - query4 = query3.order_by("bye") - values5 = {"hi": "zap", "bye": 88} - docref = self._make_docref("here", "doc_id") - document_fields5 = self._make_snapshot(docref, values5) - query5 = query4.end_at(document_fields5) - self.assertIsNot(query5, query4) - self.assertIsInstance(query5, self._get_target_class()) - self.assertEqual(query5._end_at, (document_fields5, False)) - self._compare_queries(query4, query5, "_end_at") - - def test__filters_pb_empty(self): - query = self._make_one(mock.sentinel.parent) - self.assertEqual(len(query._field_filters), 0) - self.assertIsNone(query._filters_pb()) - - def test__filters_pb_single(self): - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - query1 = self._make_one(mock.sentinel.parent) - query2 = query1.where("x.y", ">", 50.5) - filter_pb = query2._filters_pb() - expected_pb = query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="x.y"), - op=enums.StructuredQuery.FieldFilter.Operator.GREATER_THAN, - value=document_pb2.Value(double_value=50.5), - ) - ) - self.assertEqual(filter_pb, expected_pb) - - def test__filters_pb_multi(self): - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - query1 = self._make_one(mock.sentinel.parent) - query2 = query1.where("x.y", ">", 50.5) - query3 = query2.where("ABC", "==", 123) - - filter_pb = query3._filters_pb() - op_class = enums.StructuredQuery.FieldFilter.Operator - expected_pb = query_pb2.StructuredQuery.Filter( - composite_filter=query_pb2.StructuredQuery.CompositeFilter( - op=enums.StructuredQuery.CompositeFilter.Operator.AND, - filters=[ - query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference( - field_path="x.y" - ), - op=op_class.GREATER_THAN, - value=document_pb2.Value(double_value=50.5), - ) - ), - query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference( - field_path="ABC" - ), - op=op_class.EQUAL, - value=document_pb2.Value(integer_value=123), - ) - ), - ], - ) - ) - self.assertEqual(filter_pb, expected_pb) - - def test__normalize_projection_none(self): - query = self._make_one(mock.sentinel.parent) - 
self.assertIsNone(query._normalize_projection(None)) - - def test__normalize_projection_empty(self): - projection = self._make_projection_for_select([]) - query = self._make_one(mock.sentinel.parent) - normalized = query._normalize_projection(projection) - field_paths = [field_ref.field_path for field_ref in normalized.fields] - self.assertEqual(field_paths, ["__name__"]) - - def test__normalize_projection_non_empty(self): - projection = self._make_projection_for_select(["a", "b"]) - query = self._make_one(mock.sentinel.parent) - self.assertIs(query._normalize_projection(projection), projection) - - def test__normalize_orders_wo_orders_wo_cursors(self): - query = self._make_one(mock.sentinel.parent) - expected = [] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_w_orders_wo_cursors(self): - query = self._make_one(mock.sentinel.parent).order_by("a") - expected = [query._make_order("a", "ASCENDING")] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_wo_orders_w_snapshot_cursor(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = self._make_one(collection).start_at(snapshot) - expected = [query._make_order("__name__", "ASCENDING")] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_w_name_orders_w_snapshot_cursor(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = ( - self._make_one(collection) - .order_by("__name__", "DESCENDING") - .start_at(snapshot) - ) - expected = [query._make_order("__name__", "DESCENDING")] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_wo_orders_w_snapshot_cursor_w_neq_exists(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = ( - self._make_one(collection) - .where("c", "<=", 20) - .order_by("c", "DESCENDING") - .start_at(snapshot) - ) - expected = [ - query._make_order("c", "DESCENDING"), - query._make_order("__name__", "DESCENDING"), - ] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_wo_orders_w_snapshot_cursor_w_neq_where(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = self._make_one(collection).where("c", "<=", 20).end_at(snapshot) - expected = [ - query._make_order("c", "ASCENDING"), - query._make_order("__name__", "ASCENDING"), - ] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_cursor_none(self): - query = self._make_one(mock.sentinel.parent) - self.assertIsNone(query._normalize_cursor(None, query._orders)) - - def test__normalize_cursor_no_order(self): - cursor = ([1], True) - query = self._make_one(mock.sentinel.parent) - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_as_list_mismatched_order(self): - cursor = ([1, 2], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def 
test__normalize_cursor_as_dict_mismatched_order(self): - cursor = ({"a": 1}, True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_w_delete(self): - from google.cloud.firestore_v1 import DELETE_FIELD - - cursor = ([DELETE_FIELD], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_w_server_timestamp(self): - from google.cloud.firestore_v1 import SERVER_TIMESTAMP - - cursor = ([SERVER_TIMESTAMP], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_w_array_remove(self): - from google.cloud.firestore_v1 import ArrayRemove - - cursor = ([ArrayRemove([1, 3, 5])], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_w_array_union(self): - from google.cloud.firestore_v1 import ArrayUnion - - cursor = ([ArrayUnion([2, 4, 8])], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_as_list_hit(self): - cursor = ([1], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True)) - - def test__normalize_cursor_as_dict_hit(self): - cursor = ({"b": 1}, True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True)) - - def test__normalize_cursor_as_dict_with_dot_key_hit(self): - cursor = ({"b.a": 1}, True) - query = self._make_one(mock.sentinel.parent).order_by("b.a", "ASCENDING") - self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True)) - - def test__normalize_cursor_as_dict_with_inner_data_hit(self): - cursor = ({"b": {"a": 1}}, True) - query = self._make_one(mock.sentinel.parent).order_by("b.a", "ASCENDING") - self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True)) - - def test__normalize_cursor_as_snapshot_hit(self): - values = {"b": 1} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - cursor = (snapshot, True) - collection = self._make_collection("here") - query = self._make_one(collection).order_by("b", "ASCENDING") - - self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True)) - - def test__normalize_cursor_w___name___w_reference(self): - db_string = "projects/my-project/database/(default)" - client = mock.Mock(spec=["_database_string"]) - client._database_string = db_string - parent = mock.Mock(spec=["_path", "_client"]) - parent._client = client - parent._path = ["C"] - query = self._make_one(parent).order_by("__name__", "ASCENDING") - docref = self._make_docref("here", "doc_id") - values = {"a": 7} - snapshot = self._make_snapshot(docref, values) - expected = docref - cursor = (snapshot, True) - - self.assertEqual( - query._normalize_cursor(cursor, query._orders), ([expected], True) - ) - - def test__normalize_cursor_w___name___wo_slash(self): - db_string = 
"projects/my-project/database/(default)" - client = mock.Mock(spec=["_database_string"]) - client._database_string = db_string - parent = mock.Mock(spec=["_path", "_client", "document"]) - parent._client = client - parent._path = ["C"] - document = parent.document.return_value = mock.Mock(spec=[]) - query = self._make_one(parent).order_by("__name__", "ASCENDING") - cursor = (["b"], True) - expected = document - - self.assertEqual( - query._normalize_cursor(cursor, query._orders), ([expected], True) - ) - parent.document.assert_called_once_with("b") - - def test__to_protobuf_all_fields(self): - from google.protobuf import wrappers_pb2 - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - parent = mock.Mock(id="cat", spec=["id"]) - query1 = self._make_one(parent) - query2 = query1.select(["X", "Y", "Z"]) - query3 = query2.where("Y", ">", 2.5) - query4 = query3.order_by("X") - query5 = query4.limit(17) - query6 = query5.offset(3) - query7 = query6.start_at({"X": 10}) - query8 = query7.end_at({"X": 25}) - - structured_query_pb = query8._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "select": query_pb2.StructuredQuery.Projection( - fields=[ - query_pb2.StructuredQuery.FieldReference(field_path=field_path) - for field_path in ["X", "Y", "Z"] - ] - ), - "where": query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="Y"), - op=enums.StructuredQuery.FieldFilter.Operator.GREATER_THAN, - value=document_pb2.Value(double_value=2.5), - ) - ), - "order_by": [ - _make_order_pb("X", enums.StructuredQuery.Direction.ASCENDING) - ], - "start_at": query_pb2.Cursor( - values=[document_pb2.Value(integer_value=10)], before=True - ), - "end_at": query_pb2.Cursor(values=[document_pb2.Value(integer_value=25)]), - "offset": 3, - "limit": wrappers_pb2.Int32Value(value=17), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_select_only(self): - from google.cloud.firestore_v1.proto import query_pb2 - - parent = mock.Mock(id="cat", spec=["id"]) - query1 = self._make_one(parent) - field_paths = ["a.b", "a.c", "d"] - query2 = query1.select(field_paths) - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "select": query_pb2.StructuredQuery.Projection( - fields=[ - query_pb2.StructuredQuery.FieldReference(field_path=field_path) - for field_path in field_paths - ] - ), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_where_only(self): - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - parent = mock.Mock(id="dog", spec=["id"]) - query1 = self._make_one(parent) - query2 = query1.where("a", "==", u"b") - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "where": query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="a"), - 
op=enums.StructuredQuery.FieldFilter.Operator.EQUAL, - value=document_pb2.Value(string_value=u"b"), - ) - ), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_order_by_only(self): - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import query_pb2 - - parent = mock.Mock(id="fish", spec=["id"]) - query1 = self._make_one(parent) - query2 = query1.order_by("abc") - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "order_by": [ - _make_order_pb("abc", enums.StructuredQuery.Direction.ASCENDING) - ], - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_start_at_only(self): - # NOTE: "only" is wrong since we must have ``order_by`` as well. - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - parent = mock.Mock(id="phish", spec=["id"]) - query = self._make_one(parent).order_by("X.Y").start_after({"X": {"Y": u"Z"}}) - - structured_query_pb = query._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "order_by": [ - _make_order_pb("X.Y", enums.StructuredQuery.Direction.ASCENDING) - ], - "start_at": query_pb2.Cursor( - values=[document_pb2.Value(string_value=u"Z")] - ), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_end_at_only(self): - # NOTE: "only" is wrong since we must have ``order_by`` as well. 
- from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - parent = mock.Mock(id="ghoti", spec=["id"]) - query = self._make_one(parent).order_by("a").end_at({"a": 88}) - - structured_query_pb = query._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "order_by": [ - _make_order_pb("a", enums.StructuredQuery.Direction.ASCENDING) - ], - "end_at": query_pb2.Cursor(values=[document_pb2.Value(integer_value=88)]), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_offset_only(self): - from google.cloud.firestore_v1.proto import query_pb2 - - parent = mock.Mock(id="cartt", spec=["id"]) - query1 = self._make_one(parent) - offset = 14 - query2 = query1.offset(offset) - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "offset": offset, - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_limit_only(self): - from google.protobuf import wrappers_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - parent = mock.Mock(id="donut", spec=["id"]) - query1 = self._make_one(parent) - limit = 31 - query2 = query1.limit(limit) - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "limit": wrappers_pb2.Int32Value(value=limit), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - - self.assertEqual(structured_query_pb, expected_pb) - - def test_get_simple(self): - import warnings - - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("dee") - - # Add a dummy response to the minimal fake GAPIC. - _, expected_prefix = parent._parent_info() - name = "{}/sleep".format(expected_prefix) - data = {"snooze": 10} - response_pb = _make_query_response(name=name, data=data) - firestore_api.run_query.return_value = iter([response_pb]) - - # Execute the query and check the response. - query = self._make_one(parent) - - with warnings.catch_warnings(record=True) as warned: - get_response = query.get() - - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("dee", "sleep")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - # Verify the deprecation - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - def test_stream_simple(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. 
- parent = client.collection("dee") - - # Add a dummy response to the minimal fake GAPIC. - _, expected_prefix = parent._parent_info() - name = "{}/sleep".format(expected_prefix) - data = {"snooze": 10} - response_pb = _make_query_response(name=name, data=data) - firestore_api.run_query.return_value = iter([response_pb]) - - # Execute the query and check the response. - query = self._make_one(parent) - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("dee", "sleep")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_with_transaction(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Create a real-ish transaction for this client. - transaction = client.transaction() - txn_id = b"\x00\x00\x01-work-\xf2" - transaction._id = txn_id - - # Make a **real** collection reference as parent. - parent = client.collection("declaration") - - # Add a dummy response to the minimal fake GAPIC. - parent_path, expected_prefix = parent._parent_info() - name = "{}/burger".format(expected_prefix) - data = {"lettuce": b"\xee\x87"} - response_pb = _make_query_response(name=name, data=data) - firestore_api.run_query.return_value = iter([response_pb]) - - # Execute the query and check the response. - query = self._make_one(parent) - get_response = query.stream(transaction=transaction) - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("declaration", "burger")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - def test_stream_no_results(self): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["run_query"]) - empty_response = _make_query_response() - run_query_response = iter([empty_response]) - firestore_api.run_query.return_value = run_query_response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("dah", "dah", "dum") - query = self._make_one(parent) - - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - self.assertEqual(list(get_response), []) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_second_response_in_empty_stream(self): - # Create a minimal fake GAPIC with a dummy response. 
- firestore_api = mock.Mock(spec=["run_query"]) - empty_response1 = _make_query_response() - empty_response2 = _make_query_response() - run_query_response = iter([empty_response1, empty_response2]) - firestore_api.run_query.return_value = run_query_response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("dah", "dah", "dum") - query = self._make_one(parent) - - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - self.assertEqual(list(get_response), []) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_with_skipped_results(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("talk", "and", "chew-gum") - - # Add two dummy responses to the minimal fake GAPIC. - _, expected_prefix = parent._parent_info() - response_pb1 = _make_query_response(skipped_results=1) - name = "{}/clock".format(expected_prefix) - data = {"noon": 12, "nested": {"bird": 10.5}} - response_pb2 = _make_query_response(name=name, data=data) - firestore_api.run_query.return_value = iter([response_pb1, response_pb2]) - - # Execute the query and check the response. - query = self._make_one(parent) - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("talk", "and", "chew-gum", "clock")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_empty_after_first_response(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("charles") - - # Add two dummy responses to the minimal fake GAPIC. - _, expected_prefix = parent._parent_info() - name = "{}/bark".format(expected_prefix) - data = {"lee": "hoop"} - response_pb1 = _make_query_response(name=name, data=data) - response_pb2 = _make_query_response() - firestore_api.run_query.return_value = iter([response_pb1, response_pb2]) - - # Execute the query and check the response. - query = self._make_one(parent) - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("charles", "bark")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. 
- parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_w_collection_group(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("charles") - other = client.collection("dora") - - # Add two dummy responses to the minimal fake GAPIC. - _, other_prefix = other._parent_info() - name = "{}/bark".format(other_prefix) - data = {"lee": "hoop"} - response_pb1 = _make_query_response(name=name, data=data) - response_pb2 = _make_query_response() - firestore_api.run_query.return_value = iter([response_pb1, response_pb2]) - - # Execute the query and check the response. - query = self._make_one(parent) - query._all_descendants = True - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - to_match = other.document("bark") - self.assertEqual(snapshot.reference._document_path, to_match._document_path) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - @mock.patch("google.cloud.firestore_v1.query.Watch", autospec=True) - def test_on_snapshot(self, watch): - query = self._make_one(mock.sentinel.parent) - query.on_snapshot(None) - watch.for_query.assert_called_once() - - def test_comparator_no_ordering(self): - query = self._make_one(mock.sentinel.parent) - query._orders = [] - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument2") - - sort = query._comparator(doc1, doc2) - self.assertEqual(sort, -1) - - def test_comparator_no_ordering_same_id(self): - query = self._make_one(mock.sentinel.parent) - query._orders = [] - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument1") - - sort = query._comparator(doc1, doc2) - self.assertEqual(sort, 0) - - def test_comparator_ordering(self): - query = self._make_one(mock.sentinel.parent) - orderByMock = mock.Mock() - orderByMock.field.field_path = "last" - orderByMock.direction = 1 # ascending - query._orders = [orderByMock] - - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - doc1._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "secondlovelace"}, - } - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument2") - doc2._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "lovelace"}, - } - - sort = query._comparator(doc1, doc2) - self.assertEqual(sort, 1) - - def test_comparator_ordering_descending(self): - query = self._make_one(mock.sentinel.parent) - orderByMock = mock.Mock() - orderByMock.field.field_path = "last" - orderByMock.direction = -1 # descending - query._orders = [orderByMock] - - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - doc1._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "secondlovelace"}, - } - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument2") 
- doc2._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "lovelace"}, - } - - sort = query._comparator(doc1, doc2) - self.assertEqual(sort, -1) - - def test_comparator_missing_order_by_field_in_data_raises(self): - query = self._make_one(mock.sentinel.parent) - orderByMock = mock.Mock() - orderByMock.field.field_path = "last" - orderByMock.direction = 1 # ascending - query._orders = [orderByMock] - - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - doc1._data = {} - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument2") - doc2._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "lovelace"}, - } - - with self.assertRaisesRegex(ValueError, "Can only compare fields "): - query._comparator(doc1, doc2) - - -class Test__enum_from_op_string(unittest.TestCase): - @staticmethod - def _call_fut(op_string): - from google.cloud.firestore_v1.query import _enum_from_op_string - - return _enum_from_op_string(op_string) - - @staticmethod - def _get_op_class(): - from google.cloud.firestore_v1.gapic import enums - - return enums.StructuredQuery.FieldFilter.Operator - - def test_lt(self): - op_class = self._get_op_class() - self.assertEqual(self._call_fut("<"), op_class.LESS_THAN) - - def test_le(self): - op_class = self._get_op_class() - self.assertEqual(self._call_fut("<="), op_class.LESS_THAN_OR_EQUAL) - - def test_eq(self): - op_class = self._get_op_class() - self.assertEqual(self._call_fut("=="), op_class.EQUAL) - - def test_ge(self): - op_class = self._get_op_class() - self.assertEqual(self._call_fut(">="), op_class.GREATER_THAN_OR_EQUAL) - - def test_gt(self): - op_class = self._get_op_class() - self.assertEqual(self._call_fut(">"), op_class.GREATER_THAN) - - def test_array_contains(self): - op_class = self._get_op_class() - self.assertEqual(self._call_fut("array_contains"), op_class.ARRAY_CONTAINS) - - def test_in(self): - op_class = self._get_op_class() - self.assertEqual(self._call_fut("in"), op_class.IN) - - def test_array_contains_any(self): - op_class = self._get_op_class() - self.assertEqual( - self._call_fut("array_contains_any"), op_class.ARRAY_CONTAINS_ANY - ) - - def test_invalid(self): - with self.assertRaises(ValueError): - self._call_fut("?") - - -class Test__isnan(unittest.TestCase): - @staticmethod - def _call_fut(value): - from google.cloud.firestore_v1.query import _isnan - - return _isnan(value) - - def test_valid(self): - self.assertTrue(self._call_fut(float("nan"))) - - def test_invalid(self): - self.assertFalse(self._call_fut(51.5)) - self.assertFalse(self._call_fut(None)) - self.assertFalse(self._call_fut("str")) - self.assertFalse(self._call_fut(int)) - self.assertFalse(self._call_fut(1.0 + 1.0j)) - - -class Test__enum_from_direction(unittest.TestCase): - @staticmethod - def _call_fut(direction): - from google.cloud.firestore_v1.query import _enum_from_direction - - return _enum_from_direction(direction) - - def test_success(self): - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.query import Query - - dir_class = enums.StructuredQuery.Direction - self.assertEqual(self._call_fut(Query.ASCENDING), dir_class.ASCENDING) - self.assertEqual(self._call_fut(Query.DESCENDING), dir_class.DESCENDING) - - # Ints pass through - self.assertEqual(self._call_fut(dir_class.ASCENDING), dir_class.ASCENDING) - self.assertEqual(self._call_fut(dir_class.DESCENDING), dir_class.DESCENDING) - - def test_failure(self): - with self.assertRaises(ValueError): - 
self._call_fut("neither-ASCENDING-nor-DESCENDING") - - -class Test__filter_pb(unittest.TestCase): - @staticmethod - def _call_fut(field_or_unary): - from google.cloud.firestore_v1.query import _filter_pb - - return _filter_pb(field_or_unary) - - def test_unary(self): - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import query_pb2 - - unary_pb = query_pb2.StructuredQuery.UnaryFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="a.b.c"), - op=enums.StructuredQuery.UnaryFilter.Operator.IS_NULL, - ) - filter_pb = self._call_fut(unary_pb) - expected_pb = query_pb2.StructuredQuery.Filter(unary_filter=unary_pb) - self.assertEqual(filter_pb, expected_pb) - - def test_field(self): - from google.cloud.firestore_v1.gapic import enums - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import query_pb2 - - field_filter_pb = query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="XYZ"), - op=enums.StructuredQuery.FieldFilter.Operator.GREATER_THAN, - value=document_pb2.Value(double_value=90.75), - ) - filter_pb = self._call_fut(field_filter_pb) - expected_pb = query_pb2.StructuredQuery.Filter(field_filter=field_filter_pb) - self.assertEqual(filter_pb, expected_pb) - - def test_bad_type(self): - with self.assertRaises(ValueError): - self._call_fut(None) - - -class Test__cursor_pb(unittest.TestCase): - @staticmethod - def _call_fut(cursor_pair): - from google.cloud.firestore_v1.query import _cursor_pb - - return _cursor_pb(cursor_pair) - - def test_no_pair(self): - self.assertIsNone(self._call_fut(None)) - - def test_success(self): - from google.cloud.firestore_v1.proto import query_pb2 - from google.cloud.firestore_v1 import _helpers - - data = [1.5, 10, True] - cursor_pair = data, True - - cursor_pb = self._call_fut(cursor_pair) - - expected_pb = query_pb2.Cursor( - values=[_helpers.encode_value(value) for value in data], before=True - ) - self.assertEqual(cursor_pb, expected_pb) - - -class Test__query_response_to_snapshot(unittest.TestCase): - @staticmethod - def _call_fut(response_pb, collection, expected_prefix): - from google.cloud.firestore_v1.query import _query_response_to_snapshot - - return _query_response_to_snapshot(response_pb, collection, expected_prefix) - - def test_empty(self): - response_pb = _make_query_response() - snapshot = self._call_fut(response_pb, None, None) - self.assertIsNone(snapshot) - - def test_after_offset(self): - skipped_results = 410 - response_pb = _make_query_response(skipped_results=skipped_results) - snapshot = self._call_fut(response_pb, None, None) - self.assertIsNone(snapshot) - - def test_response(self): - from google.cloud.firestore_v1.document import DocumentSnapshot - - client = _make_client() - collection = client.collection("a", "b", "c") - _, expected_prefix = collection._parent_info() - - # Create name for the protobuf. 
- doc_id = "gigantic" - name = "{}/{}".format(expected_prefix, doc_id) - data = {"a": 901, "b": True} - response_pb = _make_query_response(name=name, data=data) - - snapshot = self._call_fut(response_pb, collection, expected_prefix) - self.assertIsInstance(snapshot, DocumentSnapshot) - expected_path = collection._path + (doc_id,) - self.assertEqual(snapshot.reference._path, expected_path) - self.assertEqual(snapshot.to_dict(), data) - self.assertTrue(snapshot.exists) - self.assertEqual(snapshot.read_time, response_pb.read_time) - self.assertEqual(snapshot.create_time, response_pb.document.create_time) - self.assertEqual(snapshot.update_time, response_pb.document.update_time) - - -class Test__collection_group_query_response_to_snapshot(unittest.TestCase): - @staticmethod - def _call_fut(response_pb, collection): - from google.cloud.firestore_v1.query import ( - _collection_group_query_response_to_snapshot, - ) - - return _collection_group_query_response_to_snapshot(response_pb, collection) - - def test_empty(self): - response_pb = _make_query_response() - snapshot = self._call_fut(response_pb, None) - self.assertIsNone(snapshot) - - def test_after_offset(self): - skipped_results = 410 - response_pb = _make_query_response(skipped_results=skipped_results) - snapshot = self._call_fut(response_pb, None) - self.assertIsNone(snapshot) - - def test_response(self): - from google.cloud.firestore_v1.document import DocumentSnapshot - - client = _make_client() - collection = client.collection("a", "b", "c") - other_collection = client.collection("a", "b", "d") - to_match = other_collection.document("gigantic") - data = {"a": 901, "b": True} - response_pb = _make_query_response(name=to_match._document_path, data=data) - - snapshot = self._call_fut(response_pb, collection) - self.assertIsInstance(snapshot, DocumentSnapshot) - self.assertEqual(snapshot.reference._document_path, to_match._document_path) - self.assertEqual(snapshot.to_dict(), data) - self.assertTrue(snapshot.exists) - self.assertEqual(snapshot.read_time, response_pb.read_time) - self.assertEqual(snapshot.create_time, response_pb.document.create_time) - self.assertEqual(snapshot.update_time, response_pb.document.update_time) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="project-project"): - from google.cloud.firestore_v1.client import Client - - credentials = _make_credentials() - return Client(project=project, credentials=credentials) - - -def _make_order_pb(field_path, direction): - from google.cloud.firestore_v1.proto import query_pb2 - - return query_pb2.StructuredQuery.Order( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - direction=direction, - ) - - -def _make_query_response(**kwargs): - # kwargs supported are ``skipped_results``, ``name`` and ``data`` - from google.cloud.firestore_v1.proto import document_pb2 - from google.cloud.firestore_v1.proto import firestore_pb2 - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.firestore_v1 import _helpers - - now = datetime.datetime.utcnow() - read_time = _datetime_to_pb_timestamp(now) - kwargs["read_time"] = read_time - - name = kwargs.pop("name", None) - data = kwargs.pop("data", None) - if name is not None and data is not None: - document_pb = document_pb2.Document( - name=name, fields=_helpers.encode_dict(data) - ) - delta = datetime.timedelta(seconds=100) - update_time = _datetime_to_pb_timestamp(now - delta) - 
create_time = _datetime_to_pb_timestamp(now - 2 * delta) - document_pb.update_time.CopyFrom(update_time) - document_pb.create_time.CopyFrom(create_time) - - kwargs["document"] = document_pb - - return firestore_pb2.RunQueryResponse(**kwargs) diff --git a/firestore/tests/unit/v1/test_transaction.py b/firestore/tests/unit/v1/test_transaction.py deleted file mode 100644 index 8cae24a23831..000000000000 --- a/firestore/tests/unit/v1/test_transaction.py +++ /dev/null @@ -1,1020 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest -import mock - - -class TestTransaction(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.transaction import Transaction - - return Transaction - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor_defaults(self): - from google.cloud.firestore_v1.transaction import MAX_ATTEMPTS - - transaction = self._make_one(mock.sentinel.client) - self.assertIs(transaction._client, mock.sentinel.client) - self.assertEqual(transaction._write_pbs, []) - self.assertEqual(transaction._max_attempts, MAX_ATTEMPTS) - self.assertFalse(transaction._read_only) - self.assertIsNone(transaction._id) - - def test_constructor_explicit(self): - transaction = self._make_one( - mock.sentinel.client, max_attempts=10, read_only=True - ) - self.assertIs(transaction._client, mock.sentinel.client) - self.assertEqual(transaction._write_pbs, []) - self.assertEqual(transaction._max_attempts, 10) - self.assertTrue(transaction._read_only) - self.assertIsNone(transaction._id) - - def test__add_write_pbs_failure(self): - from google.cloud.firestore_v1.transaction import _WRITE_READ_ONLY - - batch = self._make_one(mock.sentinel.client, read_only=True) - self.assertEqual(batch._write_pbs, []) - with self.assertRaises(ValueError) as exc_info: - batch._add_write_pbs([mock.sentinel.write]) - - self.assertEqual(exc_info.exception.args, (_WRITE_READ_ONLY,)) - self.assertEqual(batch._write_pbs, []) - - def test__add_write_pbs(self): - batch = self._make_one(mock.sentinel.client) - self.assertEqual(batch._write_pbs, []) - batch._add_write_pbs([mock.sentinel.write]) - self.assertEqual(batch._write_pbs, [mock.sentinel.write]) - - def test__options_protobuf_read_only(self): - from google.cloud.firestore_v1.proto import common_pb2 - - transaction = self._make_one(mock.sentinel.client, read_only=True) - options_pb = transaction._options_protobuf(None) - expected_pb = common_pb2.TransactionOptions( - read_only=common_pb2.TransactionOptions.ReadOnly() - ) - self.assertEqual(options_pb, expected_pb) - - def test__options_protobuf_read_only_retry(self): - from google.cloud.firestore_v1.transaction import _CANT_RETRY_READ_ONLY - - transaction = self._make_one(mock.sentinel.client, read_only=True) - retry_id = b"illuminate" - - with self.assertRaises(ValueError) as exc_info: - transaction._options_protobuf(retry_id) - - 
self.assertEqual(exc_info.exception.args, (_CANT_RETRY_READ_ONLY,)) - - def test__options_protobuf_read_write(self): - transaction = self._make_one(mock.sentinel.client) - options_pb = transaction._options_protobuf(None) - self.assertIsNone(options_pb) - - def test__options_protobuf_on_retry(self): - from google.cloud.firestore_v1.proto import common_pb2 - - transaction = self._make_one(mock.sentinel.client) - retry_id = b"hocus-pocus" - options_pb = transaction._options_protobuf(retry_id) - expected_pb = common_pb2.TransactionOptions( - read_write=common_pb2.TransactionOptions.ReadWrite( - retry_transaction=retry_id - ) - ) - self.assertEqual(options_pb, expected_pb) - - def test_in_progress_property(self): - transaction = self._make_one(mock.sentinel.client) - self.assertFalse(transaction.in_progress) - transaction._id = b"not-none-bites" - self.assertTrue(transaction.in_progress) - - def test_id_property(self): - transaction = self._make_one(mock.sentinel.client) - transaction._id = mock.sentinel.eye_dee - self.assertIs(transaction.id, mock.sentinel.eye_dee) - - def test__begin(self): - from google.cloud.firestore_v1.gapic import firestore_client - from google.cloud.firestore_v1.proto import firestore_pb2 - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - txn_id = b"to-begin" - response = firestore_pb2.BeginTransactionResponse(transaction=txn_id) - firestore_api.begin_transaction.return_value = response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a transaction and ``begin()`` it. - transaction = self._make_one(client) - self.assertIsNone(transaction._id) - - ret_val = transaction._begin() - self.assertIsNone(ret_val) - self.assertEqual(transaction._id, txn_id) - - # Verify the called mock. - firestore_api.begin_transaction.assert_called_once_with( - client._database_string, options_=None, metadata=client._rpc_metadata - ) - - def test__begin_failure(self): - from google.cloud.firestore_v1.transaction import _CANT_BEGIN - - client = _make_client() - transaction = self._make_one(client) - transaction._id = b"not-none" - - with self.assertRaises(ValueError) as exc_info: - transaction._begin() - - err_msg = _CANT_BEGIN.format(transaction._id) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - def test__clean_up(self): - transaction = self._make_one(mock.sentinel.client) - transaction._write_pbs.extend( - [mock.sentinel.write_pb1, mock.sentinel.write_pb2] - ) - transaction._id = b"not-this-time-my-friend" - - ret_val = transaction._clean_up() - self.assertIsNone(ret_val) - - self.assertEqual(transaction._write_pbs, []) - self.assertIsNone(transaction._id) - - def test__rollback(self): - from google.protobuf import empty_pb2 - from google.cloud.firestore_v1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - firestore_api.rollback.return_value = empty_pb2.Empty() - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a transaction and roll it back. - transaction = self._make_one(client) - txn_id = b"to-be-r\x00lled" - transaction._id = txn_id - ret_val = transaction._rollback() - self.assertIsNone(ret_val) - self.assertIsNone(transaction._id) - - # Verify the called mock. 
- firestore_api.rollback.assert_called_once_with( - client._database_string, txn_id, metadata=client._rpc_metadata - ) - - def test__rollback_not_allowed(self): - from google.cloud.firestore_v1.transaction import _CANT_ROLLBACK - - client = _make_client() - transaction = self._make_one(client) - self.assertIsNone(transaction._id) - - with self.assertRaises(ValueError) as exc_info: - transaction._rollback() - - self.assertEqual(exc_info.exception.args, (_CANT_ROLLBACK,)) - - def test__rollback_failure(self): - from google.api_core import exceptions - from google.cloud.firestore_v1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy failure. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - exc = exceptions.InternalServerError("Fire during rollback.") - firestore_api.rollback.side_effect = exc - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a transaction and roll it back. - transaction = self._make_one(client) - txn_id = b"roll-bad-server" - transaction._id = txn_id - - with self.assertRaises(exceptions.InternalServerError) as exc_info: - transaction._rollback() - - self.assertIs(exc_info.exception, exc) - self.assertIsNone(transaction._id) - self.assertEqual(transaction._write_pbs, []) - - # Verify the called mock. - firestore_api.rollback.assert_called_once_with( - client._database_string, txn_id, metadata=client._rpc_metadata - ) - - def test__commit(self): - from google.cloud.firestore_v1.gapic import firestore_client - from google.cloud.firestore_v1.proto import firestore_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult()] - ) - firestore_api.commit.return_value = commit_response - - # Attach the fake GAPIC to a real client. - client = _make_client("phone-joe") - client._firestore_api_internal = firestore_api - - # Actually make a transaction with some mutations and call _commit(). - transaction = self._make_one(client) - txn_id = b"under-over-thru-woods" - transaction._id = txn_id - document = client.document("zap", "galaxy", "ship", "space") - transaction.set(document, {"apple": 4.5}) - write_pbs = transaction._write_pbs[::] - - write_results = transaction._commit() - self.assertEqual(write_results, list(commit_response.write_results)) - # Make sure transaction has no more "changes". - self.assertIsNone(transaction._id) - self.assertEqual(transaction._write_pbs, []) - - # Verify the mocks. - firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - def test__commit_not_allowed(self): - from google.cloud.firestore_v1.transaction import _CANT_COMMIT - - transaction = self._make_one(mock.sentinel.client) - self.assertIsNone(transaction._id) - with self.assertRaises(ValueError) as exc_info: - transaction._commit() - - self.assertEqual(exc_info.exception.args, (_CANT_COMMIT,)) - - def test__commit_failure(self): - from google.api_core import exceptions - from google.cloud.firestore_v1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy failure. 
- firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - exc = exceptions.InternalServerError("Fire during commit.") - firestore_api.commit.side_effect = exc - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a transaction with some mutations and call _commit(). - transaction = self._make_one(client) - txn_id = b"beep-fail-commit" - transaction._id = txn_id - transaction.create(client.document("up", "down"), {"water": 1.0}) - transaction.delete(client.document("up", "left")) - write_pbs = transaction._write_pbs[::] - - with self.assertRaises(exceptions.InternalServerError) as exc_info: - transaction._commit() - - self.assertIs(exc_info.exception, exc) - self.assertEqual(transaction._id, txn_id) - self.assertEqual(transaction._write_pbs, write_pbs) - - # Verify the called mock. - firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - def test_get_all(self): - client = mock.Mock(spec=["get_all"]) - transaction = self._make_one(client) - ref1, ref2 = mock.Mock(), mock.Mock() - result = transaction.get_all([ref1, ref2]) - client.get_all.assert_called_once_with([ref1, ref2], transaction=transaction.id) - self.assertIs(result, client.get_all.return_value) - - def test_get_document_ref(self): - from google.cloud.firestore_v1.document import DocumentReference - - client = mock.Mock(spec=["get_all"]) - transaction = self._make_one(client) - ref = DocumentReference("documents", "doc-id") - result = transaction.get(ref) - client.get_all.assert_called_once_with([ref], transaction=transaction.id) - self.assertIs(result, client.get_all.return_value) - - def test_get_w_query(self): - from google.cloud.firestore_v1.query import Query - - client = mock.Mock(spec=[]) - transaction = self._make_one(client) - query = Query(parent=mock.Mock(spec=[])) - query.stream = mock.MagicMock() - result = transaction.get(query) - query.stream.assert_called_once_with(transaction=transaction.id) - self.assertIs(result, query.stream.return_value) - - def test_get_failure(self): - client = _make_client() - transaction = self._make_one(client) - ref_or_query = object() - with self.assertRaises(ValueError): - transaction.get(ref_or_query) - - -class Test_Transactional(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.transaction import _Transactional - - return _Transactional - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - wrapped = self._make_one(mock.sentinel.callable_) - self.assertIs(wrapped.to_wrap, mock.sentinel.callable_) - self.assertIsNone(wrapped.current_id) - self.assertIsNone(wrapped.retry_id) - - def test__reset(self): - wrapped = self._make_one(mock.sentinel.callable_) - wrapped.current_id = b"not-none" - wrapped.retry_id = b"also-not" - - ret_val = wrapped._reset() - self.assertIsNone(ret_val) - - self.assertIsNone(wrapped.current_id) - self.assertIsNone(wrapped.retry_id) - - def test__pre_commit_success(self): - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"totes-began" - transaction = _make_transaction(txn_id) - result = wrapped._pre_commit(transaction, "pos", key="word") - self.assertIs(result, mock.sentinel.result) - - self.assertEqual(transaction._id, txn_id) - 
self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction, "pos", key="word") - firestore_api = transaction._client._firestore_api - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_not_called() - - def test__pre_commit_retry_id_already_set_success(self): - from google.cloud.firestore_v1.proto import common_pb2 - - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - txn_id1 = b"already-set" - wrapped.retry_id = txn_id1 - - txn_id2 = b"ok-here-too" - transaction = _make_transaction(txn_id2) - result = wrapped._pre_commit(transaction) - self.assertIs(result, mock.sentinel.result) - - self.assertEqual(transaction._id, txn_id2) - self.assertEqual(wrapped.current_id, txn_id2) - self.assertEqual(wrapped.retry_id, txn_id1) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction) - firestore_api = transaction._client._firestore_api - options_ = common_pb2.TransactionOptions( - read_write=common_pb2.TransactionOptions.ReadWrite( - retry_transaction=txn_id1 - ) - ) - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=options_, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_not_called() - - def test__pre_commit_failure(self): - exc = RuntimeError("Nope not today.") - to_wrap = mock.Mock(side_effect=exc, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"gotta-fail" - transaction = _make_transaction(txn_id) - with self.assertRaises(RuntimeError) as exc_info: - wrapped._pre_commit(transaction, 10, 20) - self.assertIs(exc_info.exception, exc) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction, 10, 20) - firestore_api = transaction._client._firestore_api - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_called_once_with( - transaction._client._database_string, - txn_id, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.commit.assert_not_called() - - def test__pre_commit_failure_with_rollback_failure(self): - from google.api_core import exceptions - - exc1 = ValueError("I will not be only failure.") - to_wrap = mock.Mock(side_effect=exc1, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"both-will-fail" - transaction = _make_transaction(txn_id) - # Actually force the ``rollback`` to fail as well. - exc2 = exceptions.InternalServerError("Rollback blues.") - firestore_api = transaction._client._firestore_api - firestore_api.rollback.side_effect = exc2 - - # Try to ``_pre_commit`` - with self.assertRaises(exceptions.InternalServerError) as exc_info: - wrapped._pre_commit(transaction, a="b", c="zebra") - self.assertIs(exc_info.exception, exc2) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. 
- to_wrap.assert_called_once_with(transaction, a="b", c="zebra") - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_called_once_with( - transaction._client._database_string, - txn_id, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.commit.assert_not_called() - - def test__maybe_commit_success(self): - wrapped = self._make_one(mock.sentinel.callable_) - - txn_id = b"nyet" - transaction = _make_transaction(txn_id) - transaction._id = txn_id # We won't call ``begin()``. - succeeded = wrapped._maybe_commit(transaction) - self.assertTrue(succeeded) - - # On success, _id is reset. - self.assertIsNone(transaction._id) - - # Verify mocks. - firestore_api = transaction._client._firestore_api - firestore_api.begin_transaction.assert_not_called() - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test__maybe_commit_failure_read_only(self): - from google.api_core import exceptions - - wrapped = self._make_one(mock.sentinel.callable_) - - txn_id = b"failed" - transaction = _make_transaction(txn_id, read_only=True) - transaction._id = txn_id # We won't call ``begin()``. - wrapped.current_id = txn_id # We won't call ``_pre_commit()``. - wrapped.retry_id = txn_id # We won't call ``_pre_commit()``. - - # Actually force the ``commit`` to fail (use ABORTED, but cannot - # retry since read-only). - exc = exceptions.Aborted("Read-only did a bad.") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = exc - - with self.assertRaises(exceptions.Aborted) as exc_info: - wrapped._maybe_commit(transaction) - self.assertIs(exc_info.exception, exc) - - self.assertEqual(transaction._id, txn_id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - firestore_api.begin_transaction.assert_not_called() - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test__maybe_commit_failure_can_retry(self): - from google.api_core import exceptions - - wrapped = self._make_one(mock.sentinel.callable_) - - txn_id = b"failed-but-retry" - transaction = _make_transaction(txn_id) - transaction._id = txn_id # We won't call ``begin()``. - wrapped.current_id = txn_id # We won't call ``_pre_commit()``. - wrapped.retry_id = txn_id # We won't call ``_pre_commit()``. - - # Actually force the ``commit`` to fail. - exc = exceptions.Aborted("Read-write did a bad.") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = exc - - succeeded = wrapped._maybe_commit(transaction) - self.assertFalse(succeeded) - - self.assertEqual(transaction._id, txn_id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. 
- firestore_api.begin_transaction.assert_not_called() - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test__maybe_commit_failure_cannot_retry(self): - from google.api_core import exceptions - - wrapped = self._make_one(mock.sentinel.callable_) - - txn_id = b"failed-but-not-retryable" - transaction = _make_transaction(txn_id) - transaction._id = txn_id # We won't call ``begin()``. - wrapped.current_id = txn_id # We won't call ``_pre_commit()``. - wrapped.retry_id = txn_id # We won't call ``_pre_commit()``. - - # Actually force the ``commit`` to fail. - exc = exceptions.InternalServerError("Real bad thing") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = exc - - with self.assertRaises(exceptions.InternalServerError) as exc_info: - wrapped._maybe_commit(transaction) - self.assertIs(exc_info.exception, exc) - - self.assertEqual(transaction._id, txn_id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - firestore_api.begin_transaction.assert_not_called() - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test___call__success_first_attempt(self): - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"whole-enchilada" - transaction = _make_transaction(txn_id) - result = wrapped(transaction, "a", b="c") - self.assertIs(result, mock.sentinel.result) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction, "a", b="c") - firestore_api = transaction._client._firestore_api - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test___call__success_second_attempt(self): - from google.api_core import exceptions - from google.cloud.firestore_v1.proto import common_pb2 - from google.cloud.firestore_v1.proto import firestore_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"whole-enchilada" - transaction = _make_transaction(txn_id) - - # Actually force the ``commit`` to fail on first / succeed on second. - exc = exceptions.Aborted("Contention junction.") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = [ - exc, - firestore_pb2.CommitResponse(write_results=[write_pb2.WriteResult()]), - ] - - # Call the __call__-able ``wrapped``. - result = wrapped(transaction, "a", b="c") - self.assertIs(result, mock.sentinel.result) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. 
- wrapped_call = mock.call(transaction, "a", b="c") - self.assertEqual(to_wrap.mock_calls, [wrapped_call, wrapped_call]) - firestore_api = transaction._client._firestore_api - db_str = transaction._client._database_string - options_ = common_pb2.TransactionOptions( - read_write=common_pb2.TransactionOptions.ReadWrite(retry_transaction=txn_id) - ) - self.assertEqual( - firestore_api.begin_transaction.mock_calls, - [ - mock.call( - db_str, options_=None, metadata=transaction._client._rpc_metadata - ), - mock.call( - db_str, - options_=options_, - metadata=transaction._client._rpc_metadata, - ), - ], - ) - firestore_api.rollback.assert_not_called() - commit_call = mock.call( - db_str, [], transaction=txn_id, metadata=transaction._client._rpc_metadata - ) - self.assertEqual(firestore_api.commit.mock_calls, [commit_call, commit_call]) - - def test___call__failure(self): - from google.api_core import exceptions - from google.cloud.firestore_v1.transaction import _EXCEED_ATTEMPTS_TEMPLATE - - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"only-one-shot" - transaction = _make_transaction(txn_id, max_attempts=1) - - # Actually force the ``commit`` to fail. - exc = exceptions.Aborted("Contention just once.") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = exc - - # Call the __call__-able ``wrapped``. - with self.assertRaises(ValueError) as exc_info: - wrapped(transaction, "here", there=1.5) - - err_msg = _EXCEED_ATTEMPTS_TEMPLATE.format(transaction._max_attempts) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction, "here", there=1.5) - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_called_once_with( - transaction._client._database_string, - txn_id, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - -class Test_transactional(unittest.TestCase): - @staticmethod - def _call_fut(to_wrap): - from google.cloud.firestore_v1.transaction import transactional - - return transactional(to_wrap) - - def test_it(self): - from google.cloud.firestore_v1.transaction import _Transactional - - wrapped = self._call_fut(mock.sentinel.callable_) - self.assertIsInstance(wrapped, _Transactional) - self.assertIs(wrapped.to_wrap, mock.sentinel.callable_) - - -class Test__commit_with_retry(unittest.TestCase): - @staticmethod - def _call_fut(client, write_pbs, transaction_id): - from google.cloud.firestore_v1.transaction import _commit_with_retry - - return _commit_with_retry(client, write_pbs, transaction_id) - - @mock.patch("google.cloud.firestore_v1.transaction._sleep") - def test_success_first_attempt(self, _sleep): - from google.cloud.firestore_v1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - - # Attach the fake GAPIC to a real client. - client = _make_client("summer") - client._firestore_api_internal = firestore_api - - # Call function and check result. 
- txn_id = b"cheeeeeez" - commit_response = self._call_fut(client, mock.sentinel.write_pbs, txn_id) - self.assertIs(commit_response, firestore_api.commit.return_value) - - # Verify mocks used. - _sleep.assert_not_called() - firestore_api.commit.assert_called_once_with( - client._database_string, - mock.sentinel.write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - @mock.patch("google.cloud.firestore_v1.transaction._sleep", side_effect=[2.0, 4.0]) - def test_success_third_attempt(self, _sleep): - from google.api_core import exceptions - from google.cloud.firestore_v1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - # Make sure the first two requests fail and the third succeeds. - firestore_api.commit.side_effect = [ - exceptions.ServiceUnavailable("Server sleepy."), - exceptions.ServiceUnavailable("Server groggy."), - mock.sentinel.commit_response, - ] - - # Attach the fake GAPIC to a real client. - client = _make_client("outside") - client._firestore_api_internal = firestore_api - - # Call function and check result. - txn_id = b"the-world\x00" - commit_response = self._call_fut(client, mock.sentinel.write_pbs, txn_id) - self.assertIs(commit_response, mock.sentinel.commit_response) - - # Verify mocks used. - self.assertEqual(_sleep.call_count, 2) - _sleep.assert_any_call(1.0) - _sleep.assert_any_call(2.0) - # commit() called same way 3 times. - commit_call = mock.call( - client._database_string, - mock.sentinel.write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - self.assertEqual( - firestore_api.commit.mock_calls, [commit_call, commit_call, commit_call] - ) - - @mock.patch("google.cloud.firestore_v1.transaction._sleep") - def test_failure_first_attempt(self, _sleep): - from google.api_core import exceptions - from google.cloud.firestore_v1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - # Make sure the first request fails with an un-retryable error. - exc = exceptions.ResourceExhausted("We ran out of fries.") - firestore_api.commit.side_effect = exc - - # Attach the fake GAPIC to a real client. - client = _make_client("peanut-butter") - client._firestore_api_internal = firestore_api - - # Call function and check result. - txn_id = b"\x08\x06\x07\x05\x03\x00\x09-jenny" - with self.assertRaises(exceptions.ResourceExhausted) as exc_info: - self._call_fut(client, mock.sentinel.write_pbs, txn_id) - - self.assertIs(exc_info.exception, exc) - - # Verify mocks used. - _sleep.assert_not_called() - firestore_api.commit.assert_called_once_with( - client._database_string, - mock.sentinel.write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - @mock.patch("google.cloud.firestore_v1.transaction._sleep", return_value=2.0) - def test_failure_second_attempt(self, _sleep): - from google.api_core import exceptions - from google.cloud.firestore_v1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - # Make sure the first request fails retry-able and second - # fails non-retryable. 
- exc1 = exceptions.ServiceUnavailable("Come back next time.") - exc2 = exceptions.InternalServerError("Server on fritz.") - firestore_api.commit.side_effect = [exc1, exc2] - - # Attach the fake GAPIC to a real client. - client = _make_client("peanut-butter") - client._firestore_api_internal = firestore_api - - # Call function and check result. - txn_id = b"the-journey-when-and-where-well-go" - with self.assertRaises(exceptions.InternalServerError) as exc_info: - self._call_fut(client, mock.sentinel.write_pbs, txn_id) - - self.assertIs(exc_info.exception, exc2) - - # Verify mocks used. - _sleep.assert_called_once_with(1.0) - # commit() called same way 2 times. - commit_call = mock.call( - client._database_string, - mock.sentinel.write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - self.assertEqual(firestore_api.commit.mock_calls, [commit_call, commit_call]) - - -class Test__sleep(unittest.TestCase): - @staticmethod - def _call_fut(current_sleep, **kwargs): - from google.cloud.firestore_v1.transaction import _sleep - - return _sleep(current_sleep, **kwargs) - - @mock.patch("random.uniform", return_value=5.5) - @mock.patch("time.sleep", return_value=None) - def test_defaults(self, sleep, uniform): - curr_sleep = 10.0 - self.assertLessEqual(uniform.return_value, curr_sleep) - - new_sleep = self._call_fut(curr_sleep) - self.assertEqual(new_sleep, 2.0 * curr_sleep) - - uniform.assert_called_once_with(0.0, curr_sleep) - sleep.assert_called_once_with(uniform.return_value) - - @mock.patch("random.uniform", return_value=10.5) - @mock.patch("time.sleep", return_value=None) - def test_explicit(self, sleep, uniform): - curr_sleep = 12.25 - self.assertLessEqual(uniform.return_value, curr_sleep) - - multiplier = 1.5 - new_sleep = self._call_fut(curr_sleep, max_sleep=100.0, multiplier=multiplier) - self.assertEqual(new_sleep, multiplier * curr_sleep) - - uniform.assert_called_once_with(0.0, curr_sleep) - sleep.assert_called_once_with(uniform.return_value) - - @mock.patch("random.uniform", return_value=6.75) - @mock.patch("time.sleep", return_value=None) - def test_exceeds_max(self, sleep, uniform): - curr_sleep = 20.0 - self.assertLessEqual(uniform.return_value, curr_sleep) - - max_sleep = 38.5 - new_sleep = self._call_fut(curr_sleep, max_sleep=max_sleep, multiplier=2.0) - self.assertEqual(new_sleep, max_sleep) - - uniform.assert_called_once_with(0.0, curr_sleep) - sleep.assert_called_once_with(uniform.return_value) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="feral-tom-cat"): - from google.cloud.firestore_v1.client import Client - - credentials = _make_credentials() - return Client(project=project, credentials=credentials) - - -def _make_transaction(txn_id, **txn_kwargs): - from google.protobuf import empty_pb2 - from google.cloud.firestore_v1.gapic import firestore_client - from google.cloud.firestore_v1.proto import firestore_pb2 - from google.cloud.firestore_v1.proto import write_pb2 - from google.cloud.firestore_v1.transaction import Transaction - - # Create a fake GAPIC ... - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - # ... with a dummy ``BeginTransactionResponse`` result ... - begin_response = firestore_pb2.BeginTransactionResponse(transaction=txn_id) - firestore_api.begin_transaction.return_value = begin_response - # ... and a dummy ``Rollback`` result ... 
- firestore_api.rollback.return_value = empty_pb2.Empty() - # ... and a dummy ``Commit`` result. - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult()] - ) - firestore_api.commit.return_value = commit_response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - return Transaction(client, **txn_kwargs) diff --git a/firestore/tests/unit/v1/test_transforms.py b/firestore/tests/unit/v1/test_transforms.py deleted file mode 100644 index 04a6dcdc0899..000000000000 --- a/firestore/tests/unit/v1/test_transforms.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - - -class Test_ValueList(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.transforms import _ValueList - - return _ValueList - - def _make_one(self, values): - return self._get_target_class()(values) - - def test_ctor_w_non_list_non_tuple(self): - invalid_values = (None, u"phred", b"DEADBEEF", 123, {}, object()) - for invalid_value in invalid_values: - with self.assertRaises(ValueError): - self._make_one(invalid_value) - - def test_ctor_w_empty(self): - with self.assertRaises(ValueError): - self._make_one([]) - - def test_ctor_w_non_empty_list(self): - values = ["phred", "bharney"] - inst = self._make_one(values) - self.assertEqual(inst.values, values) - - def test_ctor_w_non_empty_tuple(self): - values = ("phred", "bharney") - inst = self._make_one(values) - self.assertEqual(inst.values, list(values)) - - def test___eq___other_type(self): - values = ("phred", "bharney") - inst = self._make_one(values) - other = object() - self.assertFalse(inst == other) - - def test___eq___different_values(self): - values = ("phred", "bharney") - other_values = ("wylma", "bhetty") - inst = self._make_one(values) - other = self._make_one(other_values) - self.assertFalse(inst == other) - - def test___eq___same_values(self): - values = ("phred", "bharney") - inst = self._make_one(values) - other = self._make_one(values) - self.assertTrue(inst == other) - - -class Test_NumericValue(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1.transforms import _NumericValue - - return _NumericValue - - def _make_one(self, values): - return self._get_target_class()(values) - - def test_ctor_w_invalid_types(self): - invalid_values = (None, u"phred", b"DEADBEEF", [], {}, object()) - for invalid_value in invalid_values: - with self.assertRaises(ValueError): - self._make_one(invalid_value) - - def test_ctor_w_int(self): - values = (-10, -1, 0, 1, 10) - for value in values: - inst = self._make_one(value) - self.assertEqual(inst.value, value) - - def test_ctor_w_float(self): - values = (-10.0, -1.0, 0.0, 1.0, 10.0) - for value in values: - inst = self._make_one(value) - self.assertEqual(inst.value, value) - - def test___eq___other_type(self): - value = 3.1415926 - inst = 
self._make_one(value) - other = object() - self.assertFalse(inst == other) - - def test___eq___different_value(self): - value = 3.1415926 - other_value = 2.71828 - inst = self._make_one(value) - other = self._make_one(other_value) - self.assertFalse(inst == other) - - def test___eq___same_value(self): - value = 3.1415926 - inst = self._make_one(value) - other = self._make_one(value) - self.assertTrue(inst == other) diff --git a/firestore/tests/unit/v1/test_watch.py b/firestore/tests/unit/v1/test_watch.py deleted file mode 100644 index 0778717bcc09..000000000000 --- a/firestore/tests/unit/v1/test_watch.py +++ /dev/null @@ -1,975 +0,0 @@ -import datetime -import unittest -import mock -from google.cloud.firestore_v1.proto import firestore_pb2 - - -class TestWatchDocTree(unittest.TestCase): - def _makeOne(self): - from google.cloud.firestore_v1.watch import WatchDocTree - - return WatchDocTree() - - def test_insert_and_keys(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - self.assertEqual(sorted(inst.keys()), ["a", "b"]) - - def test_remove_and_keys(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - inst = inst.remove("a") - self.assertEqual(sorted(inst.keys()), ["b"]) - - def test_insert_and_find(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - val = inst.find("a") - self.assertEqual(val.value, 2) - - def test___len__(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - self.assertEqual(len(inst), 2) - - def test___iter__(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - self.assertEqual(sorted(list(inst)), ["a", "b"]) - - def test___contains__(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - self.assertTrue("b" in inst) - self.assertFalse("a" in inst) - - -class TestDocumentChange(unittest.TestCase): - def _makeOne(self, type, document, old_index, new_index): - from google.cloud.firestore_v1.watch import DocumentChange - - return DocumentChange(type, document, old_index, new_index) - - def test_ctor(self): - inst = self._makeOne("type", "document", "old_index", "new_index") - self.assertEqual(inst.type, "type") - self.assertEqual(inst.document, "document") - self.assertEqual(inst.old_index, "old_index") - self.assertEqual(inst.new_index, "new_index") - - -class TestWatchResult(unittest.TestCase): - def _makeOne(self, snapshot, name, change_type): - from google.cloud.firestore_v1.watch import WatchResult - - return WatchResult(snapshot, name, change_type) - - def test_ctor(self): - inst = self._makeOne("snapshot", "name", "change_type") - self.assertEqual(inst.snapshot, "snapshot") - self.assertEqual(inst.name, "name") - self.assertEqual(inst.change_type, "change_type") - - -class Test_maybe_wrap_exception(unittest.TestCase): - def _callFUT(self, exc): - from google.cloud.firestore_v1.watch import _maybe_wrap_exception - - return _maybe_wrap_exception(exc) - - def test_is_grpc_error(self): - import grpc - from google.api_core.exceptions import GoogleAPICallError - - exc = grpc.RpcError() - result = self._callFUT(exc) - self.assertEqual(result.__class__, GoogleAPICallError) - - def test_is_not_grpc_error(self): - exc = ValueError() - result = self._callFUT(exc) - self.assertEqual(result.__class__, ValueError) - - -class Test_document_watch_comparator(unittest.TestCase): - def _callFUT(self, doc1, doc2): - from google.cloud.firestore_v1.watch import document_watch_comparator - 
- return document_watch_comparator(doc1, doc2) - - def test_same_doc(self): - result = self._callFUT(1, 1) - self.assertEqual(result, 0) - - def test_diff_doc(self): - self.assertRaises(AssertionError, self._callFUT, 1, 2) - - -class Test_should_recover(unittest.TestCase): - def _callFUT(self, exception): - from google.cloud.firestore_v1.watch import _should_recover - - return _should_recover(exception) - - def test_w_unavailable(self): - from google.api_core.exceptions import ServiceUnavailable - - exception = ServiceUnavailable("testing") - - self.assertTrue(self._callFUT(exception)) - - def test_w_non_recoverable(self): - exception = ValueError("testing") - - self.assertFalse(self._callFUT(exception)) - - -class Test_should_terminate(unittest.TestCase): - def _callFUT(self, exception): - from google.cloud.firestore_v1.watch import _should_terminate - - return _should_terminate(exception) - - def test_w_unavailable(self): - from google.api_core.exceptions import Cancelled - - exception = Cancelled("testing") - - self.assertTrue(self._callFUT(exception)) - - def test_w_non_recoverable(self): - exception = ValueError("testing") - - self.assertFalse(self._callFUT(exception)) - - -class TestWatch(unittest.TestCase): - def _makeOne( - self, - document_reference=None, - firestore=None, - target=None, - comparator=None, - snapshot_callback=None, - snapshot_class=None, - reference_class=None, - ): # pragma: NO COVER - from google.cloud.firestore_v1.watch import Watch - - if document_reference is None: - document_reference = DummyDocumentReference() - if firestore is None: - firestore = DummyFirestore() - if target is None: - WATCH_TARGET_ID = 0x5079 # "Py" - target = {"documents": {"documents": ["/"]}, "target_id": WATCH_TARGET_ID} - if comparator is None: - comparator = self._document_watch_comparator - if snapshot_callback is None: - snapshot_callback = self._snapshot_callback - if snapshot_class is None: - snapshot_class = DummyDocumentSnapshot - if reference_class is None: - reference_class = DummyDocumentReference - inst = Watch( - document_reference, - firestore, - target, - comparator, - snapshot_callback, - snapshot_class, - reference_class, - BackgroundConsumer=DummyBackgroundConsumer, - ResumableBidiRpc=DummyRpc, - ) - return inst - - def setUp(self): - self.snapshotted = None - - def _document_watch_comparator(self, doc1, doc2): # pragma: NO COVER - return 0 - - def _snapshot_callback(self, docs, changes, read_time): - self.snapshotted = (docs, changes, read_time) - - def test_ctor(self): - from google.cloud.firestore_v1.proto import firestore_pb2 - from google.cloud.firestore_v1.watch import _should_recover - from google.cloud.firestore_v1.watch import _should_terminate - - inst = self._makeOne() - self.assertTrue(inst._consumer.started) - self.assertTrue(inst._rpc.callbacks, [inst._on_rpc_done]) - self.assertIs(inst._rpc.start_rpc, inst._api.transport.listen) - self.assertIs(inst._rpc.should_recover, _should_recover) - self.assertIs(inst._rpc.should_terminate, _should_terminate) - self.assertIsInstance(inst._rpc.initial_request, firestore_pb2.ListenRequest) - self.assertEqual(inst._rpc.metadata, DummyFirestore._rpc_metadata) - - def test__on_rpc_done(self): - from google.cloud.firestore_v1.watch import _RPC_ERROR_THREAD_NAME - - inst = self._makeOne() - threading = DummyThreading() - with mock.patch("google.cloud.firestore_v1.watch.threading", threading): - inst._on_rpc_done(True) - self.assertTrue(threading.threads[_RPC_ERROR_THREAD_NAME].started) - - def test_close(self): - inst = 
self._makeOne() - inst.close() - self.assertEqual(inst._consumer, None) - self.assertEqual(inst._rpc, None) - self.assertTrue(inst._closed) - - def test_close_already_closed(self): - inst = self._makeOne() - inst._closed = True - old_consumer = inst._consumer - inst.close() - self.assertEqual(inst._consumer, old_consumer) - - def test_close_inactive(self): - inst = self._makeOne() - old_consumer = inst._consumer - old_consumer.is_active = False - inst.close() - self.assertEqual(old_consumer.stopped, False) - - def test_unsubscribe(self): - inst = self._makeOne() - inst.unsubscribe() - self.assertTrue(inst._rpc is None) - - def test_for_document(self): - from google.cloud.firestore_v1.watch import Watch - - docref = DummyDocumentReference() - snapshot_callback = self._snapshot_callback - snapshot_class_instance = DummyDocumentSnapshot - document_reference_class_instance = DummyDocumentReference - modulename = "google.cloud.firestore_v1.watch" - with mock.patch("%s.Watch.ResumableBidiRpc" % modulename, DummyRpc): - with mock.patch( - "%s.Watch.BackgroundConsumer" % modulename, DummyBackgroundConsumer - ): - inst = Watch.for_document( - docref, - snapshot_callback, - snapshot_class_instance, - document_reference_class_instance, - ) - self.assertTrue(inst._consumer.started) - self.assertTrue(inst._rpc.callbacks, [inst._on_rpc_done]) - - def test_for_query(self): - from google.cloud.firestore_v1.watch import Watch - - snapshot_callback = self._snapshot_callback - snapshot_class_instance = DummyDocumentSnapshot - document_reference_class_instance = DummyDocumentReference - client = DummyFirestore() - parent = DummyCollection(client) - modulename = "google.cloud.firestore_v1.watch" - pb2 = DummyPb2() - with mock.patch("%s.firestore_pb2" % modulename, pb2): - with mock.patch("%s.Watch.ResumableBidiRpc" % modulename, DummyRpc): - with mock.patch( - "%s.Watch.BackgroundConsumer" % modulename, DummyBackgroundConsumer - ): - query = DummyQuery(parent=parent) - inst = Watch.for_query( - query, - snapshot_callback, - snapshot_class_instance, - document_reference_class_instance, - ) - self.assertTrue(inst._consumer.started) - self.assertTrue(inst._rpc.callbacks, [inst._on_rpc_done]) - self.assertEqual(inst._targets["query"], "dummy query target") - - def test_for_query_nested(self): - from google.cloud.firestore_v1.watch import Watch - - snapshot_callback = self._snapshot_callback - snapshot_class_instance = DummyDocumentSnapshot - document_reference_class_instance = DummyDocumentReference - client = DummyFirestore() - root = DummyCollection(client) - grandparent = DummyDocument("document", parent=root) - parent = DummyCollection(client, parent=grandparent) - modulename = "google.cloud.firestore_v1.watch" - pb2 = DummyPb2() - with mock.patch("%s.firestore_pb2" % modulename, pb2): - with mock.patch("%s.Watch.ResumableBidiRpc" % modulename, DummyRpc): - with mock.patch( - "%s.Watch.BackgroundConsumer" % modulename, DummyBackgroundConsumer - ): - query = DummyQuery(parent=parent) - inst = Watch.for_query( - query, - snapshot_callback, - snapshot_class_instance, - document_reference_class_instance, - ) - self.assertTrue(inst._consumer.started) - self.assertTrue(inst._rpc.callbacks, [inst._on_rpc_done]) - self.assertEqual(inst._targets["query"], "dummy query target") - - def test_on_snapshot_target_w_none(self): - inst = self._makeOne() - proto = None - inst.on_snapshot(proto) # nothing to assert, no mutations, no rtnval - self.assertTrue(inst._consumer is None) - self.assertTrue(inst._rpc is None) - - def 
test_on_snapshot_target_no_change_no_target_ids_not_current(self): - inst = self._makeOne() - proto = DummyProto() - inst.on_snapshot(proto) # nothing to assert, no mutations, no rtnval - - def test_on_snapshot_target_no_change_no_target_ids_current(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change.read_time = 1 - inst.current = True - - def push(read_time, next_resume_token): - inst._read_time = read_time - inst._next_resume_token = next_resume_token - - inst.push = push - inst.on_snapshot(proto) - self.assertEqual(inst._read_time, 1) - self.assertEqual(inst._next_resume_token, None) - - def test_on_snapshot_target_add(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change.target_change_type = firestore_pb2.TargetChange.ADD - proto.target_change.target_ids = [1] # not "Py" - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertEqual(str(exc.exception), "Unexpected target ID 1 sent by server") - - def test_on_snapshot_target_remove(self): - inst = self._makeOne() - proto = DummyProto() - target_change = proto.target_change - target_change.target_change_type = firestore_pb2.TargetChange.REMOVE - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertEqual(str(exc.exception), "Error 1: hi") - - def test_on_snapshot_target_remove_nocause(self): - inst = self._makeOne() - proto = DummyProto() - target_change = proto.target_change - target_change.cause = None - target_change.target_change_type = firestore_pb2.TargetChange.REMOVE - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertEqual(str(exc.exception), "Error 13: internal error") - - def test_on_snapshot_target_reset(self): - inst = self._makeOne() - - def reset(): - inst._docs_reset = True - - inst._reset_docs = reset - proto = DummyProto() - target_change = proto.target_change - target_change.target_change_type = firestore_pb2.TargetChange.RESET - inst.on_snapshot(proto) - self.assertTrue(inst._docs_reset) - - def test_on_snapshot_target_current(self): - inst = self._makeOne() - inst.current = False - proto = DummyProto() - target_change = proto.target_change - target_change.target_change_type = firestore_pb2.TargetChange.CURRENT - inst.on_snapshot(proto) - self.assertTrue(inst.current) - - def test_on_snapshot_target_unknown(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change.target_change_type = "unknown" - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertTrue(inst._consumer is None) - self.assertTrue(inst._rpc is None) - self.assertEqual(str(exc.exception), "Unknown target change type: unknown ") - - def test_on_snapshot_document_change_removed(self): - from google.cloud.firestore_v1.watch import WATCH_TARGET_ID, ChangeType - - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change.removed_target_ids = [WATCH_TARGET_ID] - - class DummyDocument: - name = "fred" - - proto.document_change.document = DummyDocument() - inst.on_snapshot(proto) - self.assertTrue(inst.change_map["fred"] is ChangeType.REMOVED) - - def test_on_snapshot_document_change_changed(self): - from google.cloud.firestore_v1.watch import WATCH_TARGET_ID - - inst = self._makeOne() - - proto = DummyProto() - proto.target_change = "" - proto.document_change.target_ids = [WATCH_TARGET_ID] - - class DummyDocument: - name = "fred" - fields = {} - create_time = None - update_time = None - - proto.document_change.document = DummyDocument() - 
inst.on_snapshot(proto) - self.assertEqual(inst.change_map["fred"].data, {}) - - def test_on_snapshot_document_change_changed_docname_db_prefix(self): - # TODO: Verify the current behavior. The change map currently contains - # the db-prefixed document name and not the bare document name. - from google.cloud.firestore_v1.watch import WATCH_TARGET_ID - - inst = self._makeOne() - - proto = DummyProto() - proto.target_change = "" - proto.document_change.target_ids = [WATCH_TARGET_ID] - - class DummyDocument: - name = "abc://foo/documents/fred" - fields = {} - create_time = None - update_time = None - - proto.document_change.document = DummyDocument() - inst._firestore._database_string = "abc://foo" - inst.on_snapshot(proto) - self.assertEqual(inst.change_map["abc://foo/documents/fred"].data, {}) - - def test_on_snapshot_document_change_neither_changed_nor_removed(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change.target_ids = [] - - inst.on_snapshot(proto) - self.assertTrue(not inst.change_map) - - def test_on_snapshot_document_removed(self): - from google.cloud.firestore_v1.watch import ChangeType - - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change = "" - - class DummyRemove(object): - document = "fred" - - remove = DummyRemove() - proto.document_remove = remove - proto.document_delete = "" - inst.on_snapshot(proto) - self.assertTrue(inst.change_map["fred"] is ChangeType.REMOVED) - - def test_on_snapshot_filter_update(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change = "" - proto.document_remove = "" - proto.document_delete = "" - - class DummyFilter(object): - count = 999 - - proto.filter = DummyFilter() - - def reset(): - inst._docs_reset = True - - inst._reset_docs = reset - inst.on_snapshot(proto) - self.assertTrue(inst._docs_reset) - - def test_on_snapshot_filter_update_no_size_change(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change = "" - proto.document_remove = "" - proto.document_delete = "" - - class DummyFilter(object): - count = 0 - - proto.filter = DummyFilter() - inst._docs_reset = False - - inst.on_snapshot(proto) - self.assertFalse(inst._docs_reset) - - def test_on_snapshot_unknown_listen_type(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change = "" - proto.document_remove = "" - proto.document_delete = "" - proto.filter = "" - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertTrue( - str(exc.exception).startswith("Unknown listen response type"), - str(exc.exception), - ) - - def test_push_callback_called_no_changes(self): - import pytz - - class DummyReadTime(object): - seconds = 1534858278 - - inst = self._makeOne() - inst.push(DummyReadTime, "token") - self.assertEqual( - self.snapshotted, - ([], [], datetime.datetime.fromtimestamp(DummyReadTime.seconds, pytz.utc)), - ) - self.assertTrue(inst.has_pushed) - self.assertEqual(inst.resume_token, "token") - - def test_push_already_pushed(self): - class DummyReadTime(object): - seconds = 1534858278 - - inst = self._makeOne() - inst.has_pushed = True - inst.push(DummyReadTime, "token") - self.assertEqual(self.snapshotted, None) - self.assertTrue(inst.has_pushed) - self.assertEqual(inst.resume_token, "token") - - def test__current_size_empty(self): - inst = self._makeOne() - result = inst._current_size() - self.assertEqual(result, 0) - 
- def test__current_size_docmap_has_one(self): - inst = self._makeOne() - inst.doc_map["a"] = 1 - result = inst._current_size() - self.assertEqual(result, 1) - - def test__affects_target_target_id_None(self): - inst = self._makeOne() - self.assertTrue(inst._affects_target(None, [])) - - def test__affects_target_current_id_in_target_ids(self): - inst = self._makeOne() - self.assertTrue(inst._affects_target([1], 1)) - - def test__affects_target_current_id_not_in_target_ids(self): - inst = self._makeOne() - self.assertFalse(inst._affects_target([1], 2)) - - def test__extract_changes_doc_removed(self): - from google.cloud.firestore_v1.watch import ChangeType - - inst = self._makeOne() - changes = {"name": ChangeType.REMOVED} - doc_map = {"name": True} - results = inst._extract_changes(doc_map, changes, None) - self.assertEqual(results, (["name"], [], [])) - - def test__extract_changes_doc_removed_docname_not_in_docmap(self): - from google.cloud.firestore_v1.watch import ChangeType - - inst = self._makeOne() - changes = {"name": ChangeType.REMOVED} - doc_map = {} - results = inst._extract_changes(doc_map, changes, None) - self.assertEqual(results, ([], [], [])) - - def test__extract_changes_doc_updated(self): - inst = self._makeOne() - - class Dummy(object): - pass - - doc = Dummy() - snapshot = Dummy() - changes = {"name": snapshot} - doc_map = {"name": doc} - results = inst._extract_changes(doc_map, changes, 1) - self.assertEqual(results, ([], [], [snapshot])) - self.assertEqual(snapshot.read_time, 1) - - def test__extract_changes_doc_updated_read_time_is_None(self): - inst = self._makeOne() - - class Dummy(object): - pass - - doc = Dummy() - snapshot = Dummy() - snapshot.read_time = None - changes = {"name": snapshot} - doc_map = {"name": doc} - results = inst._extract_changes(doc_map, changes, None) - self.assertEqual(results, ([], [], [snapshot])) - self.assertEqual(snapshot.read_time, None) - - def test__extract_changes_doc_added(self): - inst = self._makeOne() - - class Dummy(object): - pass - - snapshot = Dummy() - changes = {"name": snapshot} - doc_map = {} - results = inst._extract_changes(doc_map, changes, 1) - self.assertEqual(results, ([], [snapshot], [])) - self.assertEqual(snapshot.read_time, 1) - - def test__extract_changes_doc_added_read_time_is_None(self): - inst = self._makeOne() - - class Dummy(object): - pass - - snapshot = Dummy() - snapshot.read_time = None - changes = {"name": snapshot} - doc_map = {} - results = inst._extract_changes(doc_map, changes, None) - self.assertEqual(results, ([], [snapshot], [])) - self.assertEqual(snapshot.read_time, None) - - def test__compute_snapshot_doctree_and_docmap_disagree_about_length(self): - inst = self._makeOne() - doc_tree = {} - doc_map = {None: None} - self.assertRaises( - AssertionError, inst._compute_snapshot, doc_tree, doc_map, None, None, None - ) - - def test__compute_snapshot_operation_relative_ordering(self): - from google.cloud.firestore_v1.watch import WatchDocTree - - doc_tree = WatchDocTree() - - class DummyDoc(object): - update_time = mock.sentinel - - deleted_doc = DummyDoc() - added_doc = DummyDoc() - added_doc._document_path = "/added" - updated_doc = DummyDoc() - updated_doc._document_path = "/updated" - doc_tree = doc_tree.insert(deleted_doc, None) - doc_tree = doc_tree.insert(updated_doc, None) - doc_map = {"/deleted": deleted_doc, "/updated": updated_doc} - added_snapshot = DummyDocumentSnapshot(added_doc, None, True, None, None, None) - added_snapshot.reference = added_doc - updated_snapshot = 
DummyDocumentSnapshot( - updated_doc, None, True, None, None, None - ) - updated_snapshot.reference = updated_doc - delete_changes = ["/deleted"] - add_changes = [added_snapshot] - update_changes = [updated_snapshot] - inst = self._makeOne() - updated_tree, updated_map, applied_changes = inst._compute_snapshot( - doc_tree, doc_map, delete_changes, add_changes, update_changes - ) - # TODO: Verify that the assertion here is correct. - self.assertEqual( - updated_map, {"/updated": updated_snapshot, "/added": added_snapshot} - ) - - def test__compute_snapshot_modify_docs_updated_doc_no_timechange(self): - from google.cloud.firestore_v1.watch import WatchDocTree - - doc_tree = WatchDocTree() - - class DummyDoc(object): - pass - - updated_doc_v1 = DummyDoc() - updated_doc_v1.update_time = 1 - updated_doc_v1._document_path = "/updated" - updated_doc_v2 = DummyDoc() - updated_doc_v2.update_time = 1 - updated_doc_v2._document_path = "/updated" - doc_tree = doc_tree.insert("/updated", updated_doc_v1) - doc_map = {"/updated": updated_doc_v1} - updated_snapshot = DummyDocumentSnapshot( - updated_doc_v2, None, True, None, None, 1 - ) - delete_changes = [] - add_changes = [] - update_changes = [updated_snapshot] - inst = self._makeOne() - updated_tree, updated_map, applied_changes = inst._compute_snapshot( - doc_tree, doc_map, delete_changes, add_changes, update_changes - ) - self.assertEqual(updated_map, doc_map) # no change - - def test__compute_snapshot_deletes_w_real_comparator(self): - from google.cloud.firestore_v1.watch import WatchDocTree - - doc_tree = WatchDocTree() - - class DummyDoc(object): - update_time = mock.sentinel - - deleted_doc_1 = DummyDoc() - deleted_doc_2 = DummyDoc() - doc_tree = doc_tree.insert(deleted_doc_1, None) - doc_tree = doc_tree.insert(deleted_doc_2, None) - doc_map = {"/deleted_1": deleted_doc_1, "/deleted_2": deleted_doc_2} - delete_changes = ["/deleted_1", "/deleted_2"] - add_changes = [] - update_changes = [] - inst = self._makeOne(comparator=object()) - updated_tree, updated_map, applied_changes = inst._compute_snapshot( - doc_tree, doc_map, delete_changes, add_changes, update_changes - ) - self.assertEqual(updated_map, {}) - - def test__reset_docs(self): - from google.cloud.firestore_v1.watch import ChangeType - - inst = self._makeOne() - inst.change_map = {None: None} - from google.cloud.firestore_v1.watch import WatchDocTree - - doc = DummyDocumentReference("doc") - doc_tree = WatchDocTree() - snapshot = DummyDocumentSnapshot(doc, None, True, None, None, None) - snapshot.reference = doc - doc_tree = doc_tree.insert(snapshot, None) - inst.doc_tree = doc_tree - inst._reset_docs() - self.assertEqual(inst.change_map, {"/doc": ChangeType.REMOVED}) - self.assertEqual(inst.resume_token, None) - self.assertFalse(inst.current) - - def test_resume_token_sent_on_recovery(self): - inst = self._makeOne() - inst.resume_token = b"ABCD0123" - request = inst._get_rpc_request() - self.assertEqual(request.add_target.resume_token, b"ABCD0123") - - -class DummyFirestoreStub(object): - def Listen(self): # pragma: NO COVER - pass - - -class DummyFirestoreClient(object): - def __init__(self): - self.transport = mock.Mock(_stubs={"firestore_stub": DummyFirestoreStub()}) - - -class DummyDocumentReference(object): - def __init__(self, *document_path, **kw): - if "client" not in kw: - self._client = DummyFirestore() - else: - self._client = kw["client"] - - self._path = document_path - self._document_path = "/" + "/".join(document_path) - self.__dict__.update(kw) - - -class 
DummyDocument(object): - def __init__(self, name, parent): - self._name = name - self._parent = parent - - @property - def _document_path(self): - return "{}/documents/{}".format( - self._parent._client._database_string, self._name - ) - - -class DummyCollection(object): - def __init__(self, client, parent=None): - self._client = client - self._parent = parent - - def _parent_info(self): - if self._parent is None: - return "{}/documents".format(self._client._database_string), None - return self._parent._document_path, None - - -def _compare(x, y): # pragma: NO COVER - return 1 - - -class DummyQuery(object): - def __init__(self, parent): - self._comparator = _compare - self._parent = parent - - @property - def _client(self): - return self._parent._client - - def _to_protobuf(self): - return "" - - -class DummyFirestore(object): - _firestore_api = DummyFirestoreClient() - _database_string = "abc://bar/" - _rpc_metadata = None - - def document(self, *document_path): # pragma: NO COVER - if len(document_path) == 1: - path = document_path[0].split("/") - else: - path = document_path - - return DummyDocumentReference(*path, client=self) - - -class DummyDocumentSnapshot(object): - # def __init__(self, **kw): - # self.__dict__.update(kw) - def __init__(self, reference, data, exists, read_time, create_time, update_time): - self.reference = reference - self.data = data - self.exists = exists - self.read_time = read_time - self.create_time = create_time - self.update_time = update_time - - def __str__(self): - return "%s-%s" % (self.reference._document_path, self.read_time) - - def __hash__(self): - return hash(str(self)) - - -class DummyBackgroundConsumer(object): - started = False - stopped = False - is_active = True - - def __init__(self, rpc, on_snapshot): - self._rpc = rpc - self.on_snapshot = on_snapshot - - def start(self): - self.started = True - - def stop(self): - self.stopped = True - self.is_active = False - - -class DummyThread(object): - started = False - - def __init__(self, name, target, kwargs): - self.name = name - self.target = target - self.kwargs = kwargs - - def start(self): - self.started = True - - -class DummyThreading(object): - def __init__(self): - self.threads = {} - - def Thread(self, name, target, kwargs): - thread = DummyThread(name, target, kwargs) - self.threads[name] = thread - return thread - - -class DummyRpc(object): - def __init__( - self, - start_rpc, - should_recover, - should_terminate=None, - initial_request=None, - metadata=None, - ): - self.start_rpc = start_rpc - self.should_recover = should_recover - self.should_terminate = should_terminate - self.initial_request = initial_request() - self.metadata = metadata - self.closed = False - self.callbacks = [] - - def add_done_callback(self, callback): - self.callbacks.append(callback) - - def close(self): - self.closed = True - - -class DummyCause(object): - code = 1 - message = "hi" - - -class DummyChange(object): - def __init__(self): - self.target_ids = [] - self.removed_target_ids = [] - self.read_time = 0 - self.target_change_type = firestore_pb2.TargetChange.NO_CHANGE - self.resume_token = None - self.cause = DummyCause() - - -class DummyProto(object): - def __init__(self): - self.target_change = DummyChange() - self.document_change = DummyChange() - - -class DummyTarget(object): - def QueryTarget(self, **kw): - self.kw = kw - return "dummy query target" - - -class DummyPb2(object): - - Target = DummyTarget() - - def ListenRequest(self, **kw): - pass diff --git 
a/firestore/tests/unit/v1/testdata/create-all-transforms.json b/firestore/tests/unit/v1/testdata/create-all-transforms.json deleted file mode 100644 index 82831624bb1f..000000000000 --- a/firestore/tests/unit/v1/testdata/create-all-transforms.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "tests": [ - { - "description": "create: all transforms in a single call", - "comment": "A document can be created with any amount of transforms.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": [\"ArrayUnion\", 1, 2, 3], \"d\": [\"ArrayRemove\", 4, 5, 6]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - }, - { - "fieldPath": "c", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "d", - "removeAllFromArray": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayremove-multi.json b/firestore/tests/unit/v1/testdata/create-arrayremove-multi.json deleted file mode 100644 index 548a9838089e..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayremove-multi.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "tests": [ - { - "description": "create: multiple ArrayRemove fields", - "comment": "A document can have more than one ArrayRemove field.\nSince all the ArrayRemove fields are removed, the only field in the update is \"a\".", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3], \"c\": {\"d\": [\"ArrayRemove\", 4, 5, 6]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "c.d", - "removeAllFromArray": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayremove-nested.json b/firestore/tests/unit/v1/testdata/create-arrayremove-nested.json deleted file mode 100644 index fa01bd7e0071..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayremove-nested.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "tests": [ - { - "description": "create: nested ArrayRemove field", - "comment": "An ArrayRemove value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". 
Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": [\"ArrayRemove\", 1, 2, 3]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayremove-noarray-nested.json b/firestore/tests/unit/v1/testdata/create-arrayremove-noarray-nested.json deleted file mode 100644 index 7d530084d448..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayremove-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: ArrayRemove cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ArrayRemove. Firestore transforms don't support array indexing.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": [\"ArrayRemove\", 1, 2, 3]}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayremove-noarray.json b/firestore/tests/unit/v1/testdata/create-arrayremove-noarray.json deleted file mode 100644 index 99aea7e35cdf..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayremove-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: ArrayRemove cannot be in an array value", - "comment": "ArrayRemove must be the value of a field. Firestore\ntransforms don't support array indexing.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayremove-with-st.json b/firestore/tests/unit/v1/testdata/create-arrayremove-with-st.json deleted file mode 100644 index 56bdc435daff..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayremove-with-st.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: The ServerTimestamp sentinel cannot be in an ArrayUnion", - "comment": "The ServerTimestamp sentinel must be the value of a field. It may\nnot appear in an ArrayUnion.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [\"ArrayRemove\", 1, \"ServerTimestamp\", 3]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayremove.json b/firestore/tests/unit/v1/testdata/create-arrayremove.json deleted file mode 100644 index a69be14b7b12..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayremove.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "tests": [ - { - "description": "create: ArrayRemove with data", - "comment": "A key with ArrayRemove is removed from the data in the update \noperation. 
Instead it appears in a separate Transform operation.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayunion-multi.json b/firestore/tests/unit/v1/testdata/create-arrayunion-multi.json deleted file mode 100644 index 7ca9852f48d9..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayunion-multi.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "tests": [ - { - "description": "create: multiple ArrayUnion fields", - "comment": "A document can have more than one ArrayUnion field.\nSince all the ArrayUnion fields are removed, the only field in the update is \"a\".", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3], \"c\": {\"d\": [\"ArrayUnion\", 4, 5, 6]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "c.d", - "appendMissingElements": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayunion-nested.json b/firestore/tests/unit/v1/testdata/create-arrayunion-nested.json deleted file mode 100644 index a2f20299d3be..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayunion-nested.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "tests": [ - { - "description": "create: nested ArrayUnion field", - "comment": "An ArrayUnion value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". 
Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": [\"ArrayUnion\", 1, 2, 3]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayunion-noarray-nested.json b/firestore/tests/unit/v1/testdata/create-arrayunion-noarray-nested.json deleted file mode 100644 index b9ec5c01cbf1..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayunion-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: ArrayUnion cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ArrayUnion. Firestore transforms don't support array indexing.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": [\"ArrayUnion\", 1, 2, 3]}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayunion-noarray.json b/firestore/tests/unit/v1/testdata/create-arrayunion-noarray.json deleted file mode 100644 index 1b85a93c45e9..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayunion-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: ArrayUnion cannot be in an array value", - "comment": "ArrayUnion must be the value of a field. Firestore\ntransforms don't support array indexing.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayunion-with-st.json b/firestore/tests/unit/v1/testdata/create-arrayunion-with-st.json deleted file mode 100644 index 2847f57490b8..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayunion-with-st.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: The ServerTimestamp sentinel cannot be in an ArrayUnion", - "comment": "The ServerTimestamp sentinel must be the value of a field. It may\nnot appear in an ArrayUnion.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [\"ArrayUnion\", 1, \"ServerTimestamp\", 3]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-arrayunion.json b/firestore/tests/unit/v1/testdata/create-arrayunion.json deleted file mode 100644 index 26d079946645..000000000000 --- a/firestore/tests/unit/v1/testdata/create-arrayunion.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "tests": [ - { - "description": "create: ArrayUnion with data", - "comment": "A key with ArrayUnion is removed from the data in the update \noperation. 
Instead it appears in a separate Transform operation.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-basic.json b/firestore/tests/unit/v1/testdata/create-basic.json deleted file mode 100644 index d67558ca13dd..000000000000 --- a/firestore/tests/unit/v1/testdata/create-basic.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "tests": [ - { - "description": "create: basic", - "comment": "A simple call, resulting in a single update operation.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-complex.json b/firestore/tests/unit/v1/testdata/create-complex.json deleted file mode 100644 index a01b307f672d..000000000000 --- a/firestore/tests/unit/v1/testdata/create-complex.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "tests": [ - { - "description": "create: complex", - "comment": "A call to a write method with complicated input data.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "arrayValue": { - "values": [ - { - "integerValue": "1" - }, - { - "doubleValue": 2.5 - } - ] - } - }, - "b": { - "mapValue": { - "fields": { - "c": { - "arrayValue": { - "values": [ - { - "stringValue": "three" - }, - { - "mapValue": { - "fields": { - "d": { - "booleanValue": true - } - } - } - } - ] - } - } - } - } - } - } - }, - "currentDocument": { - "exists": false - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-del-noarray-nested.json b/firestore/tests/unit/v1/testdata/create-del-noarray-nested.json deleted file mode 100644 index 34d8258e1b21..000000000000 --- a/firestore/tests/unit/v1/testdata/create-del-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: Delete cannot be anywhere inside an array value", - "comment": "The Delete sentinel must be the value of a field. 
Deletes are implemented\nby turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not support\narray indexing.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": \"Delete\"}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-del-noarray.json b/firestore/tests/unit/v1/testdata/create-del-noarray.json deleted file mode 100644 index dde6b334b461..000000000000 --- a/firestore/tests/unit/v1/testdata/create-del-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: Delete cannot be in an array value", - "comment": "The Delete sentinel must be the value of a field. Deletes are\nimplemented by turning the path to the Delete sentinel into a FieldPath, and FieldPaths\ndo not support array indexing.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, \"Delete\"]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-empty.json b/firestore/tests/unit/v1/testdata/create-empty.json deleted file mode 100644 index 7d9f7f009872..000000000000 --- a/firestore/tests/unit/v1/testdata/create-empty.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "create: creating or setting an empty map", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": {} - }, - "currentDocument": { - "exists": false - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-nodel.json b/firestore/tests/unit/v1/testdata/create-nodel.json deleted file mode 100644 index dd8baaf227aa..000000000000 --- a/firestore/tests/unit/v1/testdata/create-nodel.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: Delete cannot appear in data", - "comment": "The Delete sentinel cannot be used in Create, or in Set without a Merge option.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"Delete\"}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-nosplit.json b/firestore/tests/unit/v1/testdata/create-nosplit.json deleted file mode 100644 index 8807af362e70..000000000000 --- a/firestore/tests/unit/v1/testdata/create-nosplit.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "tests": [ - { - "description": "create: don’t split on dots", - "comment": "Create and Set treat their map keys literally. 
They do not split on dots.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a.b": { - "mapValue": { - "fields": { - "c.d": { - "integerValue": "1" - } - } - } - }, - "e": { - "integerValue": "2" - } - } - }, - "currentDocument": { - "exists": false - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-special-chars.json b/firestore/tests/unit/v1/testdata/create-special-chars.json deleted file mode 100644 index 4080042000d5..000000000000 --- a/firestore/tests/unit/v1/testdata/create-special-chars.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "tests": [ - { - "description": "create: non-alpha characters in map keys", - "comment": "Create and Set treat their map keys literally. They do not escape special characters.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{ \"*\": { \".\": 1 }, \"~\": 2 }", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "*": { - "mapValue": { - "fields": { - ".": { - "integerValue": "1" - } - } - } - }, - "~": { - "integerValue": "2" - } - } - }, - "currentDocument": { - "exists": false - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-st-alone.json b/firestore/tests/unit/v1/testdata/create-st-alone.json deleted file mode 100644 index 20c5e8ec32a3..000000000000 --- a/firestore/tests/unit/v1/testdata/create-st-alone.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "tests": [ - { - "description": "create: ServerTimestamp alone", - "comment": "If the only values in the input are ServerTimestamps, then no\nupdate operation should be produced.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "setToServerValue": "REQUEST_TIME" - } - ] - }, - "currentDocument": { - "exists": false - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-st-multi.json b/firestore/tests/unit/v1/testdata/create-st-multi.json deleted file mode 100644 index 89430e2b64d6..000000000000 --- a/firestore/tests/unit/v1/testdata/create-st-multi.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "tests": [ - { - "description": "create: multiple ServerTimestamp fields", - "comment": "A document can have more than one ServerTimestamp field.\nSince all the ServerTimestamp fields are removed, the only field in the update is \"a\".", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - 
"fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - }, - { - "fieldPath": "c.d", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-st-nested.json b/firestore/tests/unit/v1/testdata/create-st-nested.json deleted file mode 100644 index f2a3a8d1f624..000000000000 --- a/firestore/tests/unit/v1/testdata/create-st-nested.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "tests": [ - { - "description": "create: nested ServerTimestamp field", - "comment": "A ServerTimestamp value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-st-noarray-nested.json b/firestore/tests/unit/v1/testdata/create-st-noarray-nested.json deleted file mode 100644 index 8660531dcc9a..000000000000 --- a/firestore/tests/unit/v1/testdata/create-st-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: ServerTimestamp cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ServerTimestamp sentinel. Firestore transforms don't support array indexing.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-st-noarray.json b/firestore/tests/unit/v1/testdata/create-st-noarray.json deleted file mode 100644 index 31104f25613c..000000000000 --- a/firestore/tests/unit/v1/testdata/create-st-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "create: ServerTimestamp cannot be in an array value", - "comment": "The ServerTimestamp sentinel must be the value of a field. 
Firestore\ntransforms don't support array indexing.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, \"ServerTimestamp\"]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-st-with-empty-map.json b/firestore/tests/unit/v1/testdata/create-st-with-empty-map.json deleted file mode 100644 index 730afd154fd8..000000000000 --- a/firestore/tests/unit/v1/testdata/create-st-with-empty-map.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "tests": [ - { - "description": "create: ServerTimestamp beside an empty map", - "comment": "When a ServerTimestamp and a map both reside inside a map, the\nServerTimestamp should be stripped out but the empty map should remain.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": {\"b\": {}, \"c\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "mapValue": { - "fields": { - "b": { - "mapValue": { - "fields": {} - } - } - } - } - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/create-st.json b/firestore/tests/unit/v1/testdata/create-st.json deleted file mode 100644 index 705f76ed16ac..000000000000 --- a/firestore/tests/unit/v1/testdata/create-st.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "tests": [ - { - "description": "create: ServerTimestamp with data", - "comment": "A key with the special ServerTimestamp sentinel is removed from\nthe data in the update operation. 
Instead it appears in a separate Transform operation.\nNote that in these tests, the string \"ServerTimestamp\" should be replaced with the\nspecial ServerTimestamp value.", - "create": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "currentDocument": { - "exists": false - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/delete-exists-precond.json b/firestore/tests/unit/v1/testdata/delete-exists-precond.json deleted file mode 100644 index 174be0eccb06..000000000000 --- a/firestore/tests/unit/v1/testdata/delete-exists-precond.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "delete: delete with exists precondition", - "comment": "Delete supports an exists precondition.", - "delete": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "precondition": { - "exists": true - }, - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "delete": "projects/projectID/databases/(default)/documents/C/d", - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/delete-no-precond.json b/firestore/tests/unit/v1/testdata/delete-no-precond.json deleted file mode 100644 index 96fcb39a5988..000000000000 --- a/firestore/tests/unit/v1/testdata/delete-no-precond.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "tests": [ - { - "description": "delete: delete without precondition", - "comment": "An ordinary Delete call.", - "delete": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "delete": "projects/projectID/databases/(default)/documents/C/d" - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/delete-time-precond.json b/firestore/tests/unit/v1/testdata/delete-time-precond.json deleted file mode 100644 index 160defb3fedb..000000000000 --- a/firestore/tests/unit/v1/testdata/delete-time-precond.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "delete: delete with last-update-time precondition", - "comment": "Delete supports a last-update-time precondition.", - "delete": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "precondition": { - "updateTime": "1970-01-01T00:00:42Z" - }, - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "delete": "projects/projectID/databases/(default)/documents/C/d", - "currentDocument": { - "updateTime": "1970-01-01T00:00:42Z" - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/get-basic.json b/firestore/tests/unit/v1/testdata/get-basic.json deleted file mode 100644 index 0a2cd2d4a1b7..000000000000 --- a/firestore/tests/unit/v1/testdata/get-basic.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "tests": [ - { - "description": "get: get a document", - "comment": "A call to DocumentRef.Get", - "get": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "request": { - 
"name": "projects/projectID/databases/(default)/documents/C/d" - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-add-mod-del-add.json b/firestore/tests/unit/v1/testdata/listen-add-mod-del-add.json deleted file mode 100644 index d05997332df0..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-add-mod-del-add.json +++ /dev/null @@ -1,206 +0,0 @@ -{ - "tests": [ - { - "description": "listen: add a doc, modify it, delete it, then add it again", - "comment": "Various changes to a single document.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - }, - { - "documentDelete": { - "document": "projects/projectID/databases/(default)/documents/C/d1" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:03Z" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:04Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:01Z" - }, - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - } - ], - "changes": [ - { - "kind": "MODIFIED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - } - } - ], - "readTime": "1970-01-01T00:00:02Z" - }, - { - "changes": [ - { - "kind": "REMOVED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "newIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:03Z" - }, - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - } - ], - "changes": [ - { - "kind": 
"ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - "oldIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:04Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-add-one.json b/firestore/tests/unit/v1/testdata/listen-add-one.json deleted file mode 100644 index 8223180a8765..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-add-one.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "tests": [ - { - "description": "listen: add a doc", - "comment": "Snapshot with a single document.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:02Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-add-three.json b/firestore/tests/unit/v1/testdata/listen-add-three.json deleted file mode 100644 index 6ea117a7cc38..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-add-three.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "tests": [ - { - "description": "listen: add three documents", - "comment": "A snapshot with three documents. The documents are sorted\nfirst by the \"a\" field, then by their path. 
The changes are ordered the same way.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 2 - } - ], - "readTime": "1970-01-01T00:00:02Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-doc-remove.json b/firestore/tests/unit/v1/testdata/listen-doc-remove.json deleted file mode 100644 index 59af7d11a6e8..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-doc-remove.json +++ /dev/null @@ -1,101 +0,0 @@ -{ - "tests": [ - { - "description": "listen: DocumentRemove behaves like DocumentDelete", - "comment": "The DocumentRemove response behaves exactly like DocumentDelete.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - }, - { - "documentRemove": { - "document": 
"projects/projectID/databases/(default)/documents/C/d1" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:01Z" - }, - { - "changes": [ - { - "kind": "REMOVED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "newIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:02Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-empty.json b/firestore/tests/unit/v1/testdata/listen-empty.json deleted file mode 100644 index 734aa41f9ee7..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-empty.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "tests": [ - { - "description": "listen: no changes; empty snapshot", - "comment": "There are no changes, so the snapshot should be empty.", - "listen": { - "responses": [ - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - } - ], - "snapshots": [ - { - "readTime": "1970-01-01T00:00:01Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-filter-nop.json b/firestore/tests/unit/v1/testdata/listen-filter-nop.json deleted file mode 100644 index a7c09e97d99a..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-filter-nop.json +++ /dev/null @@ -1,203 +0,0 @@ -{ - "tests": [ - { - "description": "listen: Filter response with same size is a no-op", - "comment": "A Filter response whose count matches the size of the current\nstate (docs in last snapshot + docs added - docs deleted) is a no-op.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentDelete": { - "document": "projects/projectID/databases/(default)/documents/C/d1" - } - }, - { - "filter": { - "count": 2 - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": 
"projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 1 - } - ], - "readTime": "1970-01-01T00:00:01Z" - }, - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "REMOVED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": 1, - "newIndex": -1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 1 - } - ], - "readTime": "1970-01-01T00:00:02Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-multi-docs.json b/firestore/tests/unit/v1/testdata/listen-multi-docs.json deleted file mode 100644 index fe5b0f0bbf9b..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-multi-docs.json +++ /dev/null @@ -1,414 +0,0 @@ -{ - "tests": [ - { - "description": "listen: multiple documents, added, deleted and updated", - "comment": "Changes should be ordered with deletes first, then additions, then mods,\neach in query order.\nOld indices refer to the immediately previous state, not the previous snapshot", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentChange": { - 
"document": { - "name": "projects/projectID/databases/(default)/documents/C/d4", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d5", - "fields": { - "a": { - "integerValue": "4" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentDelete": { - "document": "projects/projectID/databases/(default)/documents/C/d3" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "-1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d6", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentDelete": { - "document": "projects/projectID/databases/(default)/documents/C/d2" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d4", - "fields": { - "a": { - "integerValue": "-2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:04Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d4", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d4", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 2 - }, - { - "kind": "ADDED", - "doc": { - "name": 
"projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 3 - } - ], - "readTime": "1970-01-01T00:00:02Z" - }, - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d4", - "fields": { - "a": { - "integerValue": "-2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "-1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d6", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d5", - "fields": { - "a": { - "integerValue": "4" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "REMOVED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "newIndex": -1 - }, - { - "kind": "REMOVED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "newIndex": -1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d6", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 2 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d5", - "fields": { - "a": { - "integerValue": "4" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 3 - }, - { - "kind": "MODIFIED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d4", - "fields": { - "a": { - "integerValue": "-2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - } - }, - { - "kind": "MODIFIED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "-1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - "oldIndex": 1, - "newIndex": 1 - } - ], - "readTime": "1970-01-01T00:00:04Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-nocurrent.json b/firestore/tests/unit/v1/testdata/listen-nocurrent.json deleted file mode 100644 index 158595e963df..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-nocurrent.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "tests": [ - { - "description": "listen: no snapshot if we don't see CURRENT", - "comment": "If the watch state is not marked CURRENT, no snapshot is issued.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - 
"targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "oldIndex": -1, - "newIndex": 1 - } - ], - "readTime": "1970-01-01T00:00:02Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-nomod.json b/firestore/tests/unit/v1/testdata/listen-nomod.json deleted file mode 100644 index 0e454d51286a..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-nomod.json +++ /dev/null @@ -1,123 +0,0 @@ -{ - "tests": [ - { - "description": "listen: add a doc, then change it but without changing its update time", - "comment": "Document updates are recognized by a change in the update time, not the data.\nThis shouldn't actually happen. 
It is just a test of the update logic.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - }, - { - "documentDelete": { - "document": "projects/projectID/databases/(default)/documents/C/d1" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:03Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:01Z" - }, - { - "changes": [ - { - "kind": "REMOVED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "newIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:03Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-removed-target-ids.json b/firestore/tests/unit/v1/testdata/listen-removed-target-ids.json deleted file mode 100644 index 57c91b7bd7f5..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-removed-target-ids.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "tests": [ - { - "description": "listen: DocumentChange with removed_target_id is like a delete.", - "comment": "A DocumentChange with the watch target ID in the removed_target_ids field is the\nsame as deleting a document.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "removedTargetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": 
"ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:01Z" - }, - { - "changes": [ - { - "kind": "REMOVED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "newIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:02Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-reset.json b/firestore/tests/unit/v1/testdata/listen-reset.json deleted file mode 100644 index d988a1ba9bf0..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-reset.json +++ /dev/null @@ -1,309 +0,0 @@ -{ - "tests": [ - { - "description": "listen: RESET turns off CURRENT", - "comment": "A RESET message turns off the CURRENT state, and marks all documents as deleted.\n\nIf a document appeared on the stream but was never part of a snapshot (\"d3\" in this test), a reset\nwill make it disappear completely.\n\nFor a snapshot to happen at a NO_CHANGE reponse, we need to have both seen a CURRENT response, and\nhave a change from the previous snapshot. Here, after the reset, we see the same version of d2\nagain. That doesn't result in a snapshot.\n", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "RESET" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:02Z" - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:03Z" - } - }, - { - "targetChange": { - "targetChangeType": "RESET" - } - }, - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:04Z" - } - }, - { - "documentChange": { - "document": { - "name": 
"projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:05Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "1" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "oldIndex": -1 - }, - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1, - "newIndex": 1 - } - ], - "readTime": "1970-01-01T00:00:01Z" - }, - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - } - ], - "changes": [ - { - "kind": "REMOVED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "2" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": 1, - "newIndex": -1 - }, - { - "kind": "MODIFIED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - } - } - ], - "readTime": "1970-01-01T00:00:03Z" - }, - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d2", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:03Z" - }, - { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d3", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:02Z" - }, - "oldIndex": -1, - "newIndex": 1 - } - ], - "readTime": "1970-01-01T00:00:05Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-target-add-nop.json b/firestore/tests/unit/v1/testdata/listen-target-add-nop.json deleted file mode 100644 index e864ea58221a..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-target-add-nop.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "tests": [ - { - "description": "listen: TargetChange_ADD is a no-op if it has the same target ID", - "comment": "A TargetChange_ADD response must have the same watch target ID.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": 
"projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "targetChangeType": "ADD", - "targetIds": [ - 1 - ], - "readTime": "1970-01-01T00:00:02Z" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - } - ], - "snapshots": [ - { - "docs": [ - { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - } - ], - "changes": [ - { - "kind": "ADDED", - "doc": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "oldIndex": -1 - } - ], - "readTime": "1970-01-01T00:00:01Z" - } - ] - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-target-add-wrong-id.json b/firestore/tests/unit/v1/testdata/listen-target-add-wrong-id.json deleted file mode 100644 index 5bd295d50572..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-target-add-wrong-id.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "tests": [ - { - "description": "listen: TargetChange_ADD is an error if it has a different target ID", - "comment": "A TargetChange_ADD response must have the same watch target ID.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "targetChangeType": "ADD", - "targetIds": [ - 2 - ], - "readTime": "1970-01-01T00:00:02Z" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/listen-target-remove.json b/firestore/tests/unit/v1/testdata/listen-target-remove.json deleted file mode 100644 index 2b11e280eb19..000000000000 --- a/firestore/tests/unit/v1/testdata/listen-target-remove.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "tests": [ - { - "description": "listen: TargetChange_REMOVE should not appear", - "comment": "A TargetChange_REMOVE response should never be sent.", - "listen": { - "responses": [ - { - "documentChange": { - "document": { - "name": "projects/projectID/databases/(default)/documents/C/d1", - "fields": { - "a": { - "integerValue": "3" - } - }, - "createTime": "1970-01-01T00:00:01Z", - "updateTime": "1970-01-01T00:00:01Z" - }, - "targetIds": [ - 1 - ] - } - }, - { - "targetChange": { - "targetChangeType": "CURRENT" - } - }, - { - "targetChange": { - "targetChangeType": "REMOVE" - } - }, - { - "targetChange": { - "readTime": "1970-01-01T00:00:01Z" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-arrayremove-cursor.json b/firestore/tests/unit/v1/testdata/query-arrayremove-cursor.json deleted file mode 100644 index 9e396b358cd6..000000000000 --- a/firestore/tests/unit/v1/testdata/query-arrayremove-cursor.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "tests": [ - { - "description": "query: ArrayRemove in cursor method", - 
"comment": "ArrayRemove is not permitted in queries.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "endBefore": { - "jsonValues": [ - "[\"ArrayRemove\", 1, 2, 3]" - ] - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-arrayremove-where.json b/firestore/tests/unit/v1/testdata/query-arrayremove-where.json deleted file mode 100644 index c488bba85afc..000000000000 --- a/firestore/tests/unit/v1/testdata/query-arrayremove-where.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "query: ArrayRemove in Where", - "comment": "ArrayRemove is not permitted in queries.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "==", - "jsonValue": "[\"ArrayRemove\", 1, 2, 3]" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-arrayunion-cursor.json b/firestore/tests/unit/v1/testdata/query-arrayunion-cursor.json deleted file mode 100644 index 8259d31cc75e..000000000000 --- a/firestore/tests/unit/v1/testdata/query-arrayunion-cursor.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "tests": [ - { - "description": "query: ArrayUnion in cursor method", - "comment": "ArrayUnion is not permitted in queries.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "endBefore": { - "jsonValues": [ - "[\"ArrayUnion\", 1, 2, 3]" - ] - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-arrayunion-where.json b/firestore/tests/unit/v1/testdata/query-arrayunion-where.json deleted file mode 100644 index 9f298d84e02c..000000000000 --- a/firestore/tests/unit/v1/testdata/query-arrayunion-where.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "query: ArrayUnion in Where", - "comment": "ArrayUnion is not permitted in queries.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "==", - "jsonValue": "[\"ArrayUnion\", 1, 2, 3]" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-bad-NaN.json b/firestore/tests/unit/v1/testdata/query-bad-NaN.json deleted file mode 100644 index 47344309fe6b..000000000000 --- a/firestore/tests/unit/v1/testdata/query-bad-NaN.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "query: where clause with non-== comparison with NaN", - "comment": "You can only compare NaN for equality.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "\u003c", - "jsonValue": "\"NaN\"" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-bad-null.json b/firestore/tests/unit/v1/testdata/query-bad-null.json deleted file mode 100644 index 340afb9332db..000000000000 --- a/firestore/tests/unit/v1/testdata/query-bad-null.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "query: where clause with non-== comparison with Null", - "comment": "You can only compare Null for equality.", - "query": { - "collPath": 
"projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "\u003e", - "jsonValue": "null" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-order.json b/firestore/tests/unit/v1/testdata/query-cursor-docsnap-order.json deleted file mode 100644 index 89d2696dd493..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-order.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor methods with a document snapshot, existing orderBy", - "comment": "When a document snapshot is used, the client appends a __name__ order-by clause\nwith the direction of the last order-by clause.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "orderBy": { - "path": { - "field": [ - "b" - ] - }, - "direction": "desc" - } - }, - { - "startAfter": { - "docSnapshot": { - "path": "projects/projectID/databases/(default)/documents/C/D", - "jsonData": "{\"a\": 7, \"b\": 8}" - } - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "ASCENDING" - }, - { - "field": { - "fieldPath": "b" - }, - "direction": "DESCENDING" - }, - { - "field": { - "fieldPath": "__name__" - }, - "direction": "DESCENDING" - } - ], - "startAt": { - "values": [ - { - "integerValue": "7" - }, - { - "integerValue": "8" - }, - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D" - } - ] - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-orderby-name.json b/firestore/tests/unit/v1/testdata/query-cursor-docsnap-orderby-name.json deleted file mode 100644 index 189b302a0b73..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-orderby-name.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor method, doc snapshot, existing orderBy __name__", - "comment": "If there is an existing orderBy clause on __name__,\nno changes are made to the list of orderBy clauses.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "desc" - } - }, - { - "orderBy": { - "path": { - "field": [ - "__name__" - ] - }, - "direction": "asc" - } - }, - { - "startAt": { - "docSnapshot": { - "path": "projects/projectID/databases/(default)/documents/C/D", - "jsonData": "{\"a\": 7, \"b\": 8}" - } - } - }, - { - "endAt": { - "docSnapshot": { - "path": "projects/projectID/databases/(default)/documents/C/D", - "jsonData": "{\"a\": 7, \"b\": 8}" - } - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "DESCENDING" - }, - { - "field": { - "fieldPath": "__name__" - }, - "direction": "ASCENDING" - } - ], - "startAt": { - "values": [ - { - "integerValue": "7" - }, - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D" - } - ], - "before": true - }, - "endAt": { - "values": [ - { - "integerValue": "7" - }, - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D" - } - ] - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-eq.json 
b/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-eq.json deleted file mode 100644 index 41bc9bf1c07c..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-eq.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor methods with a document snapshot and an equality where clause", - "comment": "A Where clause using equality doesn't change the implicit orderBy clauses.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "==", - "jsonValue": "3" - } - }, - { - "endAt": { - "docSnapshot": { - "path": "projects/projectID/databases/(default)/documents/C/D", - "jsonData": "{\"a\": 7, \"b\": 8}" - } - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "where": { - "fieldFilter": { - "field": { - "fieldPath": "a" - }, - "op": "EQUAL", - "value": { - "integerValue": "3" - } - } - }, - "orderBy": [ - { - "field": { - "fieldPath": "__name__" - }, - "direction": "ASCENDING" - } - ], - "endAt": { - "values": [ - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D" - } - ] - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-neq-orderby.json b/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-neq-orderby.json deleted file mode 100644 index ce99f786d39f..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-neq-orderby.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor method, doc snapshot, inequality where clause, and existing orderBy clause", - "comment": "If there is an OrderBy clause, the inequality Where clause does\nnot result in a new OrderBy clause. 
We still add a __name__ OrderBy clause", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "desc" - } - }, - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "\u003c", - "jsonValue": "4" - } - }, - { - "startAt": { - "docSnapshot": { - "path": "projects/projectID/databases/(default)/documents/C/D", - "jsonData": "{\"a\": 7, \"b\": 8}" - } - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "where": { - "fieldFilter": { - "field": { - "fieldPath": "a" - }, - "op": "LESS_THAN", - "value": { - "integerValue": "4" - } - } - }, - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "DESCENDING" - }, - { - "field": { - "fieldPath": "__name__" - }, - "direction": "DESCENDING" - } - ], - "startAt": { - "values": [ - { - "integerValue": "7" - }, - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D" - } - ], - "before": true - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-neq.json b/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-neq.json deleted file mode 100644 index 384bb7c2042a..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-docsnap-where-neq.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor method with a document snapshot and an inequality where clause", - "comment": "A Where clause with an inequality results in an OrderBy clause\non that clause's path, if there are no other OrderBy clauses.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "\u003c=", - "jsonValue": "3" - } - }, - { - "endBefore": { - "docSnapshot": { - "path": "projects/projectID/databases/(default)/documents/C/D", - "jsonData": "{\"a\": 7, \"b\": 8}" - } - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "where": { - "fieldFilter": { - "field": { - "fieldPath": "a" - }, - "op": "LESS_THAN_OR_EQUAL", - "value": { - "integerValue": "3" - } - } - }, - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "ASCENDING" - }, - { - "field": { - "fieldPath": "__name__" - }, - "direction": "ASCENDING" - } - ], - "endAt": { - "values": [ - { - "integerValue": "7" - }, - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D" - } - ], - "before": true - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-docsnap.json b/firestore/tests/unit/v1/testdata/query-cursor-docsnap.json deleted file mode 100644 index ea84c01729e6..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-docsnap.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor methods with a document snapshot", - "comment": "When a document snapshot is used, the client appends a __name__ order-by clause.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "startAt": { - "docSnapshot": { - "path": "projects/projectID/databases/(default)/documents/C/D", - "jsonData": "{\"a\": 7, \"b\": 8}" - } - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "__name__" - }, - "direction": "ASCENDING" - } - ], - "startAt": { - "values": [ - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D" - } - ], - 
"before": true - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-endbefore-empty-map.json b/firestore/tests/unit/v1/testdata/query-cursor-endbefore-empty-map.json deleted file mode 100644 index 3d02cbca2127..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-endbefore-empty-map.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "tests": [ - { - "description": "query: EndBefore with explicit empty map", - "comment": "Cursor methods are allowed to use empty maps with EndBefore. It should result in an empty map in the query.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "endBefore": { - "jsonValues": [ - "{}" - ] - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "ASCENDING" - } - ], - "endAt": { - "values": [ - { - "mapValue": { - "fields": {} - } - } - ], - "before": true - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-endbefore-empty.json b/firestore/tests/unit/v1/testdata/query-cursor-endbefore-empty.json deleted file mode 100644 index c491dcd79882..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-endbefore-empty.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "tests": [ - { - "description": "query: EndBefore with empty values", - "comment": "Cursor methods are not allowed to use empty values with EndBefore. It should result in an error.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "endBefore": {} - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-no-order.json b/firestore/tests/unit/v1/testdata/query-cursor-no-order.json deleted file mode 100644 index 45823b228483..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-no-order.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor method without orderBy", - "comment": "If a cursor method with a list of values is provided, there must be at least as many\nexplicit orderBy clauses as values.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "startAt": { - "jsonValues": [ - "2" - ] - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-startat-empty-map.json b/firestore/tests/unit/v1/testdata/query-cursor-startat-empty-map.json deleted file mode 100644 index 788588f76424..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-startat-empty-map.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "tests": [ - { - "description": "query: StartAt with explicit empty map", - "comment": "Cursor methods are allowed to use empty maps with StartAt. 
It should result in an empty map in the query.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "startAt": { - "jsonValues": [ - "{}" - ] - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "ASCENDING" - } - ], - "startAt": { - "values": [ - { - "mapValue": { - "fields": {} - } - } - ], - "before": true - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-startat-empty.json b/firestore/tests/unit/v1/testdata/query-cursor-startat-empty.json deleted file mode 100644 index c0c5a09801d4..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-startat-empty.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "tests": [ - { - "description": "query: StartAt with empty values", - "comment": "Cursor methods are not allowed to use empty values with StartAt. It should result in an error.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "startAt": {} - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-vals-1a.json b/firestore/tests/unit/v1/testdata/query-cursor-vals-1a.json deleted file mode 100644 index 038d177f1535..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-vals-1a.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "tests": [ - { - "description": "query: StartAt/EndBefore with values", - "comment": "Cursor methods take the same number of values as there are OrderBy clauses.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "startAt": { - "jsonValues": [ - "7" - ] - } - }, - { - "endBefore": { - "jsonValues": [ - "9" - ] - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "ASCENDING" - } - ], - "startAt": { - "values": [ - { - "integerValue": "7" - } - ], - "before": true - }, - "endAt": { - "values": [ - { - "integerValue": "9" - } - ], - "before": true - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-vals-1b.json b/firestore/tests/unit/v1/testdata/query-cursor-vals-1b.json deleted file mode 100644 index 089cff93bdef..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-vals-1b.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "tests": [ - { - "description": "query: StartAfter/EndAt with values", - "comment": "Cursor methods take the same number of values as there are OrderBy clauses.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "startAfter": { - "jsonValues": [ - "7" - ] - } - }, - { - "endAt": { - "jsonValues": [ - "9" - ] - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "ASCENDING" - } - ], - "startAt": { - "values": [ - { - "integerValue": "7" - } - ] - }, - "endAt": { - "values": [ - { - "integerValue": "9" - } - ] - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-vals-2.json 
b/firestore/tests/unit/v1/testdata/query-cursor-vals-2.json deleted file mode 100644 index 8554b436039a..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-vals-2.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "tests": [ - { - "description": "query: Start/End with two values", - "comment": "Cursor methods take the same number of values as there are OrderBy clauses.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "orderBy": { - "path": { - "field": [ - "b" - ] - }, - "direction": "desc" - } - }, - { - "startAt": { - "jsonValues": [ - "7", - "8" - ] - } - }, - { - "endAt": { - "jsonValues": [ - "9", - "10" - ] - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "ASCENDING" - }, - { - "field": { - "fieldPath": "b" - }, - "direction": "DESCENDING" - } - ], - "startAt": { - "values": [ - { - "integerValue": "7" - }, - { - "integerValue": "8" - } - ], - "before": true - }, - "endAt": { - "values": [ - { - "integerValue": "9" - }, - { - "integerValue": "10" - } - ] - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-vals-docid.json b/firestore/tests/unit/v1/testdata/query-cursor-vals-docid.json deleted file mode 100644 index 6492b3f19527..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-vals-docid.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor methods with __name__", - "comment": "Cursor values corresponding to a __name__ field take the document path relative to the\nquery's collection.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "__name__" - ] - }, - "direction": "asc" - } - }, - { - "startAfter": { - "jsonValues": [ - "\"D1\"" - ] - } - }, - { - "endBefore": { - "jsonValues": [ - "\"D2\"" - ] - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "__name__" - }, - "direction": "ASCENDING" - } - ], - "startAt": { - "values": [ - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D1" - } - ] - }, - "endAt": { - "values": [ - { - "referenceValue": "projects/projectID/databases/(default)/documents/C/D2" - } - ], - "before": true - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-cursor-vals-last-wins.json b/firestore/tests/unit/v1/testdata/query-cursor-vals-last-wins.json deleted file mode 100644 index 4a46b2f789d5..000000000000 --- a/firestore/tests/unit/v1/testdata/query-cursor-vals-last-wins.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "tests": [ - { - "description": "query: cursor methods, last one wins", - "comment": "When multiple Start* or End* calls occur, the values of the last one are used.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "startAfter": { - "jsonValues": [ - "1" - ] - } - }, - { - "startAt": { - "jsonValues": [ - "2" - ] - } - }, - { - "endAt": { - "jsonValues": [ - "3" - ] - } - }, - { - "endBefore": { - "jsonValues": [ - "4" - ] - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "a" - }, - "direction": "ASCENDING" - } - ], - 
"startAt": { - "values": [ - { - "integerValue": "2" - } - ], - "before": true - }, - "endAt": { - "values": [ - { - "integerValue": "4" - } - ], - "before": true - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-del-cursor.json b/firestore/tests/unit/v1/testdata/query-del-cursor.json deleted file mode 100644 index 921ace131d28..000000000000 --- a/firestore/tests/unit/v1/testdata/query-del-cursor.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "tests": [ - { - "description": "query: Delete in cursor method", - "comment": "Sentinel values are not permitted in queries.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "endBefore": { - "jsonValues": [ - "\"Delete\"" - ] - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-del-where.json b/firestore/tests/unit/v1/testdata/query-del-where.json deleted file mode 100644 index 2075e3578078..000000000000 --- a/firestore/tests/unit/v1/testdata/query-del-where.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "query: Delete in Where", - "comment": "Sentinel values are not permitted in queries.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "==", - "jsonValue": "\"Delete\"" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-invalid-operator.json b/firestore/tests/unit/v1/testdata/query-invalid-operator.json deleted file mode 100644 index 064164dc0d89..000000000000 --- a/firestore/tests/unit/v1/testdata/query-invalid-operator.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "query: invalid operator in Where clause", - "comment": "The != operator is not supported.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "!=", - "jsonValue": "4" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-invalid-path-order.json b/firestore/tests/unit/v1/testdata/query-invalid-path-order.json deleted file mode 100644 index d0c5ba654f61..000000000000 --- a/firestore/tests/unit/v1/testdata/query-invalid-path-order.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "query: invalid path in OrderBy clause", - "comment": "The path has an empty component.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "*", - "" - ] - }, - "direction": "asc" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-invalid-path-select.json b/firestore/tests/unit/v1/testdata/query-invalid-path-select.json deleted file mode 100644 index fa18f72817a4..000000000000 --- a/firestore/tests/unit/v1/testdata/query-invalid-path-select.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "tests": [ - { - "description": "query: invalid path in Where clause", - "comment": "The path has an empty component.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "select": { - "fields": [ - { - "field": [ - "*", - "" - ] - } - ] - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-invalid-path-where.json 
b/firestore/tests/unit/v1/testdata/query-invalid-path-where.json deleted file mode 100644 index a5b2add33360..000000000000 --- a/firestore/tests/unit/v1/testdata/query-invalid-path-where.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "tests": [ - { - "description": "query: invalid path in Where clause", - "comment": "The path has an empty component.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "*", - "" - ] - }, - "op": "==", - "jsonValue": "4" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-offset-limit-last-wins.json b/firestore/tests/unit/v1/testdata/query-offset-limit-last-wins.json deleted file mode 100644 index 8788826081ef..000000000000 --- a/firestore/tests/unit/v1/testdata/query-offset-limit-last-wins.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "tests": [ - { - "description": "query: multiple Offset and Limit clauses", - "comment": "With multiple Offset or Limit clauses, the last one wins.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "offset": 2 - }, - { - "limit": 3 - }, - { - "limit": 4 - }, - { - "offset": 5 - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "offset": 5, - "limit": 4 - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-offset-limit.json b/firestore/tests/unit/v1/testdata/query-offset-limit.json deleted file mode 100644 index 3429dce0e89d..000000000000 --- a/firestore/tests/unit/v1/testdata/query-offset-limit.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "tests": [ - { - "description": "query: Offset and Limit clauses", - "comment": "Offset and Limit clauses.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "offset": 2 - }, - { - "limit": 3 - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "offset": 2, - "limit": 3 - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-order.json b/firestore/tests/unit/v1/testdata/query-order.json deleted file mode 100644 index f6670f060db9..000000000000 --- a/firestore/tests/unit/v1/testdata/query-order.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "tests": [ - { - "description": "query: basic OrderBy clauses", - "comment": "Multiple OrderBy clauses combine.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "b" - ] - }, - "direction": "asc" - } - }, - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "desc" - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "orderBy": [ - { - "field": { - "fieldPath": "b" - }, - "direction": "ASCENDING" - }, - { - "field": { - "fieldPath": "a" - }, - "direction": "DESCENDING" - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-select-empty.json b/firestore/tests/unit/v1/testdata/query-select-empty.json deleted file mode 100644 index 8dda741a63e8..000000000000 --- a/firestore/tests/unit/v1/testdata/query-select-empty.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "tests": [ - { - "description": "query: empty Select clause", - "comment": "An empty Select clause selects just the document ID.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "select": { - "fields": [] - } - } - ], - "query": { - "select": { - "fields": [ - { - "fieldPath": "__name__" - } - ] - }, - "from": [ - 
{ - "collectionId": "C" - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-select-last-wins.json b/firestore/tests/unit/v1/testdata/query-select-last-wins.json deleted file mode 100644 index 9df4d13d054c..000000000000 --- a/firestore/tests/unit/v1/testdata/query-select-last-wins.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "tests": [ - { - "description": "query: two Select clauses", - "comment": "The last Select clause is the only one used.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "select": { - "fields": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ] - } - }, - { - "select": { - "fields": [ - { - "field": [ - "c" - ] - } - ] - } - } - ], - "query": { - "select": { - "fields": [ - { - "fieldPath": "c" - } - ] - }, - "from": [ - { - "collectionId": "C" - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-select.json b/firestore/tests/unit/v1/testdata/query-select.json deleted file mode 100644 index cfaab8f1f55a..000000000000 --- a/firestore/tests/unit/v1/testdata/query-select.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "tests": [ - { - "description": "query: Select clause with some fields", - "comment": "An ordinary Select clause.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "select": { - "fields": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ] - } - } - ], - "query": { - "select": { - "fields": [ - { - "fieldPath": "a" - }, - { - "fieldPath": "b" - } - ] - }, - "from": [ - { - "collectionId": "C" - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-st-cursor.json b/firestore/tests/unit/v1/testdata/query-st-cursor.json deleted file mode 100644 index d42416ee1dd8..000000000000 --- a/firestore/tests/unit/v1/testdata/query-st-cursor.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "tests": [ - { - "description": "query: ServerTimestamp in cursor method", - "comment": "Sentinel values are not permitted in queries.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "orderBy": { - "path": { - "field": [ - "a" - ] - }, - "direction": "asc" - } - }, - { - "endBefore": { - "jsonValues": [ - "\"ServerTimestamp\"" - ] - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-st-where.json b/firestore/tests/unit/v1/testdata/query-st-where.json deleted file mode 100644 index 1584bb9b47b5..000000000000 --- a/firestore/tests/unit/v1/testdata/query-st-where.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "query: ServerTimestamp in Where", - "comment": "Sentinel values are not permitted in queries.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "==", - "jsonValue": "\"ServerTimestamp\"" - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-where-2.json b/firestore/tests/unit/v1/testdata/query-where-2.json deleted file mode 100644 index a78beb264642..000000000000 --- a/firestore/tests/unit/v1/testdata/query-where-2.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "tests": [ - { - "description": "query: two Where clauses", - "comment": "Multiple Where clauses are combined into a composite filter.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": 
{ - "field": [ - "a" - ] - }, - "op": "\u003e=", - "jsonValue": "5" - } - }, - { - "where": { - "path": { - "field": [ - "b" - ] - }, - "op": "\u003c", - "jsonValue": "\"foo\"" - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "where": { - "compositeFilter": { - "op": "AND", - "filters": [ - { - "fieldFilter": { - "field": { - "fieldPath": "a" - }, - "op": "GREATER_THAN_OR_EQUAL", - "value": { - "integerValue": "5" - } - } - }, - { - "fieldFilter": { - "field": { - "fieldPath": "b" - }, - "op": "LESS_THAN", - "value": { - "stringValue": "foo" - } - } - } - ] - } - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-where-NaN.json b/firestore/tests/unit/v1/testdata/query-where-NaN.json deleted file mode 100644 index c091fe5c091c..000000000000 --- a/firestore/tests/unit/v1/testdata/query-where-NaN.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "tests": [ - { - "description": "query: a Where clause comparing to NaN", - "comment": "A Where clause that tests for equality with NaN results in a unary filter.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "==", - "jsonValue": "\"NaN\"" - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "where": { - "unaryFilter": { - "op": "IS_NAN", - "field": { - "fieldPath": "a" - } - } - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-where-null.json b/firestore/tests/unit/v1/testdata/query-where-null.json deleted file mode 100644 index 6862dd97f6cf..000000000000 --- a/firestore/tests/unit/v1/testdata/query-where-null.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "tests": [ - { - "description": "query: a Where clause comparing to null", - "comment": "A Where clause that tests for equality with null results in a unary filter.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "==", - "jsonValue": "null" - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "where": { - "unaryFilter": { - "op": "IS_NULL", - "field": { - "fieldPath": "a" - } - } - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-where.json b/firestore/tests/unit/v1/testdata/query-where.json deleted file mode 100644 index b132c3030f02..000000000000 --- a/firestore/tests/unit/v1/testdata/query-where.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "tests": [ - { - "description": "query: Where clause", - "comment": "A simple Where clause.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "where": { - "path": { - "field": [ - "a" - ] - }, - "op": "\u003e", - "jsonValue": "5" - } - } - ], - "query": { - "from": [ - { - "collectionId": "C" - } - ], - "where": { - "fieldFilter": { - "field": { - "fieldPath": "a" - }, - "op": "GREATER_THAN", - "value": { - "integerValue": "5" - } - } - } - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/query-wrong-collection.json b/firestore/tests/unit/v1/testdata/query-wrong-collection.json deleted file mode 100644 index 6a677f53decf..000000000000 --- a/firestore/tests/unit/v1/testdata/query-wrong-collection.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "query: doc snapshot with wrong collection in cursor method", - "comment": "If a document snapshot is passed to a Start*/End* method, it must be in the\nsame collection as the 
query.", - "query": { - "collPath": "projects/projectID/databases/(default)/documents/C", - "clauses": [ - { - "endBefore": { - "docSnapshot": { - "path": "projects/projectID/databases/(default)/documents/C2/D", - "jsonData": "{\"a\": 7, \"b\": 8}" - } - } - } - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-all-transforms.json b/firestore/tests/unit/v1/testdata/set-all-transforms.json deleted file mode 100644 index 5c8b1373d4c0..000000000000 --- a/firestore/tests/unit/v1/testdata/set-all-transforms.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "tests": [ - { - "description": "set: all transforms in a single call", - "comment": "A document can be created with any amount of transforms.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": [\"ArrayUnion\", 1, 2, 3], \"d\": [\"ArrayRemove\", 4, 5, 6]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - }, - { - "fieldPath": "c", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "d", - "removeAllFromArray": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayremove-multi.json b/firestore/tests/unit/v1/testdata/set-arrayremove-multi.json deleted file mode 100644 index 3ea9b0dbd8a8..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayremove-multi.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "tests": [ - { - "description": "set: multiple ArrayRemove fields", - "comment": "A document can have more than one ArrayRemove field.\nSince all the ArrayRemove fields are removed, the only field in the update is \"a\".", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3], \"c\": {\"d\": [\"ArrayRemove\", 4, 5, 6]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "c.d", - "removeAllFromArray": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayremove-nested.json b/firestore/tests/unit/v1/testdata/set-arrayremove-nested.json deleted file mode 100644 index 4db133f2c54c..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayremove-nested.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "tests": [ - { - "description": "set: nested ArrayRemove field", - "comment": "An ArrayRemove value can 
occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": [\"ArrayRemove\", 1, 2, 3]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayremove-noarray-nested.json b/firestore/tests/unit/v1/testdata/set-arrayremove-noarray-nested.json deleted file mode 100644 index 96965faa660d..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayremove-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: ArrayRemove cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ArrayRemove. Firestore transforms don't support array indexing.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": [\"ArrayRemove\", 1, 2, 3]}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayremove-noarray.json b/firestore/tests/unit/v1/testdata/set-arrayremove-noarray.json deleted file mode 100644 index cd0e04468bdf..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayremove-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: ArrayRemove cannot be in an array value", - "comment": "ArrayRemove must be the value of a field. Firestore\ntransforms don't support array indexing.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayremove-with-st.json b/firestore/tests/unit/v1/testdata/set-arrayremove-with-st.json deleted file mode 100644 index 146e41fdf439..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayremove-with-st.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: The ServerTimestamp sentinel cannot be in an ArrayUnion", - "comment": "The ServerTimestamp sentinel must be the value of a field. It may\nnot appear in an ArrayUnion.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [\"ArrayRemove\", 1, \"ServerTimestamp\", 3]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayremove.json b/firestore/tests/unit/v1/testdata/set-arrayremove.json deleted file mode 100644 index 18969ef80a5f..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayremove.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "tests": [ - { - "description": "set: ArrayRemove with data", - "comment": "A key with ArrayRemove is removed from the data in the update \noperation. 
Instead it appears in a separate Transform operation.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayunion-multi.json b/firestore/tests/unit/v1/testdata/set-arrayunion-multi.json deleted file mode 100644 index 3d076397c5ff..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayunion-multi.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "tests": [ - { - "description": "set: multiple ArrayUnion fields", - "comment": "A document can have more than one ArrayUnion field.\nSince all the ArrayUnion fields are removed, the only field in the update is \"a\".", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3], \"c\": {\"d\": [\"ArrayUnion\", 4, 5, 6]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "c.d", - "appendMissingElements": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayunion-nested.json b/firestore/tests/unit/v1/testdata/set-arrayunion-nested.json deleted file mode 100644 index e265f6c61375..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayunion-nested.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "tests": [ - { - "description": "set: nested ArrayUnion field", - "comment": "An ArrayUnion value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". 
Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": [\"ArrayUnion\", 1, 2, 3]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayunion-noarray-nested.json b/firestore/tests/unit/v1/testdata/set-arrayunion-noarray-nested.json deleted file mode 100644 index c9b1385e03ad..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayunion-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: ArrayUnion cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ArrayUnion. Firestore transforms don't support array indexing.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": [\"ArrayUnion\", 1, 2, 3]}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayunion-noarray.json b/firestore/tests/unit/v1/testdata/set-arrayunion-noarray.json deleted file mode 100644 index 4379578bd838..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayunion-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: ArrayUnion cannot be in an array value", - "comment": "ArrayUnion must be the value of a field. Firestore\ntransforms don't support array indexing.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayunion-with-st.json b/firestore/tests/unit/v1/testdata/set-arrayunion-with-st.json deleted file mode 100644 index d65436af2055..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayunion-with-st.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: The ServerTimestamp sentinel cannot be in an ArrayUnion", - "comment": "The ServerTimestamp sentinel must be the value of a field. It may\nnot appear in an ArrayUnion.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [\"ArrayUnion\", 1, \"ServerTimestamp\", 3]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-arrayunion.json b/firestore/tests/unit/v1/testdata/set-arrayunion.json deleted file mode 100644 index 856e07517327..000000000000 --- a/firestore/tests/unit/v1/testdata/set-arrayunion.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "tests": [ - { - "description": "set: ArrayUnion with data", - "comment": "A key with ArrayUnion is removed from the data in the update \noperation. 
Instead it appears in a separate Transform operation.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-basic.json b/firestore/tests/unit/v1/testdata/set-basic.json deleted file mode 100644 index f322509126d3..000000000000 --- a/firestore/tests/unit/v1/testdata/set-basic.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "tests": [ - { - "description": "set: basic", - "comment": "A simple call, resulting in a single update operation.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-complex.json b/firestore/tests/unit/v1/testdata/set-complex.json deleted file mode 100644 index aa871ddae6c7..000000000000 --- a/firestore/tests/unit/v1/testdata/set-complex.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "tests": [ - { - "description": "set: complex", - "comment": "A call to a write method with complicated input data.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "arrayValue": { - "values": [ - { - "integerValue": "1" - }, - { - "doubleValue": 2.5 - } - ] - } - }, - "b": { - "mapValue": { - "fields": { - "c": { - "arrayValue": { - "values": [ - { - "stringValue": "three" - }, - { - "mapValue": { - "fields": { - "d": { - "booleanValue": true - } - } - } - } - ] - } - } - } - } - } - } - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-del-merge-alone.json b/firestore/tests/unit/v1/testdata/set-del-merge-alone.json deleted file mode 100644 index 7a8ba5d5458c..000000000000 --- a/firestore/tests/unit/v1/testdata/set-del-merge-alone.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Delete with merge", - "comment": "A Delete sentinel can appear with a merge option. 
If the delete\npaths are the only ones to be merged, then no document is sent, just an update mask.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "b", - "c" - ] - } - ] - }, - "jsonData": "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d" - }, - "updateMask": { - "fieldPaths": [ - "b.c" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-del-merge.json b/firestore/tests/unit/v1/testdata/set-del-merge.json deleted file mode 100644 index 6a5759c12555..000000000000 --- a/firestore/tests/unit/v1/testdata/set-del-merge.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Delete with merge", - "comment": "A Delete sentinel can appear with a merge option.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b", - "c" - ] - } - ] - }, - "jsonData": "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b.c" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-del-mergeall.json b/firestore/tests/unit/v1/testdata/set-del-mergeall.json deleted file mode 100644 index 6106a3e4f229..000000000000 --- a/firestore/tests/unit/v1/testdata/set-del-mergeall.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "tests": [ - { - "description": "set: Delete with MergeAll", - "comment": "A Delete sentinel can appear with a mergeAll option.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "all": true - }, - "jsonData": "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b.c" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-del-noarray-nested.json b/firestore/tests/unit/v1/testdata/set-del-noarray-nested.json deleted file mode 100644 index 5a2303284e48..000000000000 --- a/firestore/tests/unit/v1/testdata/set-del-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: Delete cannot be anywhere inside an array value", - "comment": "The Delete sentinel must be the value of a field. 
Deletes are implemented\nby turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not support\narray indexing.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": \"Delete\"}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-del-noarray.json b/firestore/tests/unit/v1/testdata/set-del-noarray.json deleted file mode 100644 index dee9c75f6972..000000000000 --- a/firestore/tests/unit/v1/testdata/set-del-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: Delete cannot be in an array value", - "comment": "The Delete sentinel must be the value of a field. Deletes are\nimplemented by turning the path to the Delete sentinel into a FieldPath, and FieldPaths\ndo not support array indexing.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, \"Delete\"]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-del-nomerge.json b/firestore/tests/unit/v1/testdata/set-del-nomerge.json deleted file mode 100644 index 67e3b74b8607..000000000000 --- a/firestore/tests/unit/v1/testdata/set-del-nomerge.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Delete cannot appear in an unmerged field", - "comment": "The client signals an error if the Delete sentinel is in the\ninput data, but not selected by a merge option, because this is most likely a programming\nbug.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "a" - ] - } - ] - }, - "jsonData": "{\"a\": 1, \"b\": \"Delete\"}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-del-nonleaf.json b/firestore/tests/unit/v1/testdata/set-del-nonleaf.json deleted file mode 100644 index 67c864957ca8..000000000000 --- a/firestore/tests/unit/v1/testdata/set-del-nonleaf.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Delete cannot appear as part of a merge path", - "comment": "If a Delete is part of the value at a merge path, then the user is\nconfused: their merge path says \"replace this entire value\" but their Delete says\n\"delete this part of the value\". This should be an error, just as if they specified Delete\nin a Set with no merge.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "h" - ] - } - ] - }, - "jsonData": "{\"h\": {\"g\": \"Delete\"}}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-del-wo-merge.json b/firestore/tests/unit/v1/testdata/set-del-wo-merge.json deleted file mode 100644 index 32d860a626df..000000000000 --- a/firestore/tests/unit/v1/testdata/set-del-wo-merge.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: Delete cannot appear unless a merge option is specified", - "comment": "Without a merge option, Set replaces the document with the input\ndata. 
A Delete sentinel in the data makes no sense in this case.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"Delete\"}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-empty.json b/firestore/tests/unit/v1/testdata/set-empty.json deleted file mode 100644 index 924992caf308..000000000000 --- a/firestore/tests/unit/v1/testdata/set-empty.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "set: creating or setting an empty map", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": {} - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-merge-fp.json b/firestore/tests/unit/v1/testdata/set-merge-fp.json deleted file mode 100644 index 8a5b0faa6e2a..000000000000 --- a/firestore/tests/unit/v1/testdata/set-merge-fp.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Merge with FieldPaths", - "comment": "A merge with fields that use special characters.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "*", - "~" - ] - } - ] - }, - "jsonData": "{\"*\": {\"~\": true}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "*": { - "mapValue": { - "fields": { - "~": { - "booleanValue": true - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "`*`.`~`" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-merge-nested.json b/firestore/tests/unit/v1/testdata/set-merge-nested.json deleted file mode 100644 index 8ebec8fda277..000000000000 --- a/firestore/tests/unit/v1/testdata/set-merge-nested.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Merge with a nested field", - "comment": "A merge option where the field is not at top level.\nOnly fields mentioned in the option are present in the update operation.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "h", - "g" - ] - } - ] - }, - "jsonData": "{\"h\": {\"g\": 4, \"f\": 5}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "h": { - "mapValue": { - "fields": { - "g": { - "integerValue": "4" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "h.g" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-merge-nonleaf.json b/firestore/tests/unit/v1/testdata/set-merge-nonleaf.json deleted file mode 100644 index d115e12c2abd..000000000000 --- a/firestore/tests/unit/v1/testdata/set-merge-nonleaf.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Merge field is not a leaf", - "comment": "If a field path is in a merge option, the value at that path\nreplaces the stored value. 
That is true even if the value is complex.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "h" - ] - } - ] - }, - "jsonData": "{\"h\": {\"f\": 5, \"g\": 6}, \"e\": 7}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "h": { - "mapValue": { - "fields": { - "f": { - "integerValue": "5" - }, - "g": { - "integerValue": "6" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "h" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-merge-prefix.json b/firestore/tests/unit/v1/testdata/set-merge-prefix.json deleted file mode 100644 index a09e4db50985..000000000000 --- a/firestore/tests/unit/v1/testdata/set-merge-prefix.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: One merge path cannot be the prefix of another", - "comment": "The prefix would make the other path meaningless, so this is\nprobably a programming error.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "a", - "b" - ] - } - ] - }, - "jsonData": "{\"a\": {\"b\": 1}}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-merge-present.json b/firestore/tests/unit/v1/testdata/set-merge-present.json deleted file mode 100644 index b501b23d03f5..000000000000 --- a/firestore/tests/unit/v1/testdata/set-merge-present.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Merge fields must all be present in data", - "comment": "The client signals an error if a merge option mentions a path\nthat is not in the input data.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "b" - ] - }, - { - "field": [ - "a" - ] - } - ] - }, - "jsonData": "{\"a\": 1}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-merge.json b/firestore/tests/unit/v1/testdata/set-merge.json deleted file mode 100644 index 8ce730e840ad..000000000000 --- a/firestore/tests/unit/v1/testdata/set-merge.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: Merge with a field", - "comment": "Fields in the input data but not in a merge option are pruned.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "a" - ] - } - ] - }, - "jsonData": "{\"a\": 1, \"b\": 2}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-mergeall-empty.json b/firestore/tests/unit/v1/testdata/set-mergeall-empty.json deleted file mode 100644 index e541ad8c9a7d..000000000000 --- a/firestore/tests/unit/v1/testdata/set-mergeall-empty.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "tests": [ - { - "description": "set: MergeAll can be specified with empty data.", - "comment": "This is a valid call that can be used to ensure a document exists.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { 
- "all": true - }, - "jsonData": "{}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": {} - }, - "updateMask": { - "fieldPaths": [] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-mergeall-nested.json b/firestore/tests/unit/v1/testdata/set-mergeall-nested.json deleted file mode 100644 index c70ec691e29a..000000000000 --- a/firestore/tests/unit/v1/testdata/set-mergeall-nested.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "tests": [ - { - "description": "set: MergeAll with nested fields", - "comment": "MergeAll with nested fields results in an update mask that\nincludes entries for all the leaf fields.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "all": true - }, - "jsonData": "{\"h\": { \"g\": 3, \"f\": 4 }}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "h": { - "mapValue": { - "fields": { - "f": { - "integerValue": "4" - }, - "g": { - "integerValue": "3" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "h.f", - "h.g" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-mergeall.json b/firestore/tests/unit/v1/testdata/set-mergeall.json deleted file mode 100644 index 55a2377cb51d..000000000000 --- a/firestore/tests/unit/v1/testdata/set-mergeall.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "tests": [ - { - "description": "set: MergeAll", - "comment": "The MergeAll option with a simple piece of data.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "all": true - }, - "jsonData": "{\"a\": 1, \"b\": 2}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - }, - "b": { - "integerValue": "2" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-nodel.json b/firestore/tests/unit/v1/testdata/set-nodel.json deleted file mode 100644 index 5580bc04f64c..000000000000 --- a/firestore/tests/unit/v1/testdata/set-nodel.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: Delete cannot appear in data", - "comment": "The Delete sentinel cannot be used in Create, or in Set without a Merge option.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"Delete\"}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-nosplit.json b/firestore/tests/unit/v1/testdata/set-nosplit.json deleted file mode 100644 index 3866027b9b58..000000000000 --- a/firestore/tests/unit/v1/testdata/set-nosplit.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "tests": [ - { - "description": "set: don’t split on dots", - "comment": "Create and Set treat their map keys literally. 
They do not split on dots.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a.b": { - "mapValue": { - "fields": { - "c.d": { - "integerValue": "1" - } - } - } - }, - "e": { - "integerValue": "2" - } - } - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-special-chars.json b/firestore/tests/unit/v1/testdata/set-special-chars.json deleted file mode 100644 index 865ffcd9dc76..000000000000 --- a/firestore/tests/unit/v1/testdata/set-special-chars.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "tests": [ - { - "description": "set: non-alpha characters in map keys", - "comment": "Create and Set treat their map keys literally. They do not escape special characters.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{ \"*\": { \".\": 1 }, \"~\": 2 }", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "*": { - "mapValue": { - "fields": { - ".": { - "integerValue": "1" - } - } - } - }, - "~": { - "integerValue": "2" - } - } - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-alone-mergeall.json b/firestore/tests/unit/v1/testdata/set-st-alone-mergeall.json deleted file mode 100644 index d95bf0973b79..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-alone-mergeall.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "tests": [ - { - "description": "set: ServerTimestamp alone with MergeAll", - "comment": "If the only values in the input are ServerTimestamps, then no\nupdate operation should be produced.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "all": true - }, - "jsonData": "{\"a\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-alone.json b/firestore/tests/unit/v1/testdata/set-st-alone.json deleted file mode 100644 index 3fe931394b0e..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-alone.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "tests": [ - { - "description": "set: ServerTimestamp alone", - "comment": "If the only values in the input are ServerTimestamps, then\nan update operation with an empty map should be produced.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": {} - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-merge-both.json b/firestore/tests/unit/v1/testdata/set-st-merge-both.json deleted file mode 100644 index 
a39ada55f738..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-merge-both.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: ServerTimestamp with Merge of both fields", - "comment": "Just as when no merge option is specified, ServerTimestamp\nsentinel values are removed from the data in the update operation and become\ntransforms.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ] - }, - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-merge-nonleaf-alone.json b/firestore/tests/unit/v1/testdata/set-st-merge-nonleaf-alone.json deleted file mode 100644 index 4193b00ea683..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-merge-nonleaf-alone.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: non-leaf merge field with ServerTimestamp alone", - "comment": "If a field path is in a merge option, the value at that path\nreplaces the stored value. If the value has only ServerTimestamps, they become transforms\nand we clear the value by including the field path in the update mask.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "h" - ] - } - ] - }, - "jsonData": "{\"h\": {\"g\": \"ServerTimestamp\"}, \"e\": 7}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d" - }, - "updateMask": { - "fieldPaths": [ - "h" - ] - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "h.g", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-merge-nonleaf.json b/firestore/tests/unit/v1/testdata/set-st-merge-nonleaf.json deleted file mode 100644 index 5e91d663b8c6..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-merge-nonleaf.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: non-leaf merge field with ServerTimestamp", - "comment": "If a field path is in a merge option, the value at that path\nreplaces the stored value, and ServerTimestamps inside that value become transforms\nas usual.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "h" - ] - } - ] - }, - "jsonData": "{\"h\": {\"f\": 5, \"g\": \"ServerTimestamp\"}, \"e\": 7}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "h": { - "mapValue": { - "fields": { - "f": { - "integerValue": "5" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "h" - ] - } - }, - { - "transform": { - 
"document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "h.g", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-merge-nowrite.json b/firestore/tests/unit/v1/testdata/set-st-merge-nowrite.json deleted file mode 100644 index 08fa8b52f54b..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-merge-nowrite.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: If no ordinary values in Merge, no write", - "comment": "If all the fields in the merge option have ServerTimestamp\nvalues, then no update operation is produced, only a transform.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "b" - ] - } - ] - }, - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-mergeall.json b/firestore/tests/unit/v1/testdata/set-st-mergeall.json deleted file mode 100644 index 26883c03820d..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-mergeall.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "tests": [ - { - "description": "set: ServerTimestamp with MergeAll", - "comment": "Just as when no merge option is specified, ServerTimestamp\nsentinel values are removed from the data in the update operation and become\ntransforms.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "all": true - }, - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-multi.json b/firestore/tests/unit/v1/testdata/set-st-multi.json deleted file mode 100644 index 23c06f4976f7..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-multi.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "tests": [ - { - "description": "set: multiple ServerTimestamp fields", - "comment": "A document can have more than one ServerTimestamp field.\nSince all the ServerTimestamp fields are removed, the only field in the update is \"a\".", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - }, - { - "fieldPath": "c.d", - "setToServerValue": "REQUEST_TIME" - 
} - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-nested.json b/firestore/tests/unit/v1/testdata/set-st-nested.json deleted file mode 100644 index 5c94c33f943d..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-nested.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "tests": [ - { - "description": "set: nested ServerTimestamp field", - "comment": "A ServerTimestamp value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-noarray-nested.json b/firestore/tests/unit/v1/testdata/set-st-noarray-nested.json deleted file mode 100644 index 5ad6a50897ba..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: ServerTimestamp cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ServerTimestamp sentinel. Firestore transforms don't support array indexing.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-noarray.json b/firestore/tests/unit/v1/testdata/set-st-noarray.json deleted file mode 100644 index 76a2881cb61b..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "set: ServerTimestamp cannot be in an array value", - "comment": "The ServerTimestamp sentinel must be the value of a field. 
Firestore\ntransforms don't support array indexing.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, \"ServerTimestamp\"]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-nomerge.json b/firestore/tests/unit/v1/testdata/set-st-nomerge.json deleted file mode 100644 index 0523ed74fb44..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-nomerge.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "tests": [ - { - "description": "set-merge: If is ServerTimestamp not in Merge, no transform", - "comment": "If the ServerTimestamp value is not mentioned in a merge option,\nthen it is pruned from the data but does not result in a transform.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "option": { - "fields": [ - { - "field": [ - "a" - ] - } - ] - }, - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st-with-empty-map.json b/firestore/tests/unit/v1/testdata/set-st-with-empty-map.json deleted file mode 100644 index 063c94a0e6cd..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st-with-empty-map.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "tests": [ - { - "description": "set: ServerTimestamp beside an empty map", - "comment": "When a ServerTimestamp and a map both reside inside a map, the\nServerTimestamp should be stripped out but the empty map should remain.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": {\"b\": {}, \"c\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "mapValue": { - "fields": { - "b": { - "mapValue": { - "fields": {} - } - } - } - } - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/set-st.json b/firestore/tests/unit/v1/testdata/set-st.json deleted file mode 100644 index 42f2b14f1c7f..000000000000 --- a/firestore/tests/unit/v1/testdata/set-st.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "tests": [ - { - "description": "set: ServerTimestamp with data", - "comment": "A key with the special ServerTimestamp sentinel is removed from\nthe data in the update operation. 
Instead it appears in a separate Transform operation.\nNote that in these tests, the string \"ServerTimestamp\" should be replaced with the\nspecial ServerTimestamp value.", - "set": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-all-transforms.json b/firestore/tests/unit/v1/testdata/update-all-transforms.json deleted file mode 100644 index 6f6a725df0fc..000000000000 --- a/firestore/tests/unit/v1/testdata/update-all-transforms.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "tests": [ - { - "description": "update: all transforms in a single call", - "comment": "A document can be created with any amount of transforms.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": [\"ArrayUnion\", 1, 2, 3], \"d\": [\"ArrayRemove\", 4, 5, 6]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - }, - { - "fieldPath": "c", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "d", - "removeAllFromArray": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayremove-alone.json b/firestore/tests/unit/v1/testdata/update-arrayremove-alone.json deleted file mode 100644 index 86fc8802e52e..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayremove-alone.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "tests": [ - { - "description": "update: ArrayRemove alone", - "comment": "If the only values in the input are ArrayRemove, then no\nupdate operation should be produced.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [\"ArrayRemove\", 1, 2, 3]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayremove-multi.json b/firestore/tests/unit/v1/testdata/update-arrayremove-multi.json deleted file mode 100644 index df880f6792b9..000000000000 
--- a/firestore/tests/unit/v1/testdata/update-arrayremove-multi.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "tests": [ - { - "description": "update: multiple ArrayRemove fields", - "comment": "A document can have more than one ArrayRemove field.\nSince all the ArrayRemove fields are removed, the only field in the update is \"a\".", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3], \"c\": {\"d\": [\"ArrayRemove\", 4, 5, 6]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "c" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "c.d", - "removeAllFromArray": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayremove-nested.json b/firestore/tests/unit/v1/testdata/update-arrayremove-nested.json deleted file mode 100644 index 28d59aff661f..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayremove-nested.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "tests": [ - { - "description": "update: nested ArrayRemove field", - "comment": "An ArrayRemove value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": [\"ArrayRemove\", 1, 2, 3]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayremove-noarray-nested.json b/firestore/tests/unit/v1/testdata/update-arrayremove-noarray-nested.json deleted file mode 100644 index 842c5fe3240c..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayremove-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: ArrayRemove cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ArrayRemove. 
Firestore transforms don't support array indexing.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": [\"ArrayRemove\", 1, 2, 3]}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayremove-noarray.json b/firestore/tests/unit/v1/testdata/update-arrayremove-noarray.json deleted file mode 100644 index 0a371f055488..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayremove-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: ArrayRemove cannot be in an array value", - "comment": "ArrayRemove must be the value of a field. Firestore\ntransforms don't support array indexing.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayremove-with-st.json b/firestore/tests/unit/v1/testdata/update-arrayremove-with-st.json deleted file mode 100644 index 9d110de9caea..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayremove-with-st.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: The ServerTimestamp sentinel cannot be in an ArrayUnion", - "comment": "The ServerTimestamp sentinel must be the value of a field. It may\nnot appear in an ArrayUnion.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [\"ArrayRemove\", 1, \"ServerTimestamp\", 3]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayremove.json b/firestore/tests/unit/v1/testdata/update-arrayremove.json deleted file mode 100644 index d925704db63b..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayremove.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "tests": [ - { - "description": "update: ArrayRemove with data", - "comment": "A key with ArrayRemove is removed from the data in the update \noperation. 
Instead it appears in a separate Transform operation.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayunion-alone.json b/firestore/tests/unit/v1/testdata/update-arrayunion-alone.json deleted file mode 100644 index 757ea48c3b7f..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayunion-alone.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "tests": [ - { - "description": "update: ArrayUnion alone", - "comment": "If the only values in the input are ArrayUnion, then no\nupdate operation should be produced.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [\"ArrayUnion\", 1, 2, 3]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayunion-multi.json b/firestore/tests/unit/v1/testdata/update-arrayunion-multi.json deleted file mode 100644 index 3aafcd0f3545..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayunion-multi.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "tests": [ - { - "description": "update: multiple ArrayUnion fields", - "comment": "A document can have more than one ArrayUnion field.\nSince all the ArrayUnion fields are removed, the only field in the update is \"a\".", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3], \"c\": {\"d\": [\"ArrayUnion\", 4, 5, 6]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "c" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "c.d", - "appendMissingElements": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayunion-nested.json 
b/firestore/tests/unit/v1/testdata/update-arrayunion-nested.json deleted file mode 100644 index f2bf3770dc77..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayunion-nested.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "tests": [ - { - "description": "update: nested ArrayUnion field", - "comment": "An ArrayUnion value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": [\"ArrayUnion\", 1, 2, 3]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayunion-noarray-nested.json b/firestore/tests/unit/v1/testdata/update-arrayunion-noarray-nested.json deleted file mode 100644 index 08745a08b07b..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayunion-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: ArrayUnion cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ArrayUnion. Firestore transforms don't support array indexing.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": [\"ArrayUnion\", 1, 2, 3]}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayunion-noarray.json b/firestore/tests/unit/v1/testdata/update-arrayunion-noarray.json deleted file mode 100644 index 284f42800eba..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayunion-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: ArrayUnion cannot be in an array value", - "comment": "ArrayUnion must be the value of a field. Firestore\ntransforms don't support array indexing.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayunion-with-st.json b/firestore/tests/unit/v1/testdata/update-arrayunion-with-st.json deleted file mode 100644 index 1c47591e29bc..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayunion-with-st.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: The ServerTimestamp sentinel cannot be in an ArrayUnion", - "comment": "The ServerTimestamp sentinel must be the value of a field. 
It may\nnot appear in an ArrayUnion.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [\"ArrayUnion\", 1, \"ServerTimestamp\", 3]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-arrayunion.json b/firestore/tests/unit/v1/testdata/update-arrayunion.json deleted file mode 100644 index 60192c9f8c0b..000000000000 --- a/firestore/tests/unit/v1/testdata/update-arrayunion.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "tests": [ - { - "description": "update: ArrayUnion with data", - "comment": "A key with ArrayUnion is removed from the data in the update \noperation. Instead it appears in a separate Transform operation.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3]}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-badchar.json b/firestore/tests/unit/v1/testdata/update-badchar.json deleted file mode 100644 index 7d5e6e4f07bc..000000000000 --- a/firestore/tests/unit/v1/testdata/update-badchar.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: invalid character", - "comment": "The keys of the data given to Update are interpreted, unlike those of Create and Set. 
They cannot contain special characters.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a~b\": 1}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-basic.json b/firestore/tests/unit/v1/testdata/update-basic.json deleted file mode 100644 index f864247427e8..000000000000 --- a/firestore/tests/unit/v1/testdata/update-basic.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "tests": [ - { - "description": "update: basic", - "comment": "A simple call, resulting in a single update operation.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-complex.json b/firestore/tests/unit/v1/testdata/update-complex.json deleted file mode 100644 index ddf8373367c4..000000000000 --- a/firestore/tests/unit/v1/testdata/update-complex.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "tests": [ - { - "description": "update: complex", - "comment": "A call to a write method with complicated input data.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "arrayValue": { - "values": [ - { - "integerValue": "1" - }, - { - "doubleValue": 2.5 - } - ] - } - }, - "b": { - "mapValue": { - "fields": { - "c": { - "arrayValue": { - "values": [ - { - "stringValue": "three" - }, - { - "mapValue": { - "fields": { - "d": { - "booleanValue": true - } - } - } - } - ] - } - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-del-alone.json b/firestore/tests/unit/v1/testdata/update-del-alone.json deleted file mode 100644 index 45598ab40220..000000000000 --- a/firestore/tests/unit/v1/testdata/update-del-alone.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "tests": [ - { - "description": "update: Delete alone", - "comment": "If the input data consists solely of Deletes, then the update\noperation has no map, just an update mask.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": \"Delete\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d" - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-del-dot.json b/firestore/tests/unit/v1/testdata/update-del-dot.json deleted file mode 100644 index 44f36b0c3e85..000000000000 --- a/firestore/tests/unit/v1/testdata/update-del-dot.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "tests": [ - { - "description": "update: Delete with a dotted field", - "comment": "After expanding top-level dotted fields, fields with 
Delete\nvalues are pruned from the output data, but appear in the update mask.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b.c\": \"Delete\", \"b.d\": 2}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - }, - "b": { - "mapValue": { - "fields": { - "d": { - "integerValue": "2" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b.c", - "b.d" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-del-nested.json b/firestore/tests/unit/v1/testdata/update-del-nested.json deleted file mode 100644 index 18d08f3f004e..000000000000 --- a/firestore/tests/unit/v1/testdata/update-del-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: Delete cannot be nested", - "comment": "The Delete sentinel must be the value of a top-level key.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": {\"b\": \"Delete\"}}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-del-noarray-nested.json b/firestore/tests/unit/v1/testdata/update-del-noarray-nested.json deleted file mode 100644 index 025cbed0dfb3..000000000000 --- a/firestore/tests/unit/v1/testdata/update-del-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: Delete cannot be anywhere inside an array value", - "comment": "The Delete sentinel must be the value of a field. Deletes are implemented\nby turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not support\narray indexing.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": \"Delete\"}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-del-noarray.json b/firestore/tests/unit/v1/testdata/update-del-noarray.json deleted file mode 100644 index dce3806f2c35..000000000000 --- a/firestore/tests/unit/v1/testdata/update-del-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: Delete cannot be in an array value", - "comment": "The Delete sentinel must be the value of a field. 
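The Delete fixtures map onto the client's `firestore.DELETE_FIELD` sentinel. A rough illustration of the allowed and disallowed shapes these tests exercise (document path assumed):

```python
from google.cloud import firestore

client = firestore.Client()
doc_ref = client.collection("C").document("d")

# Allowed: Delete as the value of a top-level (possibly dotted) key. "b" is
# dropped from the update data but still appears in the field mask.
doc_ref.update({"a": 1, "b": firestore.DELETE_FIELD})

# Client-side errors: the sentinel nested inside a map or an array value.
# doc_ref.update({"a": {"b": firestore.DELETE_FIELD}})
# doc_ref.update({"a": [1, 2, firestore.DELETE_FIELD]})
```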
Deletes are\nimplemented by turning the path to the Delete sentinel into a FieldPath, and FieldPaths\ndo not support array indexing.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, \"Delete\"]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-del.json b/firestore/tests/unit/v1/testdata/update-del.json deleted file mode 100644 index 26a6a1bc7e43..000000000000 --- a/firestore/tests/unit/v1/testdata/update-del.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "tests": [ - { - "description": "update: Delete", - "comment": "If a field's value is the Delete sentinel, then it doesn't appear\nin the update data, but does in the mask.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"Delete\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-exists-precond.json b/firestore/tests/unit/v1/testdata/update-exists-precond.json deleted file mode 100644 index bdbe274b4c23..000000000000 --- a/firestore/tests/unit/v1/testdata/update-exists-precond.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "tests": [ - { - "description": "update: Exists precondition is invalid", - "comment": "The Update method does not support an explicit exists precondition.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "precondition": { - "exists": true - }, - "jsonData": "{\"a\": 1}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-fp-empty-component.json b/firestore/tests/unit/v1/testdata/update-fp-empty-component.json deleted file mode 100644 index 50274e49ffe2..000000000000 --- a/firestore/tests/unit/v1/testdata/update-fp-empty-component.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: empty field path component", - "comment": "Empty fields are not allowed.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a..b\": 1}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-nested-transform-and-nested-value.json b/firestore/tests/unit/v1/testdata/update-nested-transform-and-nested-value.json deleted file mode 100644 index ff7bfc6ee944..000000000000 --- a/firestore/tests/unit/v1/testdata/update-nested-transform-and-nested-value.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "tests": [ - { - "description": "update: Nested transforms should not affect the field mask, even\nwhen there are other values that do. Transforms should only affect the\nDocumentTransform_FieldTransform list.", - "comment": "For updates, top-level paths in json-like map inputs\nare split on the dot. That is, an input {\"a.b.c\": 7} results in an update to\nfield c of object b of object a with value 7. In order to specify this behavior,\nthe update must use a fieldmask \"a.b.c\". 
However, fieldmasks are only used for\nconcrete values - transforms are separately encoded in a\nDocumentTransform_FieldTransform array.\n\nThis test exercises a bug found in python (https://github.com/googleapis/google-cloud-python/issues/7215)\nin which nested transforms ({\"a.c\": \"ServerTimestamp\"}) next to nested values\n({\"a.b\": 7}) incorrectly caused the fieldmask \"a\" to be set, which has the\neffect of wiping out all data in \"a\" other than what was specified in the\njson-like input.\n\nInstead, as this test specifies, transforms should not affect the fieldmask.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a.b\": 7, \"a.c\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "mapValue": { - "fields": { - "b": { - "integerValue": "7" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "a.b" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-no-paths.json b/firestore/tests/unit/v1/testdata/update-no-paths.json deleted file mode 100644 index 6cfbc01dce20..000000000000 --- a/firestore/tests/unit/v1/testdata/update-no-paths.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: no paths", - "comment": "It is a client-side error to call Update with empty data.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-all-transforms.json b/firestore/tests/unit/v1/testdata/update-paths-all-transforms.json deleted file mode 100644 index 01a4c1143dc1..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-all-transforms.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: all transforms in a single call", - "comment": "A document can be created with any amount of transforms.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - }, - { - "field": [ - "c" - ] - }, - { - "field": [ - "d" - ] - } - ], - "jsonValues": [ - "1", - "\"ServerTimestamp\"", - "[\"ArrayUnion\", 1, 2, 3]", - "[\"ArrayRemove\", 4, 5, 6]" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - }, - { - "fieldPath": "c", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "d", - "removeAllFromArray": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } 
- ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-alone.json b/firestore/tests/unit/v1/testdata/update-paths-arrayremove-alone.json deleted file mode 100644 index 9bc8a1440137..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-alone.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ArrayRemove alone", - "comment": "If the only values in the input are ArrayRemove, then no\nupdate operation should be produced.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[\"ArrayRemove\", 1, 2, 3]" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-multi.json b/firestore/tests/unit/v1/testdata/update-paths-arrayremove-multi.json deleted file mode 100644 index 9a8547120e3a..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-multi.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: multiple ArrayRemove fields", - "comment": "A document can have more than one ArrayRemove field.\nSince all the ArrayRemove fields are removed, the only field in the update is \"a\".", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - }, - { - "field": [ - "c" - ] - } - ], - "jsonValues": [ - "1", - "[\"ArrayRemove\", 1, 2, 3]", - "{\"d\": [\"ArrayRemove\", 4, 5, 6]}" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "c" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "c.d", - "removeAllFromArray": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-nested.json b/firestore/tests/unit/v1/testdata/update-paths-arrayremove-nested.json deleted file mode 100644 index e7f952ec3423..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-nested.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: nested ArrayRemove field", - "comment": "An ArrayRemove value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". 
Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ], - "jsonValues": [ - "1", - "{\"c\": [\"ArrayRemove\", 1, 2, 3]}" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-noarray-nested.json b/firestore/tests/unit/v1/testdata/update-paths-arrayremove-noarray-nested.json deleted file mode 100644 index b669e870cd31..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-noarray-nested.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ArrayRemove cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ArrayRemove. Firestore transforms don't support array indexing.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[1, {\"b\": [\"ArrayRemove\", 1, 2, 3]}]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-noarray.json b/firestore/tests/unit/v1/testdata/update-paths-arrayremove-noarray.json deleted file mode 100644 index ff50e11e4fb2..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-noarray.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ArrayRemove cannot be in an array value", - "comment": "ArrayRemove must be the value of a field. Firestore\ntransforms don't support array indexing.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[1, 2, [\"ArrayRemove\", 1, 2, 3]]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-with-st.json b/firestore/tests/unit/v1/testdata/update-paths-arrayremove-with-st.json deleted file mode 100644 index d27d26e44664..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayremove-with-st.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: The ServerTimestamp sentinel cannot be in an ArrayUnion", - "comment": "The ServerTimestamp sentinel must be the value of a field. 
It may\nnot appear in an ArrayUnion.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[\"ArrayRemove\", 1, \"ServerTimestamp\", 3]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayremove.json b/firestore/tests/unit/v1/testdata/update-paths-arrayremove.json deleted file mode 100644 index 673a2ca2c1af..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayremove.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ArrayRemove with data", - "comment": "A key with ArrayRemove is removed from the data in the update \noperation. Instead it appears in a separate Transform operation.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ], - "jsonValues": [ - "1", - "[\"ArrayRemove\", 1, 2, 3]" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "removeAllFromArray": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-alone.json b/firestore/tests/unit/v1/testdata/update-paths-arrayunion-alone.json deleted file mode 100644 index 81e1e9771ab7..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-alone.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ArrayUnion alone", - "comment": "If the only values in the input are ArrayUnion, then no\nupdate operation should be produced.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[\"ArrayUnion\", 1, 2, 3]" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-multi.json b/firestore/tests/unit/v1/testdata/update-paths-arrayunion-multi.json deleted file mode 100644 index ef421bdad180..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-multi.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: multiple ArrayUnion fields", - "comment": "A document can have more than one ArrayUnion field.\nSince all the ArrayUnion fields are removed, the only field in the update is \"a\".", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - }, 
- { - "field": [ - "c" - ] - } - ], - "jsonValues": [ - "1", - "[\"ArrayUnion\", 1, 2, 3]", - "{\"d\": [\"ArrayUnion\", 4, 5, 6]}" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "c" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - }, - { - "fieldPath": "c.d", - "appendMissingElements": { - "values": [ - { - "integerValue": "4" - }, - { - "integerValue": "5" - }, - { - "integerValue": "6" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-nested.json b/firestore/tests/unit/v1/testdata/update-paths-arrayunion-nested.json deleted file mode 100644 index 2d73527a4048..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-nested.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: nested ArrayUnion field", - "comment": "An ArrayUnion value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ], - "jsonValues": [ - "1", - "{\"c\": [\"ArrayUnion\", 1, 2, 3]}" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-noarray-nested.json b/firestore/tests/unit/v1/testdata/update-paths-arrayunion-noarray-nested.json deleted file mode 100644 index 0e8a634a4417..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-noarray-nested.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ArrayUnion cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ArrayUnion. 
Firestore transforms don't support array indexing.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[1, {\"b\": [\"ArrayUnion\", 1, 2, 3]}]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-noarray.json b/firestore/tests/unit/v1/testdata/update-paths-arrayunion-noarray.json deleted file mode 100644 index ce45841888fa..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-noarray.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ArrayUnion cannot be in an array value", - "comment": "ArrayUnion must be the value of a field. Firestore\ntransforms don't support array indexing.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[1, 2, [\"ArrayRemove\", 1, 2, 3]]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-with-st.json b/firestore/tests/unit/v1/testdata/update-paths-arrayunion-with-st.json deleted file mode 100644 index c0a4204182cd..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayunion-with-st.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: The ServerTimestamp sentinel cannot be in an ArrayUnion", - "comment": "The ServerTimestamp sentinel must be the value of a field. It may\nnot appear in an ArrayUnion.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[\"ArrayUnion\", 1, \"ServerTimestamp\", 3]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-arrayunion.json b/firestore/tests/unit/v1/testdata/update-paths-arrayunion.json deleted file mode 100644 index 1401993d059d..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-arrayunion.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ArrayUnion with data", - "comment": "A key with ArrayUnion is removed from the data in the update \noperation. 
Instead it appears in a separate Transform operation.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ], - "jsonValues": [ - "1", - "[\"ArrayUnion\", 1, 2, 3]" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "appendMissingElements": { - "values": [ - { - "integerValue": "1" - }, - { - "integerValue": "2" - }, - { - "integerValue": "3" - } - ] - } - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-basic.json b/firestore/tests/unit/v1/testdata/update-paths-basic.json deleted file mode 100644 index bf1164ac410d..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-basic.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: basic", - "comment": "A simple call, resulting in a single update operation.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "1" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-complex.json b/firestore/tests/unit/v1/testdata/update-paths-complex.json deleted file mode 100644 index 2f3faa7846c6..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-complex.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: complex", - "comment": "A call to a write method with complicated input data.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ], - "jsonValues": [ - "[1, 2.5]", - "{\"c\": [\"three\", {\"d\": true}]}" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "arrayValue": { - "values": [ - { - "integerValue": "1" - }, - { - "doubleValue": 2.5 - } - ] - } - }, - "b": { - "mapValue": { - "fields": { - "c": { - "arrayValue": { - "values": [ - { - "stringValue": "three" - }, - { - "mapValue": { - "fields": { - "d": { - "booleanValue": true - } - } - } - } - ] - } - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-del-alone.json b/firestore/tests/unit/v1/testdata/update-paths-del-alone.json deleted file mode 100644 index e3368c86c376..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-del-alone.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: Delete 
alone", - "comment": "If the input data consists solely of Deletes, then the update\noperation has no map, just an update mask.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "\"Delete\"" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d" - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-del-nested.json b/firestore/tests/unit/v1/testdata/update-paths-del-nested.json deleted file mode 100644 index 07f9f405ea40..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-del-nested.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: Delete cannot be nested", - "comment": "The Delete sentinel must be the value of a top-level key.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "{\"b\": \"Delete\"}" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-del-noarray-nested.json b/firestore/tests/unit/v1/testdata/update-paths-del-noarray-nested.json deleted file mode 100644 index a74c0aeb570c..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-del-noarray-nested.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: Delete cannot be anywhere inside an array value", - "comment": "The Delete sentinel must be the value of a field. Deletes are implemented\nby turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not support\narray indexing.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[1, {\"b\": \"Delete\"}]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-del-noarray.json b/firestore/tests/unit/v1/testdata/update-paths-del-noarray.json deleted file mode 100644 index fb6d00b72400..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-del-noarray.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: Delete cannot be in an array value", - "comment": "The Delete sentinel must be the value of a field. 
Deletes are\nimplemented by turning the path to the Delete sentinel into a FieldPath, and FieldPaths\ndo not support array indexing.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[1, 2, \"Delete\"]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-del.json b/firestore/tests/unit/v1/testdata/update-paths-del.json deleted file mode 100644 index cb5f6bedf41e..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-del.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: Delete", - "comment": "If a field's value is the Delete sentinel, then it doesn't appear\nin the update data, but does in the mask.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ], - "jsonValues": [ - "1", - "\"Delete\"" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-exists-precond.json b/firestore/tests/unit/v1/testdata/update-paths-exists-precond.json deleted file mode 100644 index d495db033010..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-exists-precond.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: Exists precondition is invalid", - "comment": "The Update method does not support an explicit exists precondition.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "precondition": { - "exists": true - }, - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "1" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-fp-del.json b/firestore/tests/unit/v1/testdata/update-paths-fp-del.json deleted file mode 100644 index 95b787a91363..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-fp-del.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: field paths with delete", - "comment": "If one nested field is deleted, and another isn't, preserve the second.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "foo", - "bar" - ] - }, - { - "field": [ - "foo", - "delete" - ] - } - ], - "jsonValues": [ - "1", - "\"Delete\"" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "foo": { - "mapValue": { - "fields": { - "bar": { - "integerValue": "1" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "foo.bar", - "foo.delete" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-fp-dup-transforms.json b/firestore/tests/unit/v1/testdata/update-paths-fp-dup-transforms.json deleted file mode 100644 index aff02a8d2036..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-fp-dup-transforms.json +++ /dev/null @@ 
-1,34 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: duplicate field path with only transforms", - "comment": "The same field cannot occur more than once, even if all the operations are transforms.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - }, - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[\"ArrayUnion\", 1, 2, 3]", - "\"ServerTimestamp\"", - "[\"ArrayUnion\", 4, 5, 6]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-fp-dup.json b/firestore/tests/unit/v1/testdata/update-paths-fp-dup.json deleted file mode 100644 index 71bf4d54a2a4..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-fp-dup.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: duplicate field path", - "comment": "The same field cannot occur more than once.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - }, - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "1", - "2", - "3" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-fp-empty-component.json b/firestore/tests/unit/v1/testdata/update-paths-fp-empty-component.json deleted file mode 100644 index 161e9f6eff9e..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-fp-empty-component.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: empty field path component", - "comment": "Empty fields are not allowed.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "*", - "" - ] - } - ], - "jsonValues": [ - "1" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-fp-empty.json b/firestore/tests/unit/v1/testdata/update-paths-fp-empty.json deleted file mode 100644 index 9424da130565..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-fp-empty.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: empty field path", - "comment": "A FieldPath of length zero is invalid.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [] - } - ], - "jsonValues": [ - "1" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-fp-multi.json b/firestore/tests/unit/v1/testdata/update-paths-fp-multi.json deleted file mode 100644 index a0afd38b8f26..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-fp-multi.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: multiple-element field path", - "comment": "The UpdatePaths or equivalent method takes a list of FieldPaths.\nEach FieldPath is a sequence of uninterpreted path components.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a", - "b" - ] - } - ], - "jsonValues": [ - "1" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "mapValue": { - "fields": { - "b": { - "integerValue": "1" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ 
- "a.b" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-fp-nosplit.json b/firestore/tests/unit/v1/testdata/update-paths-fp-nosplit.json deleted file mode 100644 index 23e9ddc9d3ad..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-fp-nosplit.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: FieldPath elements are not split on dots", - "comment": "FieldPath components are not split on dots.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a.b", - "f.g" - ] - } - ], - "jsonValues": [ - "{\"n.o\": 7}" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a.b": { - "mapValue": { - "fields": { - "f.g": { - "mapValue": { - "fields": { - "n.o": { - "integerValue": "7" - } - } - } - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "`a.b`.`f.g`" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-nested-transform-and-nested-value.json b/firestore/tests/unit/v1/testdata/update-paths-nested-transform-and-nested-value.json deleted file mode 100644 index 927d783aee46..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-nested-transform-and-nested-value.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: Nested transforms should not affect the field mask, even\nwhen there are other values that do. Transforms should only affect the\nDocumentTransform_FieldTransform list.", - "comment": "For updates, top-level paths in json-like map inputs\nare split on the dot. That is, an input {\"a.b.c\": 7} results in an update to\nfield c of object b of object a with value 7. In order to specify this behavior,\nthe update must use a fieldmask \"a.b.c\". 
However, fieldmasks are only used for\nconcrete values - transforms are separately encoded in a\nDocumentTransform_FieldTransform array.\n\nThis test exercises a bug found in python (https://github.com/googleapis/google-cloud-python/issues/7215)\nin which nested transforms ({\"a.c\": \"ServerTimestamp\"}) next to nested values\n({\"a.b\": 7}) incorrectly caused the fieldmask \"a\" to be set, which has the\neffect of wiping out all data in \"a\" other than what was specified in the\njson-like input.\n\nInstead, as this test specifies, transforms should not affect the fieldmask.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a", - "b" - ] - }, - { - "field": [ - "a", - "c" - ] - } - ], - "jsonValues": [ - "7", - "\"ServerTimestamp\"" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "mapValue": { - "fields": { - "b": { - "integerValue": "7" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "a.b" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-no-paths.json b/firestore/tests/unit/v1/testdata/update-paths-no-paths.json deleted file mode 100644 index e8ad035eaf13..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-no-paths.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: no paths", - "comment": "It is a client-side error to call Update with empty data.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-prefix-1.json b/firestore/tests/unit/v1/testdata/update-paths-prefix-1.json deleted file mode 100644 index 0bc1c0e812de..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-prefix-1.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: prefix #1", - "comment": "In the input data, one field cannot be a prefix of another.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a", - "b" - ] - }, - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "1", - "2" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-prefix-2.json b/firestore/tests/unit/v1/testdata/update-paths-prefix-2.json deleted file mode 100644 index 6f1d152a7077..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-prefix-2.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: prefix #2", - "comment": "In the input data, one field cannot be a prefix of another.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "a", - "b" - ] - } - ], - "jsonValues": [ - "1", - "2" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-prefix-3.json b/firestore/tests/unit/v1/testdata/update-paths-prefix-3.json deleted file mode 100644 index 4fe17b292f6a..000000000000 --- 
a/firestore/tests/unit/v1/testdata/update-paths-prefix-3.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: prefix #3", - "comment": "In the input data, one field cannot be a prefix of another, even if the values could in principle be combined.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "a", - "d" - ] - } - ], - "jsonValues": [ - "{\"b\": 1}", - "2" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-special-chars.json b/firestore/tests/unit/v1/testdata/update-paths-special-chars.json deleted file mode 100644 index 83b27d8dbfde..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-special-chars.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: special characters", - "comment": "FieldPaths can contain special characters.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "*", - "~" - ] - }, - { - "field": [ - "*", - "`" - ] - } - ], - "jsonValues": [ - "1", - "2" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "*": { - "mapValue": { - "fields": { - "`": { - "integerValue": "2" - }, - "~": { - "integerValue": "1" - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "`*`.`\\``", - "`*`.`~`" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-st-alone.json b/firestore/tests/unit/v1/testdata/update-paths-st-alone.json deleted file mode 100644 index 085d04987713..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-st-alone.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ServerTimestamp alone", - "comment": "If the only values in the input are ServerTimestamps, then no\nupdate operation should be produced.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "\"ServerTimestamp\"" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "setToServerValue": "REQUEST_TIME" - } - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-st-multi.json b/firestore/tests/unit/v1/testdata/update-paths-st-multi.json deleted file mode 100644 index 2d813801ac33..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-st-multi.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: multiple ServerTimestamp fields", - "comment": "A document can have more than one ServerTimestamp field.\nSince all the ServerTimestamp fields are removed, the only field in the update is \"a\".", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - }, - { - "field": [ - "c" - ] - } - ], - "jsonValues": [ - "1", - "\"ServerTimestamp\"", - "{\"d\": \"ServerTimestamp\"}" - ], - "request": { - "database": 
"projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "c" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - }, - { - "fieldPath": "c.d", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-st-nested.json b/firestore/tests/unit/v1/testdata/update-paths-st-nested.json deleted file mode 100644 index 8bd35c9111b1..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-st-nested.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: nested ServerTimestamp field", - "comment": "A ServerTimestamp value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ], - "jsonValues": [ - "1", - "{\"c\": \"ServerTimestamp\"}" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-st-noarray-nested.json b/firestore/tests/unit/v1/testdata/update-paths-st-noarray-nested.json deleted file mode 100644 index 2dd1bcacc775..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-st-noarray-nested.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ServerTimestamp cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ServerTimestamp sentinel. Firestore transforms don't support array indexing.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[1, {\"b\": \"ServerTimestamp\"}]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-st-noarray.json b/firestore/tests/unit/v1/testdata/update-paths-st-noarray.json deleted file mode 100644 index 5da60306bc25..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-st-noarray.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ServerTimestamp cannot be in an array value", - "comment": "The ServerTimestamp sentinel must be the value of a field. 
Firestore\ntransforms don't support array indexing.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "[1, 2, \"ServerTimestamp\"]" - ], - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-st-with-empty-map.json b/firestore/tests/unit/v1/testdata/update-paths-st-with-empty-map.json deleted file mode 100644 index ac60b2771d37..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-st-with-empty-map.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ServerTimestamp beside an empty map", - "comment": "When a ServerTimestamp and a map both reside inside a map, the\nServerTimestamp should be stripped out but the empty map should remain.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "{\"b\": {}, \"c\": \"ServerTimestamp\"}" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "mapValue": { - "fields": { - "b": { - "mapValue": { - "fields": {} - } - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-st.json b/firestore/tests/unit/v1/testdata/update-paths-st.json deleted file mode 100644 index 011405b9bf7b..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-st.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: ServerTimestamp with data", - "comment": "A key with the special ServerTimestamp sentinel is removed from\nthe data in the update operation. 
Instead it appears in a separate Transform operation.\nNote that in these tests, the string \"ServerTimestamp\" should be replaced with the\nspecial ServerTimestamp value.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "fieldPaths": [ - { - "field": [ - "a" - ] - }, - { - "field": [ - "b" - ] - } - ], - "jsonValues": [ - "1", - "\"ServerTimestamp\"" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-paths-uptime.json b/firestore/tests/unit/v1/testdata/update-paths-uptime.json deleted file mode 100644 index 96801a0cd8e7..000000000000 --- a/firestore/tests/unit/v1/testdata/update-paths-uptime.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "tests": [ - { - "description": "update-paths: last-update-time precondition", - "comment": "The Update call supports a last-update-time precondition.", - "updatePaths": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "precondition": { - "updateTime": "1970-01-01T00:00:42Z" - }, - "fieldPaths": [ - { - "field": [ - "a" - ] - } - ], - "jsonValues": [ - "1" - ], - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "updateTime": "1970-01-01T00:00:42Z" - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-prefix-1.json b/firestore/tests/unit/v1/testdata/update-prefix-1.json deleted file mode 100644 index faad69d140bc..000000000000 --- a/firestore/tests/unit/v1/testdata/update-prefix-1.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: prefix #1", - "comment": "In the input data, one field cannot be a prefix of another.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a.b\": 1, \"a\": 2}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-prefix-2.json b/firestore/tests/unit/v1/testdata/update-prefix-2.json deleted file mode 100644 index 96545c134867..000000000000 --- a/firestore/tests/unit/v1/testdata/update-prefix-2.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: prefix #2", - "comment": "In the input data, one field cannot be a prefix of another.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"a.b\": 2}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-prefix-3.json b/firestore/tests/unit/v1/testdata/update-prefix-3.json deleted file mode 100644 index 95f7024966c7..000000000000 --- a/firestore/tests/unit/v1/testdata/update-prefix-3.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: prefix #3", - "comment": "In the input data, one field cannot be a prefix of another, even if the values 
could in principle be combined.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": {\"b\": 1}, \"a.d\": 2}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-quoting.json b/firestore/tests/unit/v1/testdata/update-quoting.json deleted file mode 100644 index 10e3c35c22ca..000000000000 --- a/firestore/tests/unit/v1/testdata/update-quoting.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "tests": [ - { - "description": "update: non-letter starting chars are quoted, except underscore", - "comment": "In a field path, any component beginning with a non-letter or underscore is quoted.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"_0.1.+2\": 1}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "_0": { - "mapValue": { - "fields": { - "1": { - "mapValue": { - "fields": { - "+2": { - "integerValue": "1" - } - } - } - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "_0.`1`.`+2`" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-split-top-level.json b/firestore/tests/unit/v1/testdata/update-split-top-level.json deleted file mode 100644 index eddf360d3731..000000000000 --- a/firestore/tests/unit/v1/testdata/update-split-top-level.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "tests": [ - { - "description": "update: Split on dots for top-level keys only", - "comment": "The Update method splits only top-level keys at dots. Keys at\nother levels are taken literally.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"h.g\": {\"j.k\": 6}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "h": { - "mapValue": { - "fields": { - "g": { - "mapValue": { - "fields": { - "j.k": { - "integerValue": "6" - } - } - } - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "h.g" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-split.json b/firestore/tests/unit/v1/testdata/update-split.json deleted file mode 100644 index e18c78bf6e61..000000000000 --- a/firestore/tests/unit/v1/testdata/update-split.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "tests": [ - { - "description": "update: split on dots", - "comment": "The Update method splits top-level keys at dots.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a.b.c\": 1}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "mapValue": { - "fields": { - "b": { - "mapValue": { - "fields": { - "c": { - "integerValue": "1" - } - } - } - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "a.b.c" - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-st-alone.json b/firestore/tests/unit/v1/testdata/update-st-alone.json deleted file mode 100644 index 1a333f30cbb6..000000000000 --- a/firestore/tests/unit/v1/testdata/update-st-alone.json +++ /dev/null @@ 
-1,31 +0,0 @@ -{ - "tests": [ - { - "description": "update: ServerTimestamp alone", - "comment": "If the only values in the input are ServerTimestamps, then no\nupdate operation should be produced.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a", - "setToServerValue": "REQUEST_TIME" - } - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-st-dot.json b/firestore/tests/unit/v1/testdata/update-st-dot.json deleted file mode 100644 index 83422ca5271f..000000000000 --- a/firestore/tests/unit/v1/testdata/update-st-dot.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "tests": [ - { - "description": "update: ServerTimestamp with dotted field", - "comment": "Like other uses of ServerTimestamp, the data is pruned and the\nfield does not appear in the update mask, because it is in the transform. In this case\nAn update operation is produced just to hold the precondition.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a.b.c\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a.b.c", - "setToServerValue": "REQUEST_TIME" - } - ] - }, - "currentDocument": { - "exists": true - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-st-multi.json b/firestore/tests/unit/v1/testdata/update-st-multi.json deleted file mode 100644 index 8105ec27f543..000000000000 --- a/firestore/tests/unit/v1/testdata/update-st-multi.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "tests": [ - { - "description": "update: multiple ServerTimestamp fields", - "comment": "A document can have more than one ServerTimestamp field.\nSince all the ServerTimestamp fields are removed, the only field in the update is \"a\".", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "c" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - }, - { - "fieldPath": "c.d", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-st-nested.json b/firestore/tests/unit/v1/testdata/update-st-nested.json deleted file mode 100644 index 5a8e73237c34..000000000000 --- a/firestore/tests/unit/v1/testdata/update-st-nested.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "tests": [ - { - "description": "update: nested ServerTimestamp field", - "comment": "A ServerTimestamp value can occur at any depth. In this case,\nthe transform applies to the field path \"b.c\". 
Since \"c\" is removed from the update,\n\"b\" becomes empty, so it is also removed from the update.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a", - "b" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-st-noarray-nested.json b/firestore/tests/unit/v1/testdata/update-st-noarray-nested.json deleted file mode 100644 index 9f94501aa7fb..000000000000 --- a/firestore/tests/unit/v1/testdata/update-st-noarray-nested.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: ServerTimestamp cannot be anywhere inside an array value", - "comment": "There cannot be an array value anywhere on the path from the document\nroot to the ServerTimestamp sentinel. Firestore transforms don't support array indexing.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-st-noarray.json b/firestore/tests/unit/v1/testdata/update-st-noarray.json deleted file mode 100644 index 02615bd3ceb2..000000000000 --- a/firestore/tests/unit/v1/testdata/update-st-noarray.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "tests": [ - { - "description": "update: ServerTimestamp cannot be in an array value", - "comment": "The ServerTimestamp sentinel must be the value of a field. 
Firestore\ntransforms don't support array indexing.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": [1, 2, \"ServerTimestamp\"]}", - "isError": true - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-st-with-empty-map.json b/firestore/tests/unit/v1/testdata/update-st-with-empty-map.json deleted file mode 100644 index abeceb03ea8e..000000000000 --- a/firestore/tests/unit/v1/testdata/update-st-with-empty-map.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "tests": [ - { - "description": "update: ServerTimestamp beside an empty map", - "comment": "When a ServerTimestamp and a map both reside inside a map, the\nServerTimestamp should be stripped out but the empty map should remain.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": {\"b\": {}, \"c\": \"ServerTimestamp\"}}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "mapValue": { - "fields": { - "b": { - "mapValue": { - "fields": {} - } - } - } - } - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "a.c", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-st.json b/firestore/tests/unit/v1/testdata/update-st.json deleted file mode 100644 index 6249d8bda90d..000000000000 --- a/firestore/tests/unit/v1/testdata/update-st.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "tests": [ - { - "description": "update: ServerTimestamp with data", - "comment": "A key with the special ServerTimestamp sentinel is removed from\nthe data in the update operation. 
Instead it appears in a separate Transform operation.\nNote that in these tests, the string \"ServerTimestamp\" should be replaced with the\nspecial ServerTimestamp value.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "jsonData": "{\"a\": 1, \"b\": \"ServerTimestamp\"}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "exists": true - } - }, - { - "transform": { - "document": "projects/projectID/databases/(default)/documents/C/d", - "fieldTransforms": [ - { - "fieldPath": "b", - "setToServerValue": "REQUEST_TIME" - } - ] - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1/testdata/update-uptime.json b/firestore/tests/unit/v1/testdata/update-uptime.json deleted file mode 100644 index 9210a2cf0328..000000000000 --- a/firestore/tests/unit/v1/testdata/update-uptime.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "tests": [ - { - "description": "update: last-update-time precondition", - "comment": "The Update call supports a last-update-time precondition.", - "update": { - "docRefPath": "projects/projectID/databases/(default)/documents/C/d", - "precondition": { - "updateTime": "1970-01-01T00:00:42Z" - }, - "jsonData": "{\"a\": 1}", - "request": { - "database": "projects/projectID/databases/(default)", - "writes": [ - { - "update": { - "name": "projects/projectID/databases/(default)/documents/C/d", - "fields": { - "a": { - "integerValue": "1" - } - } - }, - "updateMask": { - "fieldPaths": [ - "a" - ] - }, - "currentDocument": { - "updateTime": "1970-01-01T00:00:42Z" - } - } - ] - } - } - } - ] -} diff --git a/firestore/tests/unit/v1beta1/__init__.py b/firestore/tests/unit/v1beta1/__init__.py deleted file mode 100644 index ab6729095248..000000000000 --- a/firestore/tests/unit/v1beta1/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/firestore/tests/unit/v1beta1/test__helpers.py b/firestore/tests/unit/v1beta1/test__helpers.py deleted file mode 100644 index 3059482cd07a..000000000000 --- a/firestore/tests/unit/v1beta1/test__helpers.py +++ /dev/null @@ -1,2089 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import datetime -import sys -import unittest - -import mock -import pytest - - -class TestGeoPoint(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1._helpers import GeoPoint - - return GeoPoint - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - lat = 81.25 - lng = 359.984375 - geo_pt = self._make_one(lat, lng) - self.assertEqual(geo_pt.latitude, lat) - self.assertEqual(geo_pt.longitude, lng) - - def test_to_protobuf(self): - from google.type import latlng_pb2 - - lat = 0.015625 - lng = 20.03125 - geo_pt = self._make_one(lat, lng) - result = geo_pt.to_protobuf() - geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng) - self.assertEqual(result, geo_pt_pb) - - def test___eq__(self): - lat = 0.015625 - lng = 20.03125 - geo_pt1 = self._make_one(lat, lng) - geo_pt2 = self._make_one(lat, lng) - self.assertEqual(geo_pt1, geo_pt2) - - def test___eq__type_differ(self): - lat = 0.015625 - lng = 20.03125 - geo_pt1 = self._make_one(lat, lng) - geo_pt2 = object() - self.assertNotEqual(geo_pt1, geo_pt2) - self.assertIs(geo_pt1.__eq__(geo_pt2), NotImplemented) - - def test___ne__same_value(self): - lat = 0.015625 - lng = 20.03125 - geo_pt1 = self._make_one(lat, lng) - geo_pt2 = self._make_one(lat, lng) - comparison_val = geo_pt1 != geo_pt2 - self.assertFalse(comparison_val) - - def test___ne__(self): - geo_pt1 = self._make_one(0.0, 1.0) - geo_pt2 = self._make_one(2.0, 3.0) - self.assertNotEqual(geo_pt1, geo_pt2) - - def test___ne__type_differ(self): - lat = 0.015625 - lng = 20.03125 - geo_pt1 = self._make_one(lat, lng) - geo_pt2 = object() - self.assertNotEqual(geo_pt1, geo_pt2) - self.assertIs(geo_pt1.__ne__(geo_pt2), NotImplemented) - - -class Test_verify_path(unittest.TestCase): - @staticmethod - def _call_fut(path, is_collection): - from google.cloud.firestore_v1beta1._helpers import verify_path - - return verify_path(path, is_collection) - - def test_empty(self): - path = () - with self.assertRaises(ValueError): - self._call_fut(path, True) - with self.assertRaises(ValueError): - self._call_fut(path, False) - - def test_wrong_length_collection(self): - path = ("foo", "bar") - with self.assertRaises(ValueError): - self._call_fut(path, True) - - def test_wrong_length_document(self): - path = ("Kind",) - with self.assertRaises(ValueError): - self._call_fut(path, False) - - def test_wrong_type_collection(self): - path = (99, "ninety-nine", "zap") - with self.assertRaises(ValueError): - self._call_fut(path, True) - - def test_wrong_type_document(self): - path = ("Users", "Ada", "Candy", {}) - with self.assertRaises(ValueError): - self._call_fut(path, False) - - def test_success_collection(self): - path = ("Computer", "Magic", "Win") - ret_val = self._call_fut(path, True) - # NOTE: We are just checking that it didn't fail. - self.assertIsNone(ret_val) - - def test_success_document(self): - path = ("Tokenizer", "Seventeen", "Cheese", "Burger") - ret_val = self._call_fut(path, False) - # NOTE: We are just checking that it didn't fail. 
- self.assertIsNone(ret_val) - - -class Test_encode_value(unittest.TestCase): - @staticmethod - def _call_fut(value): - from google.cloud.firestore_v1beta1._helpers import encode_value - - return encode_value(value) - - def test_none(self): - from google.protobuf import struct_pb2 - - result = self._call_fut(None) - expected = _value_pb(null_value=struct_pb2.NULL_VALUE) - self.assertEqual(result, expected) - - def test_boolean(self): - result = self._call_fut(True) - expected = _value_pb(boolean_value=True) - self.assertEqual(result, expected) - - def test_integer(self): - value = 425178 - result = self._call_fut(value) - expected = _value_pb(integer_value=value) - self.assertEqual(result, expected) - - def test_float(self): - value = 123.4453125 - result = self._call_fut(value) - expected = _value_pb(double_value=value) - self.assertEqual(result, expected) - - def test_datetime_with_nanos(self): - from google.api_core.datetime_helpers import DatetimeWithNanoseconds - from google.protobuf import timestamp_pb2 - - dt_seconds = 1488768504 - dt_nanos = 458816991 - timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos) - dt_val = DatetimeWithNanoseconds.from_timestamp_pb(timestamp_pb) - - result = self._call_fut(dt_val) - expected = _value_pb(timestamp_value=timestamp_pb) - self.assertEqual(result, expected) - - def test_datetime_wo_nanos(self): - from google.protobuf import timestamp_pb2 - - dt_seconds = 1488768504 - dt_nanos = 458816000 - # Make sure precision is valid in microseconds too. - self.assertEqual(dt_nanos % 1000, 0) - dt_val = datetime.datetime.utcfromtimestamp(dt_seconds + 1e-9 * dt_nanos) - - result = self._call_fut(dt_val) - timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos) - expected = _value_pb(timestamp_value=timestamp_pb) - self.assertEqual(result, expected) - - def test_string(self): - value = u"\u2018left quote, right quote\u2019" - result = self._call_fut(value) - expected = _value_pb(string_value=value) - self.assertEqual(result, expected) - - def test_bytes(self): - value = b"\xe3\xf2\xff\x00" - result = self._call_fut(value) - expected = _value_pb(bytes_value=value) - self.assertEqual(result, expected) - - def test_reference_value(self): - client = _make_client() - - value = client.document("my", "friend") - result = self._call_fut(value) - expected = _value_pb(reference_value=value._document_path) - self.assertEqual(result, expected) - - def test_geo_point(self): - from google.cloud.firestore_v1beta1._helpers import GeoPoint - - value = GeoPoint(50.5, 88.75) - result = self._call_fut(value) - expected = _value_pb(geo_point_value=value.to_protobuf()) - self.assertEqual(result, expected) - - def test_array(self): - from google.cloud.firestore_v1beta1.proto.document_pb2 import ArrayValue - - result = self._call_fut([99, True, 118.5]) - - array_pb = ArrayValue( - values=[ - _value_pb(integer_value=99), - _value_pb(boolean_value=True), - _value_pb(double_value=118.5), - ] - ) - expected = _value_pb(array_value=array_pb) - self.assertEqual(result, expected) - - def test_map(self): - from google.cloud.firestore_v1beta1.proto.document_pb2 import MapValue - - result = self._call_fut({"abc": 285, "def": b"piglatin"}) - - map_pb = MapValue( - fields={ - "abc": _value_pb(integer_value=285), - "def": _value_pb(bytes_value=b"piglatin"), - } - ) - expected = _value_pb(map_value=map_pb) - self.assertEqual(result, expected) - - def test_bad_type(self): - value = object() - with self.assertRaises(TypeError): - self._call_fut(value) - - 
-class Test_encode_dict(unittest.TestCase): - @staticmethod - def _call_fut(values_dict): - from google.cloud.firestore_v1beta1._helpers import encode_dict - - return encode_dict(values_dict) - - def test_many_types(self): - from google.protobuf import struct_pb2 - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1beta1.proto.document_pb2 import ArrayValue - from google.cloud.firestore_v1beta1.proto.document_pb2 import MapValue - - dt_seconds = 1497397225 - dt_nanos = 465964000 - # Make sure precision is valid in microseconds too. - self.assertEqual(dt_nanos % 1000, 0) - dt_val = datetime.datetime.utcfromtimestamp(dt_seconds + 1e-9 * dt_nanos) - - client = _make_client() - document = client.document("most", "adjective", "thing", "here") - - values_dict = { - "foo": None, - "bar": True, - "baz": 981, - "quux": 2.875, - "quuz": dt_val, - "corge": u"\N{snowman}", - "grault": b"\xe2\x98\x83", - "wibble": document, - "garply": [u"fork", 4.0], - "waldo": {"fred": u"zap", "thud": False}, - } - encoded_dict = self._call_fut(values_dict) - expected_dict = { - "foo": _value_pb(null_value=struct_pb2.NULL_VALUE), - "bar": _value_pb(boolean_value=True), - "baz": _value_pb(integer_value=981), - "quux": _value_pb(double_value=2.875), - "quuz": _value_pb( - timestamp_value=timestamp_pb2.Timestamp( - seconds=dt_seconds, nanos=dt_nanos - ) - ), - "corge": _value_pb(string_value=u"\N{snowman}"), - "grault": _value_pb(bytes_value=b"\xe2\x98\x83"), - "wibble": _value_pb(reference_value=document._document_path), - "garply": _value_pb( - array_value=ArrayValue( - values=[ - _value_pb(string_value=u"fork"), - _value_pb(double_value=4.0), - ] - ) - ), - "waldo": _value_pb( - map_value=MapValue( - fields={ - "fred": _value_pb(string_value=u"zap"), - "thud": _value_pb(boolean_value=False), - } - ) - ), - } - self.assertEqual(encoded_dict, expected_dict) - - -class Test_reference_value_to_document(unittest.TestCase): - @staticmethod - def _call_fut(reference_value, client): - from google.cloud.firestore_v1beta1._helpers import reference_value_to_document - - return reference_value_to_document(reference_value, client) - - def test_bad_format(self): - from google.cloud.firestore_v1beta1._helpers import BAD_REFERENCE_ERROR - - reference_value = "not/the/right/format" - with self.assertRaises(ValueError) as exc_info: - self._call_fut(reference_value, None) - - err_msg = BAD_REFERENCE_ERROR.format(reference_value) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - def test_same_client(self): - from google.cloud.firestore_v1beta1.document import DocumentReference - - client = _make_client() - document = client.document("that", "this") - reference_value = document._document_path - - new_document = self._call_fut(reference_value, client) - self.assertIsNot(new_document, document) - - self.assertIsInstance(new_document, DocumentReference) - self.assertIs(new_document._client, client) - self.assertEqual(new_document._path, document._path) - - def test_different_client(self): - from google.cloud.firestore_v1beta1._helpers import WRONG_APP_REFERENCE - - client1 = _make_client(project="kirk") - document = client1.document("tin", "foil") - reference_value = document._document_path - - client2 = _make_client(project="spock") - with self.assertRaises(ValueError) as exc_info: - self._call_fut(reference_value, client2) - - err_msg = WRONG_APP_REFERENCE.format(reference_value, client2._database_string) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - -class 
Test_decode_value(unittest.TestCase): - @staticmethod - def _call_fut(value, client=mock.sentinel.client): - from google.cloud.firestore_v1beta1._helpers import decode_value - - return decode_value(value, client) - - def test_none(self): - from google.protobuf import struct_pb2 - - value = _value_pb(null_value=struct_pb2.NULL_VALUE) - self.assertIsNone(self._call_fut(value)) - - def test_bool(self): - value1 = _value_pb(boolean_value=True) - self.assertTrue(self._call_fut(value1)) - value2 = _value_pb(boolean_value=False) - self.assertFalse(self._call_fut(value2)) - - def test_int(self): - int_val = 29871 - value = _value_pb(integer_value=int_val) - self.assertEqual(self._call_fut(value), int_val) - - def test_float(self): - float_val = 85.9296875 - value = _value_pb(double_value=float_val) - self.assertEqual(self._call_fut(value), float_val) - - @unittest.skipIf( - (3,) <= sys.version_info < (3, 4, 4), "known datetime bug (bpo-23517) in Python" - ) - def test_datetime(self): - from google.api_core.datetime_helpers import DatetimeWithNanoseconds - from google.protobuf import timestamp_pb2 - - dt_seconds = 552855006 - dt_nanos = 766961828 - - timestamp_pb = timestamp_pb2.Timestamp(seconds=dt_seconds, nanos=dt_nanos) - value = _value_pb(timestamp_value=timestamp_pb) - - expected_dt_val = DatetimeWithNanoseconds.from_timestamp_pb(timestamp_pb) - self.assertEqual(self._call_fut(value), expected_dt_val) - - def test_unicode(self): - unicode_val = u"zorgon" - value = _value_pb(string_value=unicode_val) - self.assertEqual(self._call_fut(value), unicode_val) - - def test_bytes(self): - bytes_val = b"abc\x80" - value = _value_pb(bytes_value=bytes_val) - self.assertEqual(self._call_fut(value), bytes_val) - - def test_reference(self): - from google.cloud.firestore_v1beta1.document import DocumentReference - - client = _make_client() - path = (u"then", u"there-was-one") - document = client.document(*path) - ref_string = document._document_path - value = _value_pb(reference_value=ref_string) - - result = self._call_fut(value, client) - self.assertIsInstance(result, DocumentReference) - self.assertIs(result._client, client) - self.assertEqual(result._path, path) - - def test_geo_point(self): - from google.cloud.firestore_v1beta1._helpers import GeoPoint - - geo_pt = GeoPoint(latitude=42.5, longitude=99.0625) - value = _value_pb(geo_point_value=geo_pt.to_protobuf()) - self.assertEqual(self._call_fut(value), geo_pt) - - def test_array(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - - sub_value1 = _value_pb(boolean_value=True) - sub_value2 = _value_pb(double_value=14.1396484375) - sub_value3 = _value_pb(bytes_value=b"\xde\xad\xbe\xef") - array_pb = document_pb2.ArrayValue(values=[sub_value1, sub_value2, sub_value3]) - value = _value_pb(array_value=array_pb) - - expected = [ - sub_value1.boolean_value, - sub_value2.double_value, - sub_value3.bytes_value, - ] - self.assertEqual(self._call_fut(value), expected) - - def test_map(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - - sub_value1 = _value_pb(integer_value=187680) - sub_value2 = _value_pb(string_value=u"how low can you go?") - map_pb = document_pb2.MapValue( - fields={"first": sub_value1, "second": sub_value2} - ) - value = _value_pb(map_value=map_pb) - - expected = { - "first": sub_value1.integer_value, - "second": sub_value2.string_value, - } - self.assertEqual(self._call_fut(value), expected) - - def test_nested_map(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - - 
actual_value1 = 1009876 - actual_value2 = u"hey you guys" - actual_value3 = 90.875 - map_pb1 = document_pb2.MapValue( - fields={ - "lowest": _value_pb(integer_value=actual_value1), - "aside": _value_pb(string_value=actual_value2), - } - ) - map_pb2 = document_pb2.MapValue( - fields={ - "middle": _value_pb(map_value=map_pb1), - "aside": _value_pb(boolean_value=True), - } - ) - map_pb3 = document_pb2.MapValue( - fields={ - "highest": _value_pb(map_value=map_pb2), - "aside": _value_pb(double_value=actual_value3), - } - ) - value = _value_pb(map_value=map_pb3) - - expected = { - "highest": { - "middle": {"lowest": actual_value1, "aside": actual_value2}, - "aside": True, - }, - "aside": actual_value3, - } - self.assertEqual(self._call_fut(value), expected) - - def test_unset_value_type(self): - with self.assertRaises(ValueError): - self._call_fut(_value_pb()) - - def test_unknown_value_type(self): - value_pb = mock.Mock(spec=["WhichOneof"]) - value_pb.WhichOneof.return_value = "zoob_value" - - with self.assertRaises(ValueError): - self._call_fut(value_pb) - - value_pb.WhichOneof.assert_called_once_with("value_type") - - -class Test_decode_dict(unittest.TestCase): - @staticmethod - def _call_fut(value_fields, client=mock.sentinel.client): - from google.cloud.firestore_v1beta1._helpers import decode_dict - - return decode_dict(value_fields, client) - - @unittest.skipIf( - (3,) <= sys.version_info < (3, 4, 4), "known datetime bug (bpo-23517) in Python" - ) - def test_many_types(self): - from google.protobuf import struct_pb2 - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1beta1.proto.document_pb2 import ArrayValue - from google.cloud.firestore_v1beta1.proto.document_pb2 import MapValue - from google.cloud._helpers import UTC - from google.cloud.firestore_v1beta1.field_path import FieldPath - - dt_seconds = 1394037350 - dt_nanos = 667285000 - # Make sure precision is valid in microseconds too. 
- self.assertEqual(dt_nanos % 1000, 0) - dt_val = datetime.datetime.utcfromtimestamp( - dt_seconds + 1e-9 * dt_nanos - ).replace(tzinfo=UTC) - - value_fields = { - "foo": _value_pb(null_value=struct_pb2.NULL_VALUE), - "bar": _value_pb(boolean_value=True), - "baz": _value_pb(integer_value=981), - "quux": _value_pb(double_value=2.875), - "quuz": _value_pb( - timestamp_value=timestamp_pb2.Timestamp( - seconds=dt_seconds, nanos=dt_nanos - ) - ), - "corge": _value_pb(string_value=u"\N{snowman}"), - "grault": _value_pb(bytes_value=b"\xe2\x98\x83"), - "garply": _value_pb( - array_value=ArrayValue( - values=[ - _value_pb(string_value=u"fork"), - _value_pb(double_value=4.0), - ] - ) - ), - "waldo": _value_pb( - map_value=MapValue( - fields={ - "fred": _value_pb(string_value=u"zap"), - "thud": _value_pb(boolean_value=False), - } - ) - ), - FieldPath("a", "b", "c").to_api_repr(): _value_pb(boolean_value=False), - } - expected = { - "foo": None, - "bar": True, - "baz": 981, - "quux": 2.875, - "quuz": dt_val, - "corge": u"\N{snowman}", - "grault": b"\xe2\x98\x83", - "garply": [u"fork", 4.0], - "waldo": {"fred": u"zap", "thud": False}, - "a.b.c": False, - } - self.assertEqual(self._call_fut(value_fields), expected) - - -class Test_get_doc_id(unittest.TestCase): - @staticmethod - def _call_fut(document_pb, expected_prefix): - from google.cloud.firestore_v1beta1._helpers import get_doc_id - - return get_doc_id(document_pb, expected_prefix) - - @staticmethod - def _dummy_ref_string(collection_id): - from google.cloud.firestore_v1beta1.client import DEFAULT_DATABASE - - project = u"bazzzz" - return u"projects/{}/databases/{}/documents/{}".format( - project, DEFAULT_DATABASE, collection_id - ) - - def test_success(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - - prefix = self._dummy_ref_string("sub-collection") - actual_id = "this-is-the-one" - name = "{}/{}".format(prefix, actual_id) - - document_pb = document_pb2.Document(name=name) - document_id = self._call_fut(document_pb, prefix) - self.assertEqual(document_id, actual_id) - - def test_failure(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - - actual_prefix = self._dummy_ref_string("the-right-one") - wrong_prefix = self._dummy_ref_string("the-wrong-one") - name = "{}/{}".format(actual_prefix, "sorry-wont-works") - - document_pb = document_pb2.Document(name=name) - with self.assertRaises(ValueError) as exc_info: - self._call_fut(document_pb, wrong_prefix) - - exc_args = exc_info.exception.args - self.assertEqual(len(exc_args), 4) - self.assertEqual(exc_args[1], name) - self.assertEqual(exc_args[3], wrong_prefix) - - -class Test_extract_fields(unittest.TestCase): - @staticmethod - def _call_fut(document_data, prefix_path, expand_dots=False): - from google.cloud.firestore_v1beta1 import _helpers - - return _helpers.extract_fields( - document_data, prefix_path, expand_dots=expand_dots - ) - - def test_w_empty_document(self): - from google.cloud.firestore_v1beta1._helpers import _EmptyDict - - document_data = {} - prefix_path = _make_field_path() - expected = [(_make_field_path(), _EmptyDict)] - - iterator = self._call_fut(document_data, prefix_path) - self.assertEqual(list(iterator), expected) - - def test_w_invalid_key_and_expand_dots(self): - document_data = {"b": 1, "a~d": 2, "c": 3} - prefix_path = _make_field_path() - - with self.assertRaises(ValueError): - list(self._call_fut(document_data, prefix_path, expand_dots=True)) - - def test_w_shallow_keys(self): - document_data = {"b": 1, "a": 2, "c": 3} - 
prefix_path = _make_field_path() - expected = [ - (_make_field_path("a"), 2), - (_make_field_path("b"), 1), - (_make_field_path("c"), 3), - ] - - iterator = self._call_fut(document_data, prefix_path) - self.assertEqual(list(iterator), expected) - - def test_w_nested(self): - from google.cloud.firestore_v1beta1._helpers import _EmptyDict - - document_data = {"b": {"a": {"d": 4, "c": 3, "g": {}}, "e": 7}, "f": 5} - prefix_path = _make_field_path() - expected = [ - (_make_field_path("b", "a", "c"), 3), - (_make_field_path("b", "a", "d"), 4), - (_make_field_path("b", "a", "g"), _EmptyDict), - (_make_field_path("b", "e"), 7), - (_make_field_path("f"), 5), - ] - - iterator = self._call_fut(document_data, prefix_path) - self.assertEqual(list(iterator), expected) - - def test_w_expand_dotted(self): - from google.cloud.firestore_v1beta1._helpers import _EmptyDict - - document_data = { - "b": {"a": {"d": 4, "c": 3, "g": {}, "k.l.m": 17}, "e": 7}, - "f": 5, - "h.i.j": 9, - } - prefix_path = _make_field_path() - expected = [ - (_make_field_path("b", "a", "c"), 3), - (_make_field_path("b", "a", "d"), 4), - (_make_field_path("b", "a", "g"), _EmptyDict), - (_make_field_path("b", "a", "k.l.m"), 17), - (_make_field_path("b", "e"), 7), - (_make_field_path("f"), 5), - (_make_field_path("h", "i", "j"), 9), - ] - - iterator = self._call_fut(document_data, prefix_path, expand_dots=True) - self.assertEqual(list(iterator), expected) - - -class Test_set_field_value(unittest.TestCase): - @staticmethod - def _call_fut(document_data, field_path, value): - from google.cloud.firestore_v1beta1 import _helpers - - return _helpers.set_field_value(document_data, field_path, value) - - def test_normal_value_w_shallow(self): - document = {} - field_path = _make_field_path("a") - value = 3 - - self._call_fut(document, field_path, value) - - self.assertEqual(document, {"a": 3}) - - def test_normal_value_w_nested(self): - document = {} - field_path = _make_field_path("a", "b", "c") - value = 3 - - self._call_fut(document, field_path, value) - - self.assertEqual(document, {"a": {"b": {"c": 3}}}) - - def test_empty_dict_w_shallow(self): - from google.cloud.firestore_v1beta1._helpers import _EmptyDict - - document = {} - field_path = _make_field_path("a") - value = _EmptyDict - - self._call_fut(document, field_path, value) - - self.assertEqual(document, {"a": {}}) - - def test_empty_dict_w_nested(self): - from google.cloud.firestore_v1beta1._helpers import _EmptyDict - - document = {} - field_path = _make_field_path("a", "b", "c") - value = _EmptyDict - - self._call_fut(document, field_path, value) - - self.assertEqual(document, {"a": {"b": {"c": {}}}}) - - -class Test_get_field_value(unittest.TestCase): - @staticmethod - def _call_fut(document_data, field_path): - from google.cloud.firestore_v1beta1 import _helpers - - return _helpers.get_field_value(document_data, field_path) - - def test_w_empty_path(self): - document = {} - - with self.assertRaises(ValueError): - self._call_fut(document, _make_field_path()) - - def test_miss_shallow(self): - document = {} - - with self.assertRaises(KeyError): - self._call_fut(document, _make_field_path("nonesuch")) - - def test_miss_nested(self): - document = {"a": {"b": {}}} - - with self.assertRaises(KeyError): - self._call_fut(document, _make_field_path("a", "b", "c")) - - def test_hit_shallow(self): - document = {"a": 1} - - self.assertEqual(self._call_fut(document, _make_field_path("a")), 1) - - def test_hit_nested(self): - document = {"a": {"b": {"c": 1}}} - - 
self.assertEqual(self._call_fut(document, _make_field_path("a", "b", "c")), 1) - - -class TestDocumentExtractor(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1 import _helpers - - return _helpers.DocumentExtractor - - def _make_one(self, document_data): - return self._get_target_class()(document_data) - - def test_ctor_w_empty_document(self): - document_data = {} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, {}) - self.assertTrue(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_delete_field_shallow(self): - from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD - - document_data = {"a": DELETE_FIELD} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, [_make_field_path("a")]) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_delete_field_nested(self): - from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD - - document_data = {"a": {"b": {"c": DELETE_FIELD}}} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, [_make_field_path("a", "b", "c")]) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_server_timestamp_shallow(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_data = {"a": SERVER_TIMESTAMP} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, [_make_field_path("a")]) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_server_timestamp_nested(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_data = {"a": {"b": {"c": SERVER_TIMESTAMP}}} - - inst = self._make_one(document_data) - - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, [_make_field_path("a", "b", "c")]) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - 
self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_array_remove_shallow(self): - from google.cloud.firestore_v1beta1.transforms import ArrayRemove - - values = [1, 3, 5] - document_data = {"a": ArrayRemove(values)} - - inst = self._make_one(document_data) - - expected_array_removes = {_make_field_path("a"): values} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, expected_array_removes) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_array_remove_nested(self): - from google.cloud.firestore_v1beta1.transforms import ArrayRemove - - values = [2, 4, 8] - document_data = {"a": {"b": {"c": ArrayRemove(values)}}} - - inst = self._make_one(document_data) - - expected_array_removes = {_make_field_path("a", "b", "c"): values} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, expected_array_removes) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_array_union_shallow(self): - from google.cloud.firestore_v1beta1.transforms import ArrayUnion - - values = [1, 3, 5] - document_data = {"a": ArrayUnion(values)} - - inst = self._make_one(document_data) - - expected_array_unions = {_make_field_path("a"): values} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, expected_array_unions) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a")]) - - def test_ctor_w_array_union_nested(self): - from google.cloud.firestore_v1beta1.transforms import ArrayUnion - - values = [2, 4, 8] - document_data = {"a": {"b": {"c": ArrayUnion(values)}}} - - inst = self._make_one(document_data) - - expected_array_unions = {_make_field_path("a", "b", "c"): values} - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, []) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, expected_array_unions) - self.assertEqual(inst.set_fields, {}) - self.assertFalse(inst.empty_document) - self.assertTrue(inst.has_transforms) - self.assertEqual(inst.transform_paths, [_make_field_path("a", "b", "c")]) - - def test_ctor_w_empty_dict_shallow(self): - document_data = {"a": {}} - - inst = self._make_one(document_data) - - expected_field_paths = [_make_field_path("a")] - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, expected_field_paths) - self.assertEqual(inst.deleted_fields, []) - 
self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, document_data) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_empty_dict_nested(self): - document_data = {"a": {"b": {"c": {}}}} - - inst = self._make_one(document_data) - - expected_field_paths = [_make_field_path("a", "b", "c")] - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, expected_field_paths) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, document_data) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - self.assertEqual(inst.transform_paths, []) - - def test_ctor_w_normal_value_shallow(self): - document_data = {"b": 1, "a": 2, "c": 3} - - inst = self._make_one(document_data) - - expected_field_paths = [ - _make_field_path("a"), - _make_field_path("b"), - _make_field_path("c"), - ] - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, expected_field_paths) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, document_data) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - - def test_ctor_w_normal_value_nested(self): - document_data = {"b": {"a": {"d": 4, "c": 3}, "e": 7}, "f": 5} - - inst = self._make_one(document_data) - - expected_field_paths = [ - _make_field_path("b", "a", "c"), - _make_field_path("b", "a", "d"), - _make_field_path("b", "e"), - _make_field_path("f"), - ] - self.assertEqual(inst.document_data, document_data) - self.assertEqual(inst.field_paths, expected_field_paths) - self.assertEqual(inst.deleted_fields, []) - self.assertEqual(inst.server_timestamps, []) - self.assertEqual(inst.array_removes, {}) - self.assertEqual(inst.array_unions, {}) - self.assertEqual(inst.set_fields, document_data) - self.assertFalse(inst.empty_document) - self.assertFalse(inst.has_transforms) - - def test_get_update_pb_w_exists_precondition(self): - from google.cloud.firestore_v1beta1.proto import write_pb2 - - document_data = {} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - update_pb = inst.get_update_pb(document_path, exists=False) - - self.assertIsInstance(update_pb, write_pb2.Write) - self.assertEqual(update_pb.update.name, document_path) - self.assertEqual(update_pb.update.fields, document_data) - self.assertTrue(update_pb.HasField("current_document")) - self.assertFalse(update_pb.current_document.exists) - - def test_get_update_pb_wo_exists_precondition(self): - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1._helpers import encode_dict - - document_data = {"a": 1} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - update_pb = inst.get_update_pb(document_path) - - self.assertIsInstance(update_pb, write_pb2.Write) - self.assertEqual(update_pb.update.name, document_path) - self.assertEqual(update_pb.update.fields, encode_dict(document_data)) - 
self.assertFalse(update_pb.HasField("current_document")) - - def test_get_transform_pb_w_server_timestamp_w_exists_precondition(self): - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - from google.cloud.firestore_v1beta1._helpers import REQUEST_TIME_ENUM - - document_data = {"a": SERVER_TIMESTAMP} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path, exists=False) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a") - self.assertEqual(transform.set_to_server_value, REQUEST_TIME_ENUM) - self.assertTrue(transform_pb.HasField("current_document")) - self.assertFalse(transform_pb.current_document.exists) - - def test_get_transform_pb_w_server_timestamp_wo_exists_precondition(self): - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - from google.cloud.firestore_v1beta1._helpers import REQUEST_TIME_ENUM - - document_data = {"a": {"b": {"c": SERVER_TIMESTAMP}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - self.assertEqual(transform.set_to_server_value, REQUEST_TIME_ENUM) - self.assertFalse(transform_pb.HasField("current_document")) - - @staticmethod - def _array_value_to_list(array_value): - from google.cloud.firestore_v1beta1._helpers import decode_value - - return [decode_value(element, client=None) for element in array_value.values] - - def test_get_transform_pb_w_array_remove(self): - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1.transforms import ArrayRemove - - values = [2, 4, 8] - document_data = {"a": {"b": {"c": ArrayRemove(values)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - removed = self._array_value_to_list(transform.remove_all_from_array) - self.assertEqual(removed, values) - self.assertFalse(transform_pb.HasField("current_document")) - - def test_get_transform_pb_w_array_union(self): - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1.transforms import ArrayUnion - - values = [1, 3, 5] - document_data = {"a": {"b": {"c": ArrayUnion(values)}}} - inst = self._make_one(document_data) - document_path = ( - "projects/project-id/databases/(default)/" "documents/document-id" - ) - - 
transform_pb = inst.get_transform_pb(document_path) - - self.assertIsInstance(transform_pb, write_pb2.Write) - self.assertEqual(transform_pb.transform.document, document_path) - transforms = transform_pb.transform.field_transforms - self.assertEqual(len(transforms), 1) - transform = transforms[0] - self.assertEqual(transform.field_path, "a.b.c") - added = self._array_value_to_list(transform.append_missing_elements) - self.assertEqual(added, values) - self.assertFalse(transform_pb.HasField("current_document")) - - -class Test_pbs_for_create(unittest.TestCase): - @staticmethod - def _call_fut(document_path, document_data): - from google.cloud.firestore_v1beta1._helpers import pbs_for_create - - return pbs_for_create(document_path, document_data) - - @staticmethod - def _make_write_w_document(document_path, **data): - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1._helpers import encode_dict - from google.cloud.firestore_v1beta1.proto import common_pb2 - - return write_pb2.Write( - update=document_pb2.Document(name=document_path, fields=encode_dict(data)), - current_document=common_pb2.Precondition(exists=False), - ) - - @staticmethod - def _make_write_w_transform(document_path, fields): - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1.gapic import enums - - server_val = enums.DocumentTransform.FieldTransform.ServerValue - transforms = [ - write_pb2.DocumentTransform.FieldTransform( - field_path=field, set_to_server_value=server_val.REQUEST_TIME - ) - for field in fields - ] - - return write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, field_transforms=transforms - ) - ) - - def _helper(self, do_transform=False, empty_val=False): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"cheese": 1.5, "crackers": True} - - if do_transform: - document_data["butter"] = SERVER_TIMESTAMP - - if empty_val: - document_data["mustard"] = {} - - write_pbs = self._call_fut(document_path, document_data) - - if empty_val: - update_pb = self._make_write_w_document( - document_path, cheese=1.5, crackers=True, mustard={} - ) - else: - update_pb = self._make_write_w_document( - document_path, cheese=1.5, crackers=True - ) - expected_pbs = [update_pb] - - if do_transform: - expected_pbs.append( - self._make_write_w_transform(document_path, fields=["butter"]) - ) - - self.assertEqual(write_pbs, expected_pbs) - - def test_without_transform(self): - self._helper() - - def test_w_transform(self): - self._helper(do_transform=True) - - def test_w_transform_and_empty_value(self): - self._helper(do_transform=True, empty_val=True) - - -class Test_pbs_for_set_no_merge(unittest.TestCase): - @staticmethod - def _call_fut(document_path, document_data): - from google.cloud.firestore_v1beta1 import _helpers - - return _helpers.pbs_for_set_no_merge(document_path, document_data) - - @staticmethod - def _make_write_w_document(document_path, **data): - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1._helpers import encode_dict - - return write_pb2.Write( - update=document_pb2.Document(name=document_path, fields=encode_dict(data)) - ) - - @staticmethod - def _make_write_w_transform(document_path, fields): - from 
google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1.gapic import enums - - server_val = enums.DocumentTransform.FieldTransform.ServerValue - transforms = [ - write_pb2.DocumentTransform.FieldTransform( - field_path=field, set_to_server_value=server_val.REQUEST_TIME - ) - for field in fields - ] - - return write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, field_transforms=transforms - ) - ) - - def test_w_empty_document(self): - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {} - - write_pbs = self._call_fut(document_path, document_data) - - update_pb = self._make_write_w_document(document_path) - expected_pbs = [update_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_w_only_server_timestamp(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"butter": SERVER_TIMESTAMP} - - write_pbs = self._call_fut(document_path, document_data) - - update_pb = self._make_write_w_document(document_path) - transform_pb = self._make_write_w_transform(document_path, ["butter"]) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - def _helper(self, do_transform=False, empty_val=False): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"cheese": 1.5, "crackers": True} - - if do_transform: - document_data["butter"] = SERVER_TIMESTAMP - - if empty_val: - document_data["mustard"] = {} - - write_pbs = self._call_fut(document_path, document_data) - - if empty_val: - update_pb = self._make_write_w_document( - document_path, cheese=1.5, crackers=True, mustard={} - ) - else: - update_pb = self._make_write_w_document( - document_path, cheese=1.5, crackers=True - ) - expected_pbs = [update_pb] - - if do_transform: - expected_pbs.append( - self._make_write_w_transform(document_path, fields=["butter"]) - ) - - self.assertEqual(write_pbs, expected_pbs) - - def test_defaults(self): - self._helper() - - def test_w_transform(self): - self._helper(do_transform=True) - - def test_w_transform_and_empty_value(self): - # Exercise #5944 - self._helper(do_transform=True, empty_val=True) - - -class TestDocumentExtractorForMerge(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1 import _helpers - - return _helpers.DocumentExtractorForMerge - - def _make_one(self, document_data): - return self._get_target_class()(document_data) - - def test_ctor_w_empty_document(self): - document_data = {} - - inst = self._make_one(document_data) - - self.assertEqual(inst.data_merge, []) - self.assertEqual(inst.transform_merge, []) - self.assertEqual(inst.merge, []) - - def test_apply_merge_all_w_empty_document(self): - document_data = {} - inst = self._make_one(document_data) - - inst.apply_merge(True) - - self.assertEqual(inst.data_merge, []) - self.assertEqual(inst.transform_merge, []) - self.assertEqual(inst.merge, []) - self.assertFalse(inst.has_updates) - - def test_apply_merge_all_w_delete(self): - from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD - - document_data = {"write_me": "value", "delete_me": DELETE_FIELD} - inst = self._make_one(document_data) - - inst.apply_merge(True) - - expected_data_merge = [ - _make_field_path("delete_me"), - _make_field_path("write_me"), - ] - 
self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, []) - self.assertEqual(inst.merge, expected_data_merge) - self.assertTrue(inst.has_updates) - - def test_apply_merge_all_w_server_timestamp(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_data = {"write_me": "value", "timestamp": SERVER_TIMESTAMP} - inst = self._make_one(document_data) - - inst.apply_merge(True) - - expected_data_merge = [_make_field_path("write_me")] - expected_transform_merge = [_make_field_path("timestamp")] - expected_merge = [_make_field_path("timestamp"), _make_field_path("write_me")] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, expected_transform_merge) - self.assertEqual(inst.merge, expected_merge) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_empty_document(self): - document_data = {} - inst = self._make_one(document_data) - - with self.assertRaises(ValueError): - inst.apply_merge(["nonesuch", "or.this"]) - - def test_apply_merge_list_fields_w_unmerged_delete(self): - from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD - - document_data = { - "write_me": "value", - "delete_me": DELETE_FIELD, - "ignore_me": 123, - "unmerged_delete": DELETE_FIELD, - } - inst = self._make_one(document_data) - - with self.assertRaises(ValueError): - inst.apply_merge(["write_me", "delete_me"]) - - def test_apply_merge_list_fields_w_delete(self): - from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD - - document_data = { - "write_me": "value", - "delete_me": DELETE_FIELD, - "ignore_me": 123, - } - inst = self._make_one(document_data) - - inst.apply_merge(["write_me", "delete_me"]) - - expected_set_fields = {"write_me": "value"} - expected_deleted_fields = [_make_field_path("delete_me")] - self.assertEqual(inst.set_fields, expected_set_fields) - self.assertEqual(inst.deleted_fields, expected_deleted_fields) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_prefixes(self): - - document_data = {"a": {"b": {"c": 123}}} - inst = self._make_one(document_data) - - with self.assertRaises(ValueError): - inst.apply_merge(["a", "a.b"]) - - def test_apply_merge_list_fields_w_missing_data_string_paths(self): - - document_data = {"write_me": "value", "ignore_me": 123} - inst = self._make_one(document_data) - - with self.assertRaises(ValueError): - inst.apply_merge(["write_me", "nonesuch"]) - - def test_apply_merge_list_fields_w_non_merge_field(self): - - document_data = {"write_me": "value", "ignore_me": 123} - inst = self._make_one(document_data) - - inst.apply_merge([_make_field_path("write_me")]) - - expected_set_fields = {"write_me": "value"} - self.assertEqual(inst.set_fields, expected_set_fields) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_server_timestamp(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_data = { - "write_me": "value", - "timestamp": SERVER_TIMESTAMP, - "ignored_stamp": SERVER_TIMESTAMP, - } - inst = self._make_one(document_data) - - inst.apply_merge([_make_field_path("write_me"), _make_field_path("timestamp")]) - - expected_data_merge = [_make_field_path("write_me")] - expected_transform_merge = [_make_field_path("timestamp")] - expected_merge = [_make_field_path("timestamp"), _make_field_path("write_me")] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, expected_transform_merge) - 
self.assertEqual(inst.merge, expected_merge) - expected_server_timestamps = [_make_field_path("timestamp")] - self.assertEqual(inst.server_timestamps, expected_server_timestamps) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_array_remove(self): - from google.cloud.firestore_v1beta1.transforms import ArrayRemove - - values = [2, 4, 8] - document_data = { - "write_me": "value", - "remove_me": ArrayRemove(values), - "ignored_remove_me": ArrayRemove((1, 3, 5)), - } - inst = self._make_one(document_data) - - inst.apply_merge([_make_field_path("write_me"), _make_field_path("remove_me")]) - - expected_data_merge = [_make_field_path("write_me")] - expected_transform_merge = [_make_field_path("remove_me")] - expected_merge = [_make_field_path("remove_me"), _make_field_path("write_me")] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, expected_transform_merge) - self.assertEqual(inst.merge, expected_merge) - expected_array_removes = {_make_field_path("remove_me"): values} - self.assertEqual(inst.array_removes, expected_array_removes) - self.assertTrue(inst.has_updates) - - def test_apply_merge_list_fields_w_array_union(self): - from google.cloud.firestore_v1beta1.transforms import ArrayUnion - - values = [1, 3, 5] - document_data = { - "write_me": "value", - "union_me": ArrayUnion(values), - "ignored_union_me": ArrayUnion((2, 4, 8)), - } - inst = self._make_one(document_data) - - inst.apply_merge([_make_field_path("write_me"), _make_field_path("union_me")]) - - expected_data_merge = [_make_field_path("write_me")] - expected_transform_merge = [_make_field_path("union_me")] - expected_merge = [_make_field_path("union_me"), _make_field_path("write_me")] - self.assertEqual(inst.data_merge, expected_data_merge) - self.assertEqual(inst.transform_merge, expected_transform_merge) - self.assertEqual(inst.merge, expected_merge) - expected_array_unions = {_make_field_path("union_me"): values} - self.assertEqual(inst.array_unions, expected_array_unions) - self.assertTrue(inst.has_updates) - - -class Test_pbs_for_set_with_merge(unittest.TestCase): - @staticmethod - def _call_fut(document_path, document_data, merge): - from google.cloud.firestore_v1beta1 import _helpers - - return _helpers.pbs_for_set_with_merge( - document_path, document_data, merge=merge - ) - - @staticmethod - def _make_write_w_document(document_path, **data): - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1._helpers import encode_dict - - return write_pb2.Write( - update=document_pb2.Document(name=document_path, fields=encode_dict(data)) - ) - - @staticmethod - def _make_write_w_transform(document_path, fields): - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1.gapic import enums - - server_val = enums.DocumentTransform.FieldTransform.ServerValue - transforms = [ - write_pb2.DocumentTransform.FieldTransform( - field_path=field, set_to_server_value=server_val.REQUEST_TIME - ) - for field in fields - ] - - return write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, field_transforms=transforms - ) - ) - - @staticmethod - def _update_document_mask(update_pb, field_paths): - from google.cloud.firestore_v1beta1.proto import common_pb2 - - update_pb.update_mask.CopyFrom( - common_pb2.DocumentMask(field_paths=sorted(field_paths)) - ) - - def test_with_merge_true_wo_transform(self): - 
document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"cheese": 1.5, "crackers": True} - - write_pbs = self._call_fut(document_path, document_data, merge=True) - - update_pb = self._make_write_w_document(document_path, **document_data) - self._update_document_mask(update_pb, field_paths=sorted(document_data)) - expected_pbs = [update_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_field_wo_transform(self): - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - document_data = {"cheese": 1.5, "crackers": True} - - write_pbs = self._call_fut(document_path, document_data, merge=["cheese"]) - - update_pb = self._make_write_w_document( - document_path, cheese=document_data["cheese"] - ) - self._update_document_mask(update_pb, field_paths=["cheese"]) - expected_pbs = [update_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_true_w_transform(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - update_data = {"cheese": 1.5, "crackers": True} - document_data = update_data.copy() - document_data["butter"] = SERVER_TIMESTAMP - - write_pbs = self._call_fut(document_path, document_data, merge=True) - - update_pb = self._make_write_w_document(document_path, **update_data) - self._update_document_mask(update_pb, field_paths=sorted(update_data)) - transform_pb = self._make_write_w_transform(document_path, fields=["butter"]) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_field_w_transform(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - update_data = {"cheese": 1.5, "crackers": True} - document_data = update_data.copy() - document_data["butter"] = SERVER_TIMESTAMP - - write_pbs = self._call_fut( - document_path, document_data, merge=["cheese", "butter"] - ) - - update_pb = self._make_write_w_document( - document_path, cheese=document_data["cheese"] - ) - self._update_document_mask(update_pb, ["cheese"]) - transform_pb = self._make_write_w_transform(document_path, fields=["butter"]) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_field_w_transform_masking_simple(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - update_data = {"cheese": 1.5, "crackers": True} - document_data = update_data.copy() - document_data["butter"] = {"pecan": SERVER_TIMESTAMP} - - write_pbs = self._call_fut(document_path, document_data, merge=["butter.pecan"]) - - update_pb = self._make_write_w_document(document_path) - transform_pb = self._make_write_w_transform( - document_path, fields=["butter.pecan"] - ) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - def test_with_merge_field_w_transform_parent(self): - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - - document_path = _make_ref_string(u"little", u"town", u"of", u"ham") - update_data = {"cheese": 1.5, "crackers": True} - document_data = update_data.copy() - document_data["butter"] = {"popcorn": "yum", "pecan": SERVER_TIMESTAMP} - - write_pbs = self._call_fut( - document_path, document_data, merge=["cheese", "butter"] - ) - - update_pb = self._make_write_w_document( - document_path, 
cheese=update_data["cheese"], butter={"popcorn": "yum"} - ) - self._update_document_mask(update_pb, ["cheese", "butter"]) - transform_pb = self._make_write_w_transform( - document_path, fields=["butter.pecan"] - ) - expected_pbs = [update_pb, transform_pb] - self.assertEqual(write_pbs, expected_pbs) - - -class TestDocumentExtractorForUpdate(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1 import _helpers - - return _helpers.DocumentExtractorForUpdate - - def _make_one(self, document_data): - return self._get_target_class()(document_data) - - def test_ctor_w_empty_document(self): - document_data = {} - - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, []) - - def test_ctor_w_simple_keys(self): - document_data = {"a": 1, "b": 2, "c": 3} - - expected_paths = [ - _make_field_path("a"), - _make_field_path("b"), - _make_field_path("c"), - ] - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, expected_paths) - - def test_ctor_w_nested_keys(self): - document_data = {"a": {"d": {"e": 1}}, "b": {"f": 7}, "c": 3} - - expected_paths = [ - _make_field_path("a"), - _make_field_path("b"), - _make_field_path("c"), - ] - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, expected_paths) - - def test_ctor_w_dotted_keys(self): - document_data = {"a.d.e": 1, "b.f": 7, "c": 3} - - expected_paths = [ - _make_field_path("a", "d", "e"), - _make_field_path("b", "f"), - _make_field_path("c"), - ] - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, expected_paths) - - def test_ctor_w_nested_dotted_keys(self): - document_data = {"a.d.e": 1, "b.f": {"h.i": 9}, "c": 3} - - expected_paths = [ - _make_field_path("a", "d", "e"), - _make_field_path("b", "f"), - _make_field_path("c"), - ] - expected_set_fields = {"a": {"d": {"e": 1}}, "b": {"f": {"h.i": 9}}, "c": 3} - inst = self._make_one(document_data) - self.assertEqual(inst.top_level_paths, expected_paths) - self.assertEqual(inst.set_fields, expected_set_fields) - - -class Test_pbs_for_update(unittest.TestCase): - @staticmethod - def _call_fut(document_path, field_updates, option): - from google.cloud.firestore_v1beta1._helpers import pbs_for_update - - return pbs_for_update(document_path, field_updates, option) - - def _helper(self, option=None, do_transform=False, **write_kwargs): - from google.cloud.firestore_v1beta1 import _helpers - from google.cloud.firestore_v1beta1.field_path import FieldPath - from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - document_path = _make_ref_string(u"toy", u"car", u"onion", u"garlic") - field_path1 = "bitez.yum" - value = b"\x00\x01" - field_path2 = "blog.internet" - - field_updates = {field_path1: value} - if do_transform: - field_updates[field_path2] = SERVER_TIMESTAMP - - write_pbs = self._call_fut(document_path, field_updates, option) - - map_pb = document_pb2.MapValue(fields={"yum": _value_pb(bytes_value=value)}) - - field_paths = [field_path1] - - expected_update_pb = write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields={"bitez": _value_pb(map_value=map_pb)} - ), - update_mask=common_pb2.DocumentMask(field_paths=field_paths), - **write_kwargs - ) - if isinstance(option, 
_helpers.ExistsOption): - precondition = common_pb2.Precondition(exists=False) - expected_update_pb.current_document.CopyFrom(precondition) - expected_pbs = [expected_update_pb] - if do_transform: - transform_paths = FieldPath.from_string(field_path2) - server_val = enums.DocumentTransform.FieldTransform.ServerValue - expected_transform_pb = write_pb2.Write( - transform=write_pb2.DocumentTransform( - document=document_path, - field_transforms=[ - write_pb2.DocumentTransform.FieldTransform( - field_path=transform_paths.to_api_repr(), - set_to_server_value=server_val.REQUEST_TIME, - ) - ], - ) - ) - expected_pbs.append(expected_transform_pb) - self.assertEqual(write_pbs, expected_pbs) - - def test_without_option(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - - precondition = common_pb2.Precondition(exists=True) - self._helper(current_document=precondition) - - def test_with_exists_option(self): - from google.cloud.firestore_v1beta1.client import _helpers - - option = _helpers.ExistsOption(False) - self._helper(option=option) - - def test_update_and_transform(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - - precondition = common_pb2.Precondition(exists=True) - self._helper(current_document=precondition, do_transform=True) - - -class Test_pb_for_delete(unittest.TestCase): - @staticmethod - def _call_fut(document_path, option): - from google.cloud.firestore_v1beta1._helpers import pb_for_delete - - return pb_for_delete(document_path, option) - - def _helper(self, option=None, **write_kwargs): - from google.cloud.firestore_v1beta1.proto import write_pb2 - - document_path = _make_ref_string(u"chicken", u"philly", u"one", u"two") - write_pb = self._call_fut(document_path, option) - - expected_pb = write_pb2.Write(delete=document_path, **write_kwargs) - self.assertEqual(write_pb, expected_pb) - - def test_without_option(self): - self._helper() - - def test_with_option(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1 import _helpers - - update_time = timestamp_pb2.Timestamp(seconds=1309700594, nanos=822211297) - option = _helpers.LastUpdateOption(update_time) - precondition = common_pb2.Precondition(update_time=update_time) - self._helper(option=option, current_document=precondition) - - -class Test_get_transaction_id(unittest.TestCase): - @staticmethod - def _call_fut(transaction, **kwargs): - from google.cloud.firestore_v1beta1._helpers import get_transaction_id - - return get_transaction_id(transaction, **kwargs) - - def test_no_transaction(self): - ret_val = self._call_fut(None) - self.assertIsNone(ret_val) - - def test_invalid_transaction(self): - from google.cloud.firestore_v1beta1.transaction import Transaction - - transaction = Transaction(mock.sentinel.client) - self.assertFalse(transaction.in_progress) - with self.assertRaises(ValueError): - self._call_fut(transaction) - - def test_after_writes_not_allowed(self): - from google.cloud.firestore_v1beta1._helpers import ReadAfterWriteError - from google.cloud.firestore_v1beta1.transaction import Transaction - - transaction = Transaction(mock.sentinel.client) - transaction._id = b"under-hook" - transaction._write_pbs.append(mock.sentinel.write) - - with self.assertRaises(ReadAfterWriteError): - self._call_fut(transaction) - - def test_after_writes_allowed(self): - from google.cloud.firestore_v1beta1.transaction import Transaction - - transaction = Transaction(mock.sentinel.client) - txn_id = 
b"we-are-0fine" - transaction._id = txn_id - transaction._write_pbs.append(mock.sentinel.write) - - ret_val = self._call_fut(transaction, read_operation=False) - self.assertEqual(ret_val, txn_id) - - def test_good_transaction(self): - from google.cloud.firestore_v1beta1.transaction import Transaction - - transaction = Transaction(mock.sentinel.client) - txn_id = b"doubt-it" - transaction._id = txn_id - self.assertTrue(transaction.in_progress) - - self.assertEqual(self._call_fut(transaction), txn_id) - - -class Test_metadata_with_prefix(unittest.TestCase): - @staticmethod - def _call_fut(database_string): - from google.cloud.firestore_v1beta1._helpers import metadata_with_prefix - - return metadata_with_prefix(database_string) - - def test_it(self): - database_string = u"projects/prahj/databases/dee-bee" - metadata = self._call_fut(database_string) - - self.assertEqual(metadata, [("google-cloud-resource-prefix", database_string)]) - - -class TestWriteOption(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1._helpers import WriteOption - - return WriteOption - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_modify_write(self): - option = self._make_one() - with self.assertRaises(NotImplementedError): - option.modify_write(None) - - -class TestLastUpdateOption(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1._helpers import LastUpdateOption - - return LastUpdateOption - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - option = self._make_one(mock.sentinel.timestamp) - self.assertIs(option._last_update_time, mock.sentinel.timestamp) - - def test___eq___different_type(self): - option = self._make_one(mock.sentinel.timestamp) - other = object() - self.assertFalse(option == other) - - def test___eq___different_timestamp(self): - option = self._make_one(mock.sentinel.timestamp) - other = self._make_one(mock.sentinel.other_timestamp) - self.assertFalse(option == other) - - def test___eq___same_timestamp(self): - option = self._make_one(mock.sentinel.timestamp) - other = self._make_one(mock.sentinel.timestamp) - self.assertTrue(option == other) - - def test_modify_write_update_time(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - timestamp_pb = timestamp_pb2.Timestamp(seconds=683893592, nanos=229362000) - option = self._make_one(timestamp_pb) - write_pb = write_pb2.Write() - ret_val = option.modify_write(write_pb) - - self.assertIsNone(ret_val) - expected_doc = common_pb2.Precondition(update_time=timestamp_pb) - self.assertEqual(write_pb.current_document, expected_doc) - - -class TestExistsOption(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1._helpers import ExistsOption - - return ExistsOption - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - option = self._make_one(mock.sentinel.totes_bool) - self.assertIs(option._exists, mock.sentinel.totes_bool) - - def test___eq___different_type(self): - option = self._make_one(mock.sentinel.timestamp) - other = object() - self.assertFalse(option == other) - - def test___eq___different_exists(self): - option = 
self._make_one(True) - other = self._make_one(False) - self.assertFalse(option == other) - - def test___eq___same_exists(self): - option = self._make_one(True) - other = self._make_one(True) - self.assertTrue(option == other) - - def test_modify_write(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - for exists in (True, False): - option = self._make_one(exists) - write_pb = write_pb2.Write() - ret_val = option.modify_write(write_pb) - - self.assertIsNone(ret_val) - expected_doc = common_pb2.Precondition(exists=exists) - self.assertEqual(write_pb.current_document, expected_doc) - - -def _value_pb(**kwargs): - from google.cloud.firestore_v1beta1.proto.document_pb2 import Value - - return Value(**kwargs) - - -def _make_ref_string(project, database, *path): - from google.cloud.firestore_v1beta1 import _helpers - - doc_rel_path = _helpers.DOCUMENT_PATH_DELIMITER.join(path) - return u"projects/{}/databases/{}/documents/{}".format( - project, database, doc_rel_path - ) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="quark"): - from google.cloud.firestore_v1beta1.client import Client - - credentials = _make_credentials() - - with pytest.deprecated_call(): - return Client(project=project, credentials=credentials) - - -def _make_field_path(*fields): - from google.cloud.firestore_v1beta1 import field_path - - return field_path.FieldPath(*fields) diff --git a/firestore/tests/unit/v1beta1/test_batch.py b/firestore/tests/unit/v1beta1/test_batch.py deleted file mode 100644 index 831424751594..000000000000 --- a/firestore/tests/unit/v1beta1/test_batch.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import mock -import pytest - - -class TestWriteBatch(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.batch import WriteBatch - - return WriteBatch - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - batch = self._make_one(mock.sentinel.client) - self.assertIs(batch._client, mock.sentinel.client) - self.assertEqual(batch._write_pbs, []) - self.assertIsNone(batch.write_results) - self.assertIsNone(batch.commit_time) - - def test__add_write_pbs(self): - batch = self._make_one(mock.sentinel.client) - self.assertEqual(batch._write_pbs, []) - batch._add_write_pbs([mock.sentinel.write1, mock.sentinel.write2]) - self.assertEqual(batch._write_pbs, [mock.sentinel.write1, mock.sentinel.write2]) - - def test_create(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("this", "one") - document_data = {"a": 10, "b": 2.5} - ret_val = batch.create(reference, document_data) - self.assertIsNone(ret_val) - new_write_pb = write_pb2.Write( - update=document_pb2.Document( - name=reference._document_path, - fields={ - "a": _value_pb(integer_value=document_data["a"]), - "b": _value_pb(double_value=document_data["b"]), - }, - ), - current_document=common_pb2.Precondition(exists=False), - ) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_set(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("another", "one") - field = "zapzap" - value = u"meadows and flowers" - document_data = {field: value} - ret_val = batch.set(reference, document_data) - self.assertIsNone(ret_val) - new_write_pb = write_pb2.Write( - update=document_pb2.Document( - name=reference._document_path, - fields={field: _value_pb(string_value=value)}, - ) - ) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_set_merge(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("another", "one") - field = "zapzap" - value = u"meadows and flowers" - document_data = {field: value} - ret_val = batch.set(reference, document_data, merge=True) - self.assertIsNone(ret_val) - new_write_pb = write_pb2.Write( - update=document_pb2.Document( - name=reference._document_path, - fields={field: _value_pb(string_value=value)}, - ), - update_mask={"field_paths": [field]}, - ) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_update(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("cats", "cradle") - field_path = "head.foot" - value = u"knees toes shoulders" - field_updates = {field_path: value} - - 
ret_val = batch.update(reference, field_updates) - self.assertIsNone(ret_val) - - map_pb = document_pb2.MapValue(fields={"foot": _value_pb(string_value=value)}) - new_write_pb = write_pb2.Write( - update=document_pb2.Document( - name=reference._document_path, - fields={"head": _value_pb(map_value=map_pb)}, - ), - update_mask=common_pb2.DocumentMask(field_paths=[field_path]), - current_document=common_pb2.Precondition(exists=True), - ) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_delete(self): - from google.cloud.firestore_v1beta1.proto import write_pb2 - - client = _make_client() - batch = self._make_one(client) - self.assertEqual(batch._write_pbs, []) - - reference = client.document("early", "mornin", "dawn", "now") - ret_val = batch.delete(reference) - self.assertIsNone(ret_val) - new_write_pb = write_pb2.Write(delete=reference._document_path) - self.assertEqual(batch._write_pbs, [new_write_pb]) - - def test_commit(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.Mock(spec=["commit"]) - timestamp = timestamp_pb2.Timestamp(seconds=1234567, nanos=123456798) - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult(), write_pb2.WriteResult()], - commit_time=timestamp, - ) - firestore_api.commit.return_value = commit_response - - # Attach the fake GAPIC to a real client. - client = _make_client("grand") - client._firestore_api_internal = firestore_api - - # Actually make a batch with some mutations and call commit(). - batch = self._make_one(client) - document1 = client.document("a", "b") - batch.create(document1, {"ten": 10, "buck": u"ets"}) - document2 = client.document("c", "d", "e", "f") - batch.delete(document2) - write_pbs = batch._write_pbs[::] - - write_results = batch.commit() - self.assertEqual(write_results, list(commit_response.write_results)) - self.assertEqual(batch.write_results, write_results) - self.assertEqual(batch.commit_time, timestamp) - # Make sure batch has no more "changes". - self.assertEqual(batch._write_pbs, []) - - # Verify the mocks. - firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_as_context_mgr_wo_error(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - firestore_api = mock.Mock(spec=["commit"]) - timestamp = timestamp_pb2.Timestamp(seconds=1234567, nanos=123456798) - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult(), write_pb2.WriteResult()], - commit_time=timestamp, - ) - firestore_api.commit.return_value = commit_response - client = _make_client() - client._firestore_api_internal = firestore_api - batch = self._make_one(client) - document1 = client.document("a", "b") - document2 = client.document("c", "d", "e", "f") - - with batch as ctx_mgr: - self.assertIs(ctx_mgr, batch) - ctx_mgr.create(document1, {"ten": 10, "buck": u"ets"}) - ctx_mgr.delete(document2) - write_pbs = batch._write_pbs[::] - - self.assertEqual(batch.write_results, list(commit_response.write_results)) - self.assertEqual(batch.commit_time, timestamp) - # Make sure batch has no more "changes". - self.assertEqual(batch._write_pbs, []) - - # Verify the mocks. 
- firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_as_context_mgr_w_error(self): - firestore_api = mock.Mock(spec=["commit"]) - client = _make_client() - client._firestore_api_internal = firestore_api - batch = self._make_one(client) - document1 = client.document("a", "b") - document2 = client.document("c", "d", "e", "f") - - with self.assertRaises(RuntimeError): - with batch as ctx_mgr: - ctx_mgr.create(document1, {"ten": 10, "buck": u"ets"}) - ctx_mgr.delete(document2) - raise RuntimeError("testing") - - self.assertIsNone(batch.write_results) - self.assertIsNone(batch.commit_time) - # batch still has its changes - self.assertEqual(len(batch._write_pbs), 2) - - firestore_api.commit.assert_not_called() - - -def _value_pb(**kwargs): - from google.cloud.firestore_v1beta1.proto.document_pb2 import Value - - return Value(**kwargs) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="seventy-nine"): - from google.cloud.firestore_v1beta1.client import Client - - credentials = _make_credentials() - - with pytest.deprecated_call(): - return Client(project=project, credentials=credentials) diff --git a/firestore/tests/unit/v1beta1/test_client.py b/firestore/tests/unit/v1beta1/test_client.py deleted file mode 100644 index 4aa5a36efb71..000000000000 --- a/firestore/tests/unit/v1beta1/test_client.py +++ /dev/null @@ -1,667 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import types -import unittest - -import mock -import pytest - - -class TestClient(unittest.TestCase): - - PROJECT = "my-prahjekt" - - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.client import Client - - return Client - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def _make_default_one(self): - credentials = _make_credentials() - return self._make_one(project=self.PROJECT, credentials=credentials) - - def test_constructor(self): - from google.cloud.firestore_v1beta1.client import DEFAULT_DATABASE - - credentials = _make_credentials() - - with pytest.deprecated_call(): - client = self._make_one(project=self.PROJECT, credentials=credentials) - - self.assertEqual(client.project, self.PROJECT) - self.assertEqual(client._credentials, credentials) - self.assertEqual(client._database, DEFAULT_DATABASE) - - def test_constructor_explicit(self): - credentials = _make_credentials() - database = "now-db" - - with pytest.deprecated_call(): - client = self._make_one( - project=self.PROJECT, credentials=credentials, database=database - ) - - self.assertEqual(client.project, self.PROJECT) - self.assertEqual(client._credentials, credentials) - self.assertEqual(client._database, database) - - @mock.patch( - "google.cloud.firestore_v1beta1.gapic.firestore_client." 
"FirestoreClient", - autospec=True, - return_value=mock.sentinel.firestore_api, - ) - def test__firestore_api_property(self, mock_client): - mock_client.SERVICE_ADDRESS = "endpoint" - - with pytest.deprecated_call(): - client = self._make_default_one() - - self.assertIsNone(client._firestore_api_internal) - firestore_api = client._firestore_api - self.assertIs(firestore_api, mock_client.return_value) - self.assertIs(firestore_api, client._firestore_api_internal) - mock_client.assert_called_once_with(transport=client._transport) - - # Call again to show that it is cached, but call count is still 1. - self.assertIs(client._firestore_api, mock_client.return_value) - self.assertEqual(mock_client.call_count, 1) - - def test___database_string_property(self): - credentials = _make_credentials() - database = "cheeeeez" - - with pytest.deprecated_call(): - client = self._make_one( - project=self.PROJECT, credentials=credentials, database=database - ) - - self.assertIsNone(client._database_string_internal) - database_string = client._database_string - expected = "projects/{}/databases/{}".format(client.project, client._database) - self.assertEqual(database_string, expected) - self.assertIs(database_string, client._database_string_internal) - - # Swap it out with a unique value to verify it is cached. - client._database_string_internal = mock.sentinel.cached - self.assertIs(client._database_string, mock.sentinel.cached) - - def test___rpc_metadata_property(self): - credentials = _make_credentials() - database = "quanta" - - with pytest.deprecated_call(): - client = self._make_one( - project=self.PROJECT, credentials=credentials, database=database - ) - - self.assertEqual( - client._rpc_metadata, - [("google-cloud-resource-prefix", client._database_string)], - ) - - def test_collection_factory(self): - from google.cloud.firestore_v1beta1.collection import CollectionReference - - collection_id = "users" - - with pytest.deprecated_call(): - client = self._make_default_one() - - collection = client.collection(collection_id) - - self.assertEqual(collection._path, (collection_id,)) - self.assertIs(collection._client, client) - self.assertIsInstance(collection, CollectionReference) - - def test_collection_factory_nested(self): - from google.cloud.firestore_v1beta1.collection import CollectionReference - - with pytest.deprecated_call(): - client = self._make_default_one() - - parts = ("users", "alovelace", "beep") - collection_path = "/".join(parts) - collection1 = client.collection(collection_path) - - self.assertEqual(collection1._path, parts) - self.assertIs(collection1._client, client) - self.assertIsInstance(collection1, CollectionReference) - - # Make sure using segments gives the same result. - collection2 = client.collection(*parts) - self.assertEqual(collection2._path, parts) - self.assertIs(collection2._client, client) - self.assertIsInstance(collection2, CollectionReference) - - def test_document_factory(self): - from google.cloud.firestore_v1beta1.document import DocumentReference - - parts = ("rooms", "roomA") - - with pytest.deprecated_call(): - client = self._make_default_one() - - doc_path = "/".join(parts) - document1 = client.document(doc_path) - - self.assertEqual(document1._path, parts) - self.assertIs(document1._client, client) - self.assertIsInstance(document1, DocumentReference) - - # Make sure using segments gives the same result. 
- document2 = client.document(*parts) - self.assertEqual(document2._path, parts) - self.assertIs(document2._client, client) - self.assertIsInstance(document2, DocumentReference) - - def test_document_factory_nested(self): - from google.cloud.firestore_v1beta1.document import DocumentReference - - with pytest.deprecated_call(): - client = self._make_default_one() - - parts = ("rooms", "roomA", "shoes", "dressy") - doc_path = "/".join(parts) - document1 = client.document(doc_path) - - self.assertEqual(document1._path, parts) - self.assertIs(document1._client, client) - self.assertIsInstance(document1, DocumentReference) - - # Make sure using segments gives the same result. - document2 = client.document(*parts) - self.assertEqual(document2._path, parts) - self.assertIs(document2._client, client) - self.assertIsInstance(document2, DocumentReference) - - def test_field_path(self): - klass = self._get_target_class() - self.assertEqual(klass.field_path("a", "b", "c"), "a.b.c") - - def test_write_option_last_update(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1beta1._helpers import LastUpdateOption - - timestamp = timestamp_pb2.Timestamp(seconds=1299767599, nanos=811111097) - - klass = self._get_target_class() - option = klass.write_option(last_update_time=timestamp) - self.assertIsInstance(option, LastUpdateOption) - self.assertEqual(option._last_update_time, timestamp) - - def test_write_option_exists(self): - from google.cloud.firestore_v1beta1._helpers import ExistsOption - - klass = self._get_target_class() - - option1 = klass.write_option(exists=False) - self.assertIsInstance(option1, ExistsOption) - self.assertFalse(option1._exists) - - option2 = klass.write_option(exists=True) - self.assertIsInstance(option2, ExistsOption) - self.assertTrue(option2._exists) - - def test_write_open_neither_arg(self): - from google.cloud.firestore_v1beta1.client import _BAD_OPTION_ERR - - klass = self._get_target_class() - with self.assertRaises(TypeError) as exc_info: - klass.write_option() - - self.assertEqual(exc_info.exception.args, (_BAD_OPTION_ERR,)) - - def test_write_multiple_args(self): - from google.cloud.firestore_v1beta1.client import _BAD_OPTION_ERR - - klass = self._get_target_class() - with self.assertRaises(TypeError) as exc_info: - klass.write_option(exists=False, last_update_time=mock.sentinel.timestamp) - - self.assertEqual(exc_info.exception.args, (_BAD_OPTION_ERR,)) - - def test_write_bad_arg(self): - from google.cloud.firestore_v1beta1.client import _BAD_OPTION_ERR - - klass = self._get_target_class() - with self.assertRaises(TypeError) as exc_info: - klass.write_option(spinach="popeye") - - extra = "{!r} was provided".format("spinach") - self.assertEqual(exc_info.exception.args, (_BAD_OPTION_ERR, extra)) - - def test_collections(self): - from google.api_core.page_iterator import Iterator - from google.api_core.page_iterator import Page - from google.cloud.firestore_v1beta1.collection import CollectionReference - - collection_ids = ["users", "projects"] - - with pytest.deprecated_call(): - client = self._make_default_one() - - firestore_api = mock.Mock(spec=["list_collection_ids"]) - client._firestore_api_internal = firestore_api - - class _Iterator(Iterator): - def __init__(self, pages): - super(_Iterator, self).__init__(client=None) - self._pages = pages - - def _next_page(self): - if self._pages: - page, self._pages = self._pages[0], self._pages[1:] - return Page(self, page, self.item_to_value) - - iterator = _Iterator(pages=[collection_ids]) - 
firestore_api.list_collection_ids.return_value = iterator - - collections = list(client.collections()) - - self.assertEqual(len(collections), len(collection_ids)) - for collection, collection_id in zip(collections, collection_ids): - self.assertIsInstance(collection, CollectionReference) - self.assertEqual(collection.parent, None) - self.assertEqual(collection.id, collection_id) - - firestore_api.list_collection_ids.assert_called_once_with( - client._database_string, metadata=client._rpc_metadata - ) - - def _get_all_helper(self, client, references, document_pbs, **kwargs): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["batch_get_documents"]) - response_iterator = iter(document_pbs) - firestore_api.batch_get_documents.return_value = response_iterator - - # Attach the fake GAPIC to a real client. - client._firestore_api_internal = firestore_api - - # Actually call get_all(). - snapshots = client.get_all(references, **kwargs) - self.assertIsInstance(snapshots, types.GeneratorType) - - return list(snapshots) - - def _info_for_get_all(self, data1, data2): - - with pytest.deprecated_call(): - client = self._make_default_one() - - document1 = client.document("pineapple", "lamp1") - document2 = client.document("pineapple", "lamp2") - - # Make response protobufs. - document_pb1, read_time = _doc_get_info(document1._document_path, data1) - response1 = _make_batch_response(found=document_pb1, read_time=read_time) - - document_pb2, read_time = _doc_get_info(document2._document_path, data2) - response2 = _make_batch_response(found=document_pb2, read_time=read_time) - - return client, document1, document2, response1, response2 - - def test_get_all(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.document import DocumentSnapshot - - data1 = {"a": u"cheese"} - data2 = {"b": True, "c": 18} - info = self._info_for_get_all(data1, data2) - client, document1, document2, response1, response2 = info - - # Exercise the mocked ``batch_get_documents``. - field_paths = ["a", "b"] - snapshots = self._get_all_helper( - client, - [document1, document2], - [response1, response2], - field_paths=field_paths, - ) - self.assertEqual(len(snapshots), 2) - - snapshot1 = snapshots[0] - self.assertIsInstance(snapshot1, DocumentSnapshot) - self.assertIs(snapshot1._reference, document1) - self.assertEqual(snapshot1._data, data1) - - snapshot2 = snapshots[1] - self.assertIsInstance(snapshot2, DocumentSnapshot) - self.assertIs(snapshot2._reference, document2) - self.assertEqual(snapshot2._data, data2) - - # Verify the call to the mock. - doc_paths = [document1._document_path, document2._document_path] - mask = common_pb2.DocumentMask(field_paths=field_paths) - client._firestore_api.batch_get_documents.assert_called_once_with( - client._database_string, - doc_paths, - mask, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_get_all_with_transaction(self): - from google.cloud.firestore_v1beta1.document import DocumentSnapshot - - data = {"so-much": 484} - info = self._info_for_get_all(data, {}) - client, document, _, response, _ = info - transaction = client.transaction() - txn_id = b"the-man-is-non-stop" - transaction._id = txn_id - - # Exercise the mocked ``batch_get_documents``. 
- snapshots = self._get_all_helper( - client, [document], [response], transaction=transaction - ) - self.assertEqual(len(snapshots), 1) - - snapshot = snapshots[0] - self.assertIsInstance(snapshot, DocumentSnapshot) - self.assertIs(snapshot._reference, document) - self.assertEqual(snapshot._data, data) - - # Verify the call to the mock. - doc_paths = [document._document_path] - client._firestore_api.batch_get_documents.assert_called_once_with( - client._database_string, - doc_paths, - None, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - def test_get_all_unknown_result(self): - from google.cloud.firestore_v1beta1.client import _BAD_DOC_TEMPLATE - - info = self._info_for_get_all({"z": 28.5}, {}) - client, document, _, _, response = info - - # Exercise the mocked ``batch_get_documents``. - with self.assertRaises(ValueError) as exc_info: - self._get_all_helper(client, [document], [response]) - - err_msg = _BAD_DOC_TEMPLATE.format(response.found.name) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - # Verify the call to the mock. - doc_paths = [document._document_path] - client._firestore_api.batch_get_documents.assert_called_once_with( - client._database_string, - doc_paths, - None, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_get_all_wrong_order(self): - from google.cloud.firestore_v1beta1.document import DocumentSnapshot - - data1 = {"up": 10} - data2 = {"down": -10} - info = self._info_for_get_all(data1, data2) - client, document1, document2, response1, response2 = info - document3 = client.document("pineapple", "lamp3") - response3 = _make_batch_response(missing=document3._document_path) - - # Exercise the mocked ``batch_get_documents``. - snapshots = self._get_all_helper( - client, [document1, document2, document3], [response2, response1, response3] - ) - - self.assertEqual(len(snapshots), 3) - - snapshot1 = snapshots[0] - self.assertIsInstance(snapshot1, DocumentSnapshot) - self.assertIs(snapshot1._reference, document2) - self.assertEqual(snapshot1._data, data2) - - snapshot2 = snapshots[1] - self.assertIsInstance(snapshot2, DocumentSnapshot) - self.assertIs(snapshot2._reference, document1) - self.assertEqual(snapshot2._data, data1) - - self.assertFalse(snapshots[2].exists) - - # Verify the call to the mock. 
- doc_paths = [ - document1._document_path, - document2._document_path, - document3._document_path, - ] - client._firestore_api.batch_get_documents.assert_called_once_with( - client._database_string, - doc_paths, - None, - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_batch(self): - from google.cloud.firestore_v1beta1.batch import WriteBatch - - with pytest.deprecated_call(): - client = self._make_default_one() - - batch = client.batch() - self.assertIsInstance(batch, WriteBatch) - self.assertIs(batch._client, client) - self.assertEqual(batch._write_pbs, []) - - def test_transaction(self): - from google.cloud.firestore_v1beta1.transaction import Transaction - - with pytest.deprecated_call(): - client = self._make_default_one() - - transaction = client.transaction(max_attempts=3, read_only=True) - self.assertIsInstance(transaction, Transaction) - self.assertEqual(transaction._write_pbs, []) - self.assertEqual(transaction._max_attempts, 3) - self.assertTrue(transaction._read_only) - self.assertIsNone(transaction._id) - - -class Test__reference_info(unittest.TestCase): - @staticmethod - def _call_fut(references): - from google.cloud.firestore_v1beta1.client import _reference_info - - return _reference_info(references) - - def test_it(self): - from google.cloud.firestore_v1beta1.client import Client - - credentials = _make_credentials() - - with pytest.deprecated_call(): - client = Client(project="hi-projject", credentials=credentials) - - reference1 = client.document("a", "b") - reference2 = client.document("a", "b", "c", "d") - reference3 = client.document("a", "b") - reference4 = client.document("f", "g") - - doc_path1 = reference1._document_path - doc_path2 = reference2._document_path - doc_path3 = reference3._document_path - doc_path4 = reference4._document_path - self.assertEqual(doc_path1, doc_path3) - - document_paths, reference_map = self._call_fut( - [reference1, reference2, reference3, reference4] - ) - self.assertEqual(document_paths, [doc_path1, doc_path2, doc_path3, doc_path4]) - # reference3 over-rides reference1. 
- expected_map = { - doc_path2: reference2, - doc_path3: reference3, - doc_path4: reference4, - } - self.assertEqual(reference_map, expected_map) - - -class Test__get_reference(unittest.TestCase): - @staticmethod - def _call_fut(document_path, reference_map): - from google.cloud.firestore_v1beta1.client import _get_reference - - return _get_reference(document_path, reference_map) - - def test_success(self): - doc_path = "a/b/c" - reference_map = {doc_path: mock.sentinel.reference} - self.assertIs(self._call_fut(doc_path, reference_map), mock.sentinel.reference) - - def test_failure(self): - from google.cloud.firestore_v1beta1.client import _BAD_DOC_TEMPLATE - - doc_path = "1/888/call-now" - with self.assertRaises(ValueError) as exc_info: - self._call_fut(doc_path, {}) - - err_msg = _BAD_DOC_TEMPLATE.format(doc_path) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - -class Test__parse_batch_get(unittest.TestCase): - @staticmethod - def _call_fut(get_doc_response, reference_map, client=mock.sentinel.client): - from google.cloud.firestore_v1beta1.client import _parse_batch_get - - return _parse_batch_get(get_doc_response, reference_map, client) - - @staticmethod - def _dummy_ref_string(): - from google.cloud.firestore_v1beta1.client import DEFAULT_DATABASE - - project = u"bazzzz" - collection_id = u"fizz" - document_id = u"buzz" - return u"projects/{}/databases/{}/documents/{}/{}".format( - project, DEFAULT_DATABASE, collection_id, document_id - ) - - def test_found(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.firestore_v1beta1.document import DocumentSnapshot - - now = datetime.datetime.utcnow() - read_time = _datetime_to_pb_timestamp(now) - delta = datetime.timedelta(seconds=100) - update_time = _datetime_to_pb_timestamp(now - delta) - create_time = _datetime_to_pb_timestamp(now - 2 * delta) - - ref_string = self._dummy_ref_string() - document_pb = document_pb2.Document( - name=ref_string, - fields={ - "foo": document_pb2.Value(double_value=1.5), - "bar": document_pb2.Value(string_value=u"skillz"), - }, - create_time=create_time, - update_time=update_time, - ) - response_pb = _make_batch_response(found=document_pb, read_time=read_time) - - reference_map = {ref_string: mock.sentinel.reference} - snapshot = self._call_fut(response_pb, reference_map) - self.assertIsInstance(snapshot, DocumentSnapshot) - self.assertIs(snapshot._reference, mock.sentinel.reference) - self.assertEqual(snapshot._data, {"foo": 1.5, "bar": u"skillz"}) - self.assertTrue(snapshot._exists) - self.assertEqual(snapshot.read_time, read_time) - self.assertEqual(snapshot.create_time, create_time) - self.assertEqual(snapshot.update_time, update_time) - - def test_missing(self): - ref_string = self._dummy_ref_string() - response_pb = _make_batch_response(missing=ref_string) - - snapshot = self._call_fut(response_pb, {}) - self.assertFalse(snapshot.exists) - - def test_unset_result_type(self): - response_pb = _make_batch_response() - with self.assertRaises(ValueError): - self._call_fut(response_pb, {}) - - def test_unknown_result_type(self): - response_pb = mock.Mock(spec=["WhichOneof"]) - response_pb.WhichOneof.return_value = "zoob_value" - - with self.assertRaises(ValueError): - self._call_fut(response_pb, {}) - - response_pb.WhichOneof.assert_called_once_with("result") - - -class Test__get_doc_mask(unittest.TestCase): - @staticmethod - def _call_fut(field_paths): - from google.cloud.firestore_v1beta1.client 
import _get_doc_mask - - return _get_doc_mask(field_paths) - - def test_none(self): - self.assertIsNone(self._call_fut(None)) - - def test_paths(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - - field_paths = ["a.b", "c"] - result = self._call_fut(field_paths) - expected = common_pb2.DocumentMask(field_paths=field_paths) - self.assertEqual(result, expected) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_batch_response(**kwargs): - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - - return firestore_pb2.BatchGetDocumentsResponse(**kwargs) - - -def _doc_get_info(ref_string, values): - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.firestore_v1beta1 import _helpers - - now = datetime.datetime.utcnow() - read_time = _datetime_to_pb_timestamp(now) - delta = datetime.timedelta(seconds=100) - update_time = _datetime_to_pb_timestamp(now - delta) - create_time = _datetime_to_pb_timestamp(now - 2 * delta) - - document_pb = document_pb2.Document( - name=ref_string, - fields=_helpers.encode_dict(values), - create_time=create_time, - update_time=update_time, - ) - - return document_pb, read_time diff --git a/firestore/tests/unit/v1beta1/test_collection.py b/firestore/tests/unit/v1beta1/test_collection.py deleted file mode 100644 index 2bc7695ae940..000000000000 --- a/firestore/tests/unit/v1beta1/test_collection.py +++ /dev/null @@ -1,593 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import types -import unittest - -import mock -import pytest -import six - - -class TestCollectionReference(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.collection import CollectionReference - - return CollectionReference - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - @staticmethod - def _get_public_methods(klass): - return set( - name - for name, value in six.iteritems(klass.__dict__) - if (not name.startswith("_") and isinstance(value, types.FunctionType)) - ) - - def test_query_method_matching(self): - from google.cloud.firestore_v1beta1.query import Query - - query_methods = self._get_public_methods(Query) - klass = self._get_target_class() - collection_methods = self._get_public_methods(klass) - # Make sure every query method is present on - # ``CollectionReference``. 
- self.assertLessEqual(query_methods, collection_methods) - - def test_constructor(self): - collection_id1 = "rooms" - document_id = "roomA" - collection_id2 = "messages" - client = mock.sentinel.client - - collection = self._make_one( - collection_id1, document_id, collection_id2, client=client - ) - self.assertIs(collection._client, client) - expected_path = (collection_id1, document_id, collection_id2) - self.assertEqual(collection._path, expected_path) - - def test_constructor_invalid_path(self): - with self.assertRaises(ValueError): - self._make_one() - with self.assertRaises(ValueError): - self._make_one(99, "doc", "bad-collection-id") - with self.assertRaises(ValueError): - self._make_one("bad-document-ID", None, "sub-collection") - with self.assertRaises(ValueError): - self._make_one("Just", "A-Document") - - def test_constructor_invalid_kwarg(self): - with self.assertRaises(TypeError): - self._make_one("Coh-lek-shun", donut=True) - - def test___eq___other_type(self): - client = mock.sentinel.client - collection = self._make_one("name", client=client) - other = object() - self.assertFalse(collection == other) - - def test___eq___different_path_same_client(self): - client = mock.sentinel.client - collection = self._make_one("name", client=client) - other = self._make_one("other", client=client) - self.assertFalse(collection == other) - - def test___eq___same_path_different_client(self): - client = mock.sentinel.client - other_client = mock.sentinel.other_client - collection = self._make_one("name", client=client) - other = self._make_one("name", client=other_client) - self.assertFalse(collection == other) - - def test___eq___same_path_same_client(self): - client = mock.sentinel.client - collection = self._make_one("name", client=client) - other = self._make_one("name", client=client) - self.assertTrue(collection == other) - - def test_id_property(self): - collection_id = "hi-bob" - collection = self._make_one(collection_id) - self.assertEqual(collection.id, collection_id) - - def test_parent_property(self): - from google.cloud.firestore_v1beta1.document import DocumentReference - - collection_id1 = "grocery-store" - document_id = "market" - collection_id2 = "darth" - client = _make_client() - collection = self._make_one( - collection_id1, document_id, collection_id2, client=client - ) - - parent = collection.parent - self.assertIsInstance(parent, DocumentReference) - self.assertIs(parent._client, client) - self.assertEqual(parent._path, (collection_id1, document_id)) - - def test_parent_property_top_level(self): - collection = self._make_one("tahp-leh-vull") - self.assertIsNone(collection.parent) - - def test_document_factory_explicit_id(self): - from google.cloud.firestore_v1beta1.document import DocumentReference - - collection_id = "grocery-store" - document_id = "market" - client = _make_client() - collection = self._make_one(collection_id, client=client) - - child = collection.document(document_id) - self.assertIsInstance(child, DocumentReference) - self.assertIs(child._client, client) - self.assertEqual(child._path, (collection_id, document_id)) - - @mock.patch( - "google.cloud.firestore_v1beta1.collection._auto_id", - return_value="zorpzorpthreezorp012", - ) - def test_document_factory_auto_id(self, mock_auto_id): - from google.cloud.firestore_v1beta1.document import DocumentReference - - collection_name = "space-town" - client = _make_client() - collection = self._make_one(collection_name, client=client) - - child = collection.document() - self.assertIsInstance(child, 
DocumentReference) - self.assertIs(child._client, client) - self.assertEqual(child._path, (collection_name, mock_auto_id.return_value)) - - mock_auto_id.assert_called_once_with() - - def test__parent_info_top_level(self): - client = _make_client() - collection_id = "soap" - collection = self._make_one(collection_id, client=client) - - parent_path, expected_prefix = collection._parent_info() - - expected_path = "projects/{}/databases/{}/documents".format( - client.project, client._database - ) - self.assertEqual(parent_path, expected_path) - prefix = "{}/{}".format(expected_path, collection_id) - self.assertEqual(expected_prefix, prefix) - - def test__parent_info_nested(self): - collection_id1 = "bar" - document_id = "baz" - collection_id2 = "chunk" - client = _make_client() - collection = self._make_one( - collection_id1, document_id, collection_id2, client=client - ) - - parent_path, expected_prefix = collection._parent_info() - - expected_path = "projects/{}/databases/{}/documents/{}/{}".format( - client.project, client._database, collection_id1, document_id - ) - self.assertEqual(parent_path, expected_path) - prefix = "{}/{}".format(expected_path, collection_id2) - self.assertEqual(expected_prefix, prefix) - - def test_add_auto_assigned(self): - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.document import DocumentReference - from google.cloud.firestore_v1beta1 import SERVER_TIMESTAMP - from google.cloud.firestore_v1beta1._helpers import pbs_for_set_no_merge - - # Create a minimal fake GAPIC and attach it to a real client. - firestore_api = mock.Mock(spec=["create_document", "commit"]) - write_result = mock.Mock( - update_time=mock.sentinel.update_time, spec=["update_time"] - ) - commit_response = mock.Mock( - write_results=[write_result], - spec=["write_results", "commit_time"], - commit_time=mock.sentinel.commit_time, - ) - firestore_api.commit.return_value = commit_response - create_doc_response = document_pb2.Document() - firestore_api.create_document.return_value = create_doc_response - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a collection. - collection = self._make_one("grand-parent", "parent", "child", client=client) - - # Add a dummy response for the fake GAPIC. - parent_path = collection.parent._document_path - auto_assigned_id = "cheezburger" - name = "{}/{}/{}".format(parent_path, collection.id, auto_assigned_id) - create_doc_response = document_pb2.Document(name=name) - create_doc_response.update_time.FromDatetime(datetime.datetime.utcnow()) - firestore_api.create_document.return_value = create_doc_response - - # Actually call add() on our collection; include a transform to make - # sure transforms during adds work. - document_data = {"been": "here", "now": SERVER_TIMESTAMP} - update_time, document_ref = collection.add(document_data) - - # Verify the response and the mocks.
- self.assertIs(update_time, mock.sentinel.update_time) - self.assertIsInstance(document_ref, DocumentReference) - self.assertIs(document_ref._client, client) - expected_path = collection._path + (auto_assigned_id,) - self.assertEqual(document_ref._path, expected_path) - - expected_document_pb = document_pb2.Document() - firestore_api.create_document.assert_called_once_with( - parent_path, - collection_id=collection.id, - document_id=None, - document=expected_document_pb, - mask=None, - metadata=client._rpc_metadata, - ) - write_pbs = pbs_for_set_no_merge(document_ref._document_path, document_data) - firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=None, - metadata=client._rpc_metadata, - ) - - @staticmethod - def _write_pb_for_create(document_path, document_data): - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1 import _helpers - - return write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=_helpers.encode_dict(document_data) - ), - current_document=common_pb2.Precondition(exists=False), - ) - - def test_add_explicit_id(self): - from google.cloud.firestore_v1beta1.document import DocumentReference - - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - write_result = mock.Mock( - update_time=mock.sentinel.update_time, spec=["update_time"] - ) - commit_response = mock.Mock( - write_results=[write_result], - spec=["write_results", "commit_time"], - commit_time=mock.sentinel.commit_time, - ) - firestore_api.commit.return_value = commit_response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a collection and call add(). - collection = self._make_one("parent", client=client) - document_data = {"zorp": 208.75, "i-did-not": b"know that"} - doc_id = "child" - update_time, document_ref = collection.add(document_data, document_id=doc_id) - - # Verify the response and the mocks. 
- self.assertIs(update_time, mock.sentinel.update_time) - self.assertIsInstance(document_ref, DocumentReference) - self.assertIs(document_ref._client, client) - self.assertEqual(document_ref._path, (collection.id, doc_id)) - - write_pb = self._write_pb_for_create(document_ref._document_path, document_data) - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_select(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - field_paths = ["a", "b"] - query = collection.select(field_paths) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - projection_paths = [ - field_ref.field_path for field_ref in query._projection.fields - ] - self.assertEqual(projection_paths, field_paths) - - @staticmethod - def _make_field_filter_pb(field_path, op_string, value): - from google.cloud.firestore_v1beta1.proto import query_pb2 - from google.cloud.firestore_v1beta1 import _helpers - from google.cloud.firestore_v1beta1.query import _enum_from_op_string - - return query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=_enum_from_op_string(op_string), - value=_helpers.encode_value(value), - ) - - def test_where(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - field_path = "foo" - op_string = "==" - value = 45 - query = collection.where(field_path, op_string, value) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(len(query._field_filters), 1) - field_filter_pb = query._field_filters[0] - self.assertEqual( - field_filter_pb, self._make_field_filter_pb(field_path, op_string, value) - ) - - @staticmethod - def _make_order_pb(field_path, direction): - from google.cloud.firestore_v1beta1.proto import query_pb2 - from google.cloud.firestore_v1beta1.query import _enum_from_direction - - return query_pb2.StructuredQuery.Order( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - direction=_enum_from_direction(direction), - ) - - def test_order_by(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - field_path = "foo" - direction = Query.DESCENDING - query = collection.order_by(field_path, direction=direction) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(len(query._orders), 1) - order_pb = query._orders[0] - self.assertEqual(order_pb, self._make_order_pb(field_path, direction)) - - def test_limit(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - limit = 15 - query = collection.limit(limit) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._limit, limit) - - def test_offset(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - offset = 113 - query = collection.offset(offset) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._offset, offset) - - def test_start_at(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - doc_fields = {"a": "b"} - query = collection.start_at(doc_fields) - - self.assertIsInstance(query, Query) - 
self.assertIs(query._parent, collection) - self.assertEqual(query._start_at, (doc_fields, True)) - - def test_start_after(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - doc_fields = {"d": "foo", "e": 10} - query = collection.start_after(doc_fields) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._start_at, (doc_fields, False)) - - def test_end_before(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - doc_fields = {"bar": 10.5} - query = collection.end_before(doc_fields) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._end_at, (doc_fields, True)) - - def test_end_at(self): - from google.cloud.firestore_v1beta1.query import Query - - collection = self._make_one("collection") - doc_fields = {"opportunity": True, "reason": 9} - query = collection.end_at(doc_fields) - - self.assertIsInstance(query, Query) - self.assertIs(query._parent, collection) - self.assertEqual(query._end_at, (doc_fields, False)) - - def _list_documents_helper(self, page_size=None): - from google.api_core.page_iterator import Iterator - from google.api_core.page_iterator import Page - from google.cloud.firestore_v1beta1.document import DocumentReference - from google.cloud.firestore_v1beta1.gapic.firestore_client import ( - FirestoreClient, - ) - from google.cloud.firestore_v1beta1.proto.document_pb2 import Document - - class _Iterator(Iterator): - def __init__(self, pages): - super(_Iterator, self).__init__(client=None) - self._pages = pages - - def _next_page(self): - if self._pages: - page, self._pages = self._pages[0], self._pages[1:] - return Page(self, page, self.item_to_value) - - client = _make_client() - template = client._database_string + "/documents/{}" - document_ids = ["doc-1", "doc-2"] - documents = [ - Document(name=template.format(document_id)) for document_id in document_ids - ] - iterator = _Iterator(pages=[documents]) - api_client = mock.create_autospec(FirestoreClient) - api_client.list_documents.return_value = iterator - client._firestore_api_internal = api_client - collection = self._make_one("collection", client=client) - - if page_size is not None: - documents = list(collection.list_documents(page_size=page_size)) - else: - documents = list(collection.list_documents()) - - # Verify the response and the mocks. 
- self.assertEqual(len(documents), len(document_ids)) - for document, document_id in zip(documents, document_ids): - self.assertIsInstance(document, DocumentReference) - self.assertEqual(document.parent, collection) - self.assertEqual(document.id, document_id) - - parent, _ = collection._parent_info() - api_client.list_documents.assert_called_once_with( - parent, - collection.id, - page_size=page_size, - show_missing=True, - metadata=client._rpc_metadata, - ) - - def test_list_documents_wo_page_size(self): - self._list_documents_helper() - - def test_list_documents_w_page_size(self): - self._list_documents_helper(page_size=25) - - @mock.patch("google.cloud.firestore_v1beta1.query.Query", autospec=True) - def test_get(self, query_class): - import warnings - - collection = self._make_one("collection") - with warnings.catch_warnings(record=True) as warned: - get_response = collection.get() - - query_class.assert_called_once_with(collection) - query_instance = query_class.return_value - self.assertIs(get_response, query_instance.stream.return_value) - query_instance.stream.assert_called_once_with(transaction=None) - - # Verify the deprecation - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - @mock.patch("google.cloud.firestore_v1beta1.query.Query", autospec=True) - def test_get_with_transaction(self, query_class): - import warnings - - collection = self._make_one("collection") - transaction = mock.sentinel.txn - with warnings.catch_warnings(record=True) as warned: - get_response = collection.get(transaction=transaction) - - query_class.assert_called_once_with(collection) - query_instance = query_class.return_value - self.assertIs(get_response, query_instance.stream.return_value) - query_instance.stream.assert_called_once_with(transaction=transaction) - - # Verify the deprecation - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - @mock.patch("google.cloud.firestore_v1beta1.query.Query", autospec=True) - def test_stream(self, query_class): - collection = self._make_one("collection") - stream_response = collection.stream() - - query_class.assert_called_once_with(collection) - query_instance = query_class.return_value - self.assertIs(stream_response, query_instance.stream.return_value) - query_instance.stream.assert_called_once_with(transaction=None) - - @mock.patch("google.cloud.firestore_v1beta1.query.Query", autospec=True) - def test_stream_with_transaction(self, query_class): - collection = self._make_one("collection") - transaction = mock.sentinel.txn - stream_response = collection.stream(transaction=transaction) - - query_class.assert_called_once_with(collection) - query_instance = query_class.return_value - self.assertIs(stream_response, query_instance.stream.return_value) - query_instance.stream.assert_called_once_with(transaction=transaction) - - @mock.patch("google.cloud.firestore_v1beta1.collection.Watch", autospec=True) - def test_on_snapshot(self, watch): - collection = self._make_one("collection") - collection.on_snapshot(None) - watch.for_query.assert_called_once() - - -class Test__auto_id(unittest.TestCase): - @staticmethod - def _call_fut(): - from google.cloud.firestore_v1beta1.collection import _auto_id - - return _auto_id() - - @mock.patch("random.choice") - def test_it(self, mock_rand_choice): - from google.cloud.firestore_v1beta1.collection import _AUTO_ID_CHARS - - mock_result = "0123456789abcdefghij" - mock_rand_choice.side_effect = list(mock_result) - result = self._call_fut() - 
self.assertEqual(result, mock_result) - - mock_calls = [mock.call(_AUTO_ID_CHARS)] * 20 - self.assertEqual(mock_rand_choice.mock_calls, mock_calls) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(): - from google.cloud.firestore_v1beta1.client import Client - - credentials = _make_credentials() - with pytest.deprecated_call(): - return Client(project="project-project", credentials=credentials) diff --git a/firestore/tests/unit/v1beta1/test_cross_language.py b/firestore/tests/unit/v1beta1/test_cross_language.py deleted file mode 100644 index d04b71436ff6..000000000000 --- a/firestore/tests/unit/v1beta1/test_cross_language.py +++ /dev/null @@ -1,505 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -import glob -import json -import os - -import mock -import pytest - -from google.protobuf import text_format -from google.cloud.firestore_v1beta1.proto import document_pb2 -from google.cloud.firestore_v1beta1.proto import firestore_pb2 -from google.cloud.firestore_v1beta1.proto import test_v1beta1_pb2 -from google.cloud.firestore_v1beta1.proto import write_pb2 - - -def _load_testproto(filename): - with open(filename, "r") as tp_file: - tp_text = tp_file.read() - test_proto = test_v1beta1_pb2.Test() - text_format.Merge(tp_text, test_proto) - shortname = os.path.split(filename)[-1] - test_proto.description = test_proto.description + " (%s)" % shortname - return test_proto - - -_here = os.path.dirname(__file__) -_glob_expr = "{}/testdata/*.textproto".format(_here) -_globs = glob.glob(_glob_expr) -ALL_TESTPROTOS = [_load_testproto(filename) for filename in sorted(_globs)] - -_CREATE_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "create" -] - -_GET_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "get" -] - -_SET_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "set" -] - -_UPDATE_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "update" -] - -_UPDATE_PATHS_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "update_paths" -] - -_DELETE_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "delete" -] - -_LISTEN_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "listen" -] - -_QUERY_TESTPROTOS = [ - test_proto - for test_proto in ALL_TESTPROTOS - if test_proto.WhichOneof("test") == "query" -] - - -def _mock_firestore_api(): - firestore_api = mock.Mock(spec=["commit"]) - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult()] - ) - firestore_api.commit.return_value = commit_response - return firestore_api - - -def 
_make_client_document(firestore_api, testcase): - from google.cloud.firestore_v1beta1 import Client - from google.cloud.firestore_v1beta1.client import DEFAULT_DATABASE - import google.auth.credentials - - _, project, _, database, _, doc_path = testcase.doc_ref_path.split("/", 5) - assert database == DEFAULT_DATABASE - - # Attach the fake GAPIC to a real client. - credentials = mock.Mock(spec=google.auth.credentials.Credentials) - - with pytest.deprecated_call(): - client = Client(project=project, credentials=credentials) - - client._firestore_api_internal = firestore_api - return client, client.document(doc_path) - - -def _run_testcase(testcase, call, firestore_api, client): - if getattr(testcase, "is_error", False): - # TODO: is there a subclass of Exception we can check for? - with pytest.raises(Exception): - call() - else: - call() - firestore_api.commit.assert_called_once_with( - client._database_string, - list(testcase.request.writes), - transaction=None, - metadata=client._rpc_metadata, - ) - - -@pytest.mark.parametrize("test_proto", _CREATE_TESTPROTOS) -def test_create_testprotos(test_proto): - testcase = test_proto.create - firestore_api = _mock_firestore_api() - client, document = _make_client_document(firestore_api, testcase) - data = convert_data(json.loads(testcase.json_data)) - call = functools.partial(document.create, data) - _run_testcase(testcase, call, firestore_api, client) - - -@pytest.mark.parametrize("test_proto", _GET_TESTPROTOS) -def test_get_testprotos(test_proto): - testcase = test_proto.get - firestore_api = mock.Mock(spec=["get_document"]) - response = document_pb2.Document() - firestore_api.get_document.return_value = response - client, document = _make_client_document(firestore_api, testcase) - - document.get() # No '.textprotos' for errors, field_paths. 
- - firestore_api.get_document.assert_called_once_with( - document._document_path, - mask=None, - transaction=None, - metadata=client._rpc_metadata, - ) - - -@pytest.mark.parametrize("test_proto", _SET_TESTPROTOS) -def test_set_testprotos(test_proto): - testcase = test_proto.set - firestore_api = _mock_firestore_api() - client, document = _make_client_document(firestore_api, testcase) - data = convert_data(json.loads(testcase.json_data)) - if testcase.HasField("option"): - merge = convert_set_option(testcase.option) - else: - merge = False - call = functools.partial(document.set, data, merge=merge) - _run_testcase(testcase, call, firestore_api, client) - - -@pytest.mark.parametrize("test_proto", _UPDATE_TESTPROTOS) -def test_update_testprotos(test_proto): - testcase = test_proto.update - firestore_api = _mock_firestore_api() - client, document = _make_client_document(firestore_api, testcase) - data = convert_data(json.loads(testcase.json_data)) - if testcase.HasField("precondition"): - option = convert_precondition(testcase.precondition) - else: - option = None - call = functools.partial(document.update, data, option) - _run_testcase(testcase, call, firestore_api, client) - - -@pytest.mark.skip(reason="Python has no way to call update with a list of field paths.") -@pytest.mark.parametrize("test_proto", _UPDATE_PATHS_TESTPROTOS) -def test_update_paths_testprotos(test_proto): # pragma: NO COVER - pass - - -@pytest.mark.parametrize("test_proto", _DELETE_TESTPROTOS) -def test_delete_testprotos(test_proto): - testcase = test_proto.delete - firestore_api = _mock_firestore_api() - client, document = _make_client_document(firestore_api, testcase) - if testcase.HasField("precondition"): - option = convert_precondition(testcase.precondition) - else: - option = None - call = functools.partial(document.delete, option) - _run_testcase(testcase, call, firestore_api, client) - - -@pytest.mark.parametrize("test_proto", _LISTEN_TESTPROTOS) -def test_listen_testprotos(test_proto): # pragma: NO COVER - # test_proto.listen has 'responses' messages, - # 'google.firestore.v1beta1.ListenResponse' - # and then an expected list of 'snapshots' (local 'Snapshot'), containing - # 'docs' (list of 'google.firestore.v1beta1.Document'), - # 'changes' (list of local 'DocChange'), and 'read_time' timestamp.
- from google.cloud.firestore_v1beta1 import Client - from google.cloud.firestore_v1beta1 import DocumentReference - from google.cloud.firestore_v1beta1 import DocumentSnapshot - from google.cloud.firestore_v1beta1 import Watch - import google.auth.credentials - - testcase = test_proto.listen - testname = test_proto.description - - credentials = mock.Mock(spec=google.auth.credentials.Credentials) - - with pytest.deprecated_call(): - client = Client(project="project", credentials=credentials) - - modulename = "google.cloud.firestore_v1beta1.watch" - with mock.patch("%s.Watch.ResumableBidiRpc" % modulename, DummyRpc): - with mock.patch( - "%s.Watch.BackgroundConsumer" % modulename, DummyBackgroundConsumer - ): - with mock.patch( # conformance data sets WATCH_TARGET_ID to 1 - "%s.WATCH_TARGET_ID" % modulename, 1 - ): - snapshots = [] - - def callback(keys, applied_changes, read_time): - snapshots.append((keys, applied_changes, read_time)) - - query = DummyQuery(client=client) - watch = Watch.for_query( - query, callback, DocumentSnapshot, DocumentReference - ) - # conformance data has db string as this - db_str = "projects/projectID/databases/(default)" - watch._firestore._database_string_internal = db_str - - if testcase.is_error: - try: - for proto in testcase.responses: - watch.on_snapshot(proto) - except RuntimeError: - # listen-target-add-wrong-id.textpro - # listen-target-remove.textpro - pass - - else: - for proto in testcase.responses: - watch.on_snapshot(proto) - - assert len(snapshots) == len(testcase.snapshots) - for i, (expected_snapshot, actual_snapshot) in enumerate( - zip(testcase.snapshots, snapshots) - ): - expected_changes = expected_snapshot.changes - actual_changes = actual_snapshot[1] - if len(expected_changes) != len(actual_changes): - raise AssertionError( - "change length mismatch in %s (snapshot #%s)" - % (testname, i) - ) - for y, (expected_change, actual_change) in enumerate( - zip(expected_changes, actual_changes) - ): - expected_change_kind = expected_change.kind - actual_change_kind = actual_change.type.value - if expected_change_kind != actual_change_kind: - raise AssertionError( - "change type mismatch in %s (snapshot #%s, change #%s')" - % (testname, i, y) - ) - - -@pytest.mark.parametrize("test_proto", _QUERY_TESTPROTOS) -def test_query_testprotos(test_proto): # pragma: NO COVER - testcase = test_proto.query - if testcase.is_error: - with pytest.raises(Exception): - query = parse_query(testcase) - query._to_protobuf() - else: - query = parse_query(testcase) - found = query._to_protobuf() - assert found == testcase.query - - -def convert_data(v): - # Replace the strings 'ServerTimestamp' and 'Delete' with the corresponding - # sentinels. 
- from google.cloud.firestore_v1beta1 import ArrayRemove - from google.cloud.firestore_v1beta1 import ArrayUnion - from google.cloud.firestore_v1beta1 import DELETE_FIELD - from google.cloud.firestore_v1beta1 import SERVER_TIMESTAMP - - if v == "ServerTimestamp": - return SERVER_TIMESTAMP - elif v == "Delete": - return DELETE_FIELD - elif isinstance(v, list): - if v[0] == "ArrayRemove": - return ArrayRemove([convert_data(e) for e in v[1:]]) - if v[0] == "ArrayUnion": - return ArrayUnion([convert_data(e) for e in v[1:]]) - return [convert_data(e) for e in v] - elif isinstance(v, dict): - return {k: convert_data(v2) for k, v2 in v.items()} - elif v == "NaN": - return float(v) - else: - return v - - -def convert_set_option(option): - from google.cloud.firestore_v1beta1 import _helpers - - if option.fields: - return [ - _helpers.FieldPath(*field.field).to_api_repr() for field in option.fields - ] - - assert option.all - return True - - -def convert_precondition(precond): - from google.cloud.firestore_v1beta1 import Client - - if precond.HasField("exists"): - return Client.write_option(exists=precond.exists) - - assert precond.HasField("update_time") - return Client.write_option(last_update_time=precond.update_time) - - -class DummyRpc(object): # pragma: NO COVER - def __init__(self, listen, initial_request, should_recover, metadata=None): - self.listen = listen - self.initial_request = initial_request - self.should_recover = should_recover - self.closed = False - self.callbacks = [] - self._metadata = metadata - - def add_done_callback(self, callback): - self.callbacks.append(callback) - - def close(self): - self.closed = True - - -class DummyBackgroundConsumer(object): # pragma: NO COVER - started = False - stopped = False - is_active = True - - def __init__(self, rpc, on_snapshot): - self._rpc = rpc - self.on_snapshot = on_snapshot - - def start(self): - self.started = True - - def stop(self): - self.stopped = True - self.is_active = False - - -class DummyQuery(object): # pragma: NO COVER - def __init__(self, **kw): - self._client = kw["client"] - self._comparator = lambda x, y: 1 - - def _to_protobuf(self): - from google.cloud.firestore_v1beta1.proto import query_pb2 - - query_kwargs = { - "select": None, - "from": None, - "where": None, - "order_by": None, - "start_at": None, - "end_at": None, - } - return query_pb2.StructuredQuery(**query_kwargs) - - -def parse_query(testcase): - # 'query' testcase contains: - # - 'coll_path': collection ref path. - # - 'clauses': array of one or more 'Clause' elements - # - 'query': the actual google.firestore.v1beta1.StructuredQuery message - # to be constructed. - # - 'is_error' (as other testcases). 
- # - # 'Clause' elements are unions of: - # - 'select': [field paths] - # - 'where': (field_path, op, json_value) - # - 'order_by': (field_path, direction) - # - 'offset': int - # - 'limit': int - # - 'start_at': 'Cursor' - # - 'start_after': 'Cursor' - # - 'end_at': 'Cursor' - # - 'end_before': 'Cursor' - # - # 'Cursor' contains either: - # - 'doc_snapshot': 'DocSnapshot' - # - 'json_values': [string] - # - # 'DocSnapshot' contains: - # 'path': str - # 'json_data': str - from google.auth.credentials import Credentials - from google.cloud.firestore_v1beta1 import Client - from google.cloud.firestore_v1beta1 import Query - - _directions = {"asc": Query.ASCENDING, "desc": Query.DESCENDING} - - credentials = mock.create_autospec(Credentials) - - with pytest.deprecated_call(): - client = Client("projectID", credentials) - - path = parse_path(testcase.coll_path) - collection = client.collection(*path) - query = collection - - for clause in testcase.clauses: - kind = clause.WhichOneof("clause") - - if kind == "select": - field_paths = [ - ".".join(field_path.field) for field_path in clause.select.fields - ] - query = query.select(field_paths) - elif kind == "where": - path = ".".join(clause.where.path.field) - value = convert_data(json.loads(clause.where.json_value)) - query = query.where(path, clause.where.op, value) - elif kind == "order_by": - path = ".".join(clause.order_by.path.field) - direction = clause.order_by.direction - direction = _directions.get(direction, direction) - query = query.order_by(path, direction=direction) - elif kind == "offset": - query = query.offset(clause.offset) - elif kind == "limit": - query = query.limit(clause.limit) - elif kind == "start_at": - cursor = parse_cursor(clause.start_at, client) - query = query.start_at(cursor) - elif kind == "start_after": - cursor = parse_cursor(clause.start_after, client) - query = query.start_after(cursor) - elif kind == "end_at": - cursor = parse_cursor(clause.end_at, client) - query = query.end_at(cursor) - elif kind == "end_before": - cursor = parse_cursor(clause.end_before, client) - query = query.end_before(cursor) - else: # pragma: NO COVER - raise ValueError("Unknown query clause: {}".format(kind)) - - return query - - -def parse_path(path): - _, relative = path.split("documents/") - return relative.split("/") - - -def parse_cursor(cursor, client): - from google.cloud.firestore_v1beta1 import DocumentReference - from google.cloud.firestore_v1beta1 import DocumentSnapshot - - if cursor.HasField("doc_snapshot"): - path = parse_path(cursor.doc_snapshot.path) - doc_ref = DocumentReference(*path, client=client) - - return DocumentSnapshot( - reference=doc_ref, - data=json.loads(cursor.doc_snapshot.json_data), - exists=True, - read_time=None, - create_time=None, - update_time=None, - ) - - values = [json.loads(value) for value in cursor.json_values] - return convert_data(values) diff --git a/firestore/tests/unit/v1beta1/test_document.py b/firestore/tests/unit/v1beta1/test_document.py deleted file mode 100644 index f9aca713449a..000000000000 --- a/firestore/tests/unit/v1beta1/test_document.py +++ /dev/null @@ -1,830 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -import unittest - -import mock -import pytest - - -class TestDocumentReference(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.document import DocumentReference - - return DocumentReference - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - collection_id1 = "users" - document_id1 = "alovelace" - collection_id2 = "platform" - document_id2 = "*nix" - client = mock.MagicMock() - client.__hash__.return_value = 1234 - - document = self._make_one( - collection_id1, document_id1, collection_id2, document_id2, client=client - ) - self.assertIs(document._client, client) - expected_path = "/".join( - (collection_id1, document_id1, collection_id2, document_id2) - ) - self.assertEqual(document.path, expected_path) - - def test_constructor_invalid_path(self): - with self.assertRaises(ValueError): - self._make_one() - with self.assertRaises(ValueError): - self._make_one(None, "before", "bad-collection-id", "fifteen") - with self.assertRaises(ValueError): - self._make_one("bad-document-ID", None) - with self.assertRaises(ValueError): - self._make_one("Just", "A-Collection", "Sub") - - def test_constructor_invalid_kwarg(self): - with self.assertRaises(TypeError): - self._make_one("Coh-lek-shun", "Dahk-yu-mehnt", burger=18.75) - - def test___copy__(self): - client = _make_client("rain") - document = self._make_one("a", "b", client=client) - # Access the document path so it is copied. - doc_path = document._document_path - self.assertEqual(doc_path, document._document_path_internal) - - new_document = document.__copy__() - self.assertIsNot(new_document, document) - self.assertIs(new_document._client, document._client) - self.assertEqual(new_document._path, document._path) - self.assertEqual( - new_document._document_path_internal, document._document_path_internal - ) - - def test___deepcopy__calls_copy(self): - client = mock.sentinel.client - document = self._make_one("a", "b", client=client) - document.__copy__ = mock.Mock(return_value=mock.sentinel.new_doc, spec=[]) - - unused_memo = {} - new_document = document.__deepcopy__(unused_memo) - self.assertIs(new_document, mock.sentinel.new_doc) - document.__copy__.assert_called_once_with() - - def test__eq__same_type(self): - document1 = self._make_one("X", "YY", client=mock.sentinel.client) - document2 = self._make_one("X", "ZZ", client=mock.sentinel.client) - document3 = self._make_one("X", "YY", client=mock.sentinel.client2) - document4 = self._make_one("X", "YY", client=mock.sentinel.client) - - pairs = ((document1, document2), (document1, document3), (document2, document3)) - for candidate1, candidate2 in pairs: - # We use == explicitly since assertNotEqual would use !=. - equality_val = candidate1 == candidate2 - self.assertFalse(equality_val) - - # Check the only equal one. 
- self.assertEqual(document1, document4) - self.assertIsNot(document1, document4) - - def test__eq__other_type(self): - document = self._make_one("X", "YY", client=mock.sentinel.client) - other = object() - equality_val = document == other - self.assertFalse(equality_val) - self.assertIs(document.__eq__(other), NotImplemented) - - def test___hash__(self): - client = mock.MagicMock() - client.__hash__.return_value = 234566789 - document = self._make_one("X", "YY", client=client) - self.assertEqual(hash(document), hash(("X", "YY")) + hash(client)) - - def test__ne__same_type(self): - document1 = self._make_one("X", "YY", client=mock.sentinel.client) - document2 = self._make_one("X", "ZZ", client=mock.sentinel.client) - document3 = self._make_one("X", "YY", client=mock.sentinel.client2) - document4 = self._make_one("X", "YY", client=mock.sentinel.client) - - self.assertNotEqual(document1, document2) - self.assertNotEqual(document1, document3) - self.assertNotEqual(document2, document3) - - # We use != explicitly since assertEqual would use ==. - inequality_val = document1 != document4 - self.assertFalse(inequality_val) - self.assertIsNot(document1, document4) - - def test__ne__other_type(self): - document = self._make_one("X", "YY", client=mock.sentinel.client) - other = object() - self.assertNotEqual(document, other) - self.assertIs(document.__ne__(other), NotImplemented) - - def test__document_path_property(self): - project = "hi-its-me-ok-bye" - client = _make_client(project=project) - - collection_id = "then" - document_id = "090909iii" - document = self._make_one(collection_id, document_id, client=client) - doc_path = document._document_path - expected = "projects/{}/databases/{}/documents/{}/{}".format( - project, client._database, collection_id, document_id - ) - self.assertEqual(doc_path, expected) - self.assertIs(document._document_path_internal, doc_path) - - # Make sure value is cached. 
- document._document_path_internal = mock.sentinel.cached - self.assertIs(document._document_path, mock.sentinel.cached) - - def test__document_path_property_no_client(self): - document = self._make_one("hi", "bye") - self.assertIsNone(document._client) - with self.assertRaises(ValueError): - getattr(document, "_document_path") - - self.assertIsNone(document._document_path_internal) - - def test_id_property(self): - document_id = "867-5309" - document = self._make_one("Co-lek-shun", document_id) - self.assertEqual(document.id, document_id) - - def test_parent_property(self): - from google.cloud.firestore_v1beta1.collection import CollectionReference - - collection_id = "grocery-store" - document_id = "market" - client = _make_client() - document = self._make_one(collection_id, document_id, client=client) - - parent = document.parent - self.assertIsInstance(parent, CollectionReference) - self.assertIs(parent._client, client) - self.assertEqual(parent._path, (collection_id,)) - - def test_collection_factory(self): - from google.cloud.firestore_v1beta1.collection import CollectionReference - - collection_id = "grocery-store" - document_id = "market" - new_collection = "fruits" - client = _make_client() - document = self._make_one(collection_id, document_id, client=client) - - child = document.collection(new_collection) - self.assertIsInstance(child, CollectionReference) - self.assertIs(child._client, client) - self.assertEqual(child._path, (collection_id, document_id, new_collection)) - - @staticmethod - def _write_pb_for_create(document_path, document_data): - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1 import _helpers - - return write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=_helpers.encode_dict(document_data) - ), - current_document=common_pb2.Precondition(exists=False), - ) - - @staticmethod - def _make_commit_repsonse(write_results=None): - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - - response = mock.create_autospec(firestore_pb2.CommitResponse) - response.write_results = write_results or [mock.sentinel.write_result] - response.commit_time = mock.sentinel.commit_time - return response - - def test_create(self): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("dignity") - client._firestore_api_internal = firestore_api - - # Actually make a document and call create(). - document = self._make_one("foo", "twelve", client=client) - document_data = {"hello": "goodbye", "count": 99} - write_result = document.create(document_data) - - # Verify the response and the mocks. - self.assertIs(write_result, mock.sentinel.write_result) - write_pb = self._write_pb_for_create(document._document_path, document_data) - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_create_empty(self): - # Create a minimal fake GAPIC with a dummy response. 
- from google.cloud.firestore_v1beta1.document import DocumentReference - from google.cloud.firestore_v1beta1.document import DocumentSnapshot - - firestore_api = mock.Mock(spec=["commit"]) - document_reference = mock.create_autospec(DocumentReference) - snapshot = mock.create_autospec(DocumentSnapshot) - snapshot.exists = True - document_reference.get.return_value = snapshot - firestore_api.commit.return_value = self._make_commit_repsonse( - write_results=[document_reference] - ) - - # Attach the fake GAPIC to a real client. - client = _make_client("dignity") - client._firestore_api_internal = firestore_api - client.get_all = mock.MagicMock() - client.get_all.exists.return_value = True - - # Actually make a document and call create(). - document = self._make_one("foo", "twelve", client=client) - document_data = {} - write_result = document.create(document_data) - self.assertTrue(write_result.get().exists) - - @staticmethod - def _write_pb_for_set(document_path, document_data, merge): - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1 import _helpers - - write_pbs = write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=_helpers.encode_dict(document_data) - ) - ) - if merge: - field_paths = [ - field_path - for field_path, value in _helpers.extract_fields( - document_data, _helpers.FieldPath() - ) - ] - field_paths = [ - field_path.to_api_repr() for field_path in sorted(field_paths) - ] - mask = common_pb2.DocumentMask(field_paths=sorted(field_paths)) - write_pbs.update_mask.CopyFrom(mask) - return write_pbs - - def _set_helper(self, merge=False, **option_kwargs): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("db-dee-bee") - client._firestore_api_internal = firestore_api - - # Actually make a document and call create(). - document = self._make_one("User", "Interface", client=client) - document_data = {"And": 500, "Now": b"\xba\xaa\xaa \xba\xaa\xaa"} - write_result = document.set(document_data, merge) - - # Verify the response and the mocks. - self.assertIs(write_result, mock.sentinel.write_result) - write_pb = self._write_pb_for_set(document._document_path, document_data, merge) - - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_set(self): - self._set_helper() - - def test_set_merge(self): - self._set_helper(merge=True) - - @staticmethod - def _write_pb_for_update(document_path, update_values, field_paths): - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1 import _helpers - - return write_pb2.Write( - update=document_pb2.Document( - name=document_path, fields=_helpers.encode_dict(update_values) - ), - update_mask=common_pb2.DocumentMask(field_paths=field_paths), - current_document=common_pb2.Precondition(exists=True), - ) - - def _update_helper(self, **option_kwargs): - from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD - - # Create a minimal fake GAPIC with a dummy response. 
- firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("potato-chip") - client._firestore_api_internal = firestore_api - - # Actually make a document and call create(). - document = self._make_one("baked", "Alaska", client=client) - # "Cheat" and use OrderedDict-s so that iteritems() is deterministic. - field_updates = collections.OrderedDict( - (("hello", 1), ("then.do", False), ("goodbye", DELETE_FIELD)) - ) - if option_kwargs: - option = client.write_option(**option_kwargs) - write_result = document.update(field_updates, option=option) - else: - option = None - write_result = document.update(field_updates) - - # Verify the response and the mocks. - self.assertIs(write_result, mock.sentinel.write_result) - update_values = { - "hello": field_updates["hello"], - "then": {"do": field_updates["then.do"]}, - } - field_paths = list(field_updates.keys()) - write_pb = self._write_pb_for_update( - document._document_path, update_values, sorted(field_paths) - ) - if option is not None: - option.modify_write(write_pb) - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_update_with_exists(self): - with self.assertRaises(ValueError): - self._update_helper(exists=True) - - def test_update(self): - self._update_helper() - - def test_update_with_precondition(self): - from google.protobuf import timestamp_pb2 - - timestamp = timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244) - self._update_helper(last_update_time=timestamp) - - def test_empty_update(self): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("potato-chip") - client._firestore_api_internal = firestore_api - - # Actually make a document and call create(). - document = self._make_one("baked", "Alaska", client=client) - # "Cheat" and use OrderedDict-s so that iteritems() is deterministic. - field_updates = {} - with self.assertRaises(ValueError): - document.update(field_updates) - - def _delete_helper(self, **option_kwargs): - from google.cloud.firestore_v1beta1.proto import write_pb2 - - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["commit"]) - firestore_api.commit.return_value = self._make_commit_repsonse() - - # Attach the fake GAPIC to a real client. - client = _make_client("donut-base") - client._firestore_api_internal = firestore_api - - # Actually make a document and call delete(). - document = self._make_one("where", "we-are", client=client) - if option_kwargs: - option = client.write_option(**option_kwargs) - delete_time = document.delete(option=option) - else: - option = None - delete_time = document.delete() - - # Verify the response and the mocks. 
- self.assertIs(delete_time, mock.sentinel.commit_time) - write_pb = write_pb2.Write(delete=document._document_path) - if option is not None: - option.modify_write(write_pb) - firestore_api.commit.assert_called_once_with( - client._database_string, - [write_pb], - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_delete(self): - self._delete_helper() - - def test_delete_with_option(self): - from google.protobuf import timestamp_pb2 - - timestamp_pb = timestamp_pb2.Timestamp(seconds=1058655101, nanos=100022244) - self._delete_helper(last_update_time=timestamp_pb) - - def _get_helper(self, field_paths=None, use_transaction=False, not_found=False): - from google.api_core.exceptions import NotFound - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.transaction import Transaction - - # Create a minimal fake GAPIC with a dummy response. - create_time = 123 - update_time = 234 - firestore_api = mock.Mock(spec=["get_document"]) - response = mock.create_autospec(document_pb2.Document) - response.fields = {} - response.create_time = create_time - response.update_time = update_time - - if not_found: - firestore_api.get_document.side_effect = NotFound("testing") - else: - firestore_api.get_document.return_value = response - - client = _make_client("donut-base") - client._firestore_api_internal = firestore_api - - document = self._make_one("where", "we-are", client=client) - - if use_transaction: - transaction = Transaction(client) - transaction_id = transaction._id = b"asking-me-2" - else: - transaction = None - - snapshot = document.get(field_paths=field_paths, transaction=transaction) - - self.assertIs(snapshot.reference, document) - if not_found: - self.assertIsNone(snapshot._data) - self.assertFalse(snapshot.exists) - self.assertIsNone(snapshot.read_time) - self.assertIsNone(snapshot.create_time) - self.assertIsNone(snapshot.update_time) - else: - self.assertEqual(snapshot.to_dict(), {}) - self.assertTrue(snapshot.exists) - self.assertIsNone(snapshot.read_time) - self.assertIs(snapshot.create_time, create_time) - self.assertIs(snapshot.update_time, update_time) - - # Verify the request made to the API - if field_paths is not None: - mask = common_pb2.DocumentMask(field_paths=sorted(field_paths)) - else: - mask = None - - if use_transaction: - expected_transaction_id = transaction_id - else: - expected_transaction_id = None - - firestore_api.get_document.assert_called_once_with( - document._document_path, - mask=mask, - transaction=expected_transaction_id, - metadata=client._rpc_metadata, - ) - - def test_get_not_found(self): - self._get_helper(not_found=True) - - def test_get_default(self): - self._get_helper() - - def test_get_w_string_field_path(self): - with self.assertRaises(ValueError): - self._get_helper(field_paths="foo") - - def test_get_with_field_path(self): - self._get_helper(field_paths=["foo"]) - - def test_get_with_multiple_field_paths(self): - self._get_helper(field_paths=["foo", "bar.baz"]) - - def test_get_with_transaction(self): - self._get_helper(use_transaction=True) - - def _collections_helper(self, page_size=None): - from google.api_core.page_iterator import Iterator - from google.api_core.page_iterator import Page - from google.cloud.firestore_v1beta1.collection import CollectionReference - from google.cloud.firestore_v1beta1.gapic.firestore_client import ( - FirestoreClient, - ) - - class _Iterator(Iterator): - def __init__(self, pages): - 
super(_Iterator, self).__init__(client=None) - self._pages = pages - - def _next_page(self): - if self._pages: - page, self._pages = self._pages[0], self._pages[1:] - return Page(self, page, self.item_to_value) - - collection_ids = ["coll-1", "coll-2"] - iterator = _Iterator(pages=[collection_ids]) - api_client = mock.create_autospec(FirestoreClient) - api_client.list_collection_ids.return_value = iterator - - client = _make_client() - client._firestore_api_internal = api_client - - # Actually make a document and call delete(). - document = self._make_one("where", "we-are", client=client) - if page_size is not None: - collections = list(document.collections(page_size=page_size)) - else: - collections = list(document.collections()) - - # Verify the response and the mocks. - self.assertEqual(len(collections), len(collection_ids)) - for collection, collection_id in zip(collections, collection_ids): - self.assertIsInstance(collection, CollectionReference) - self.assertEqual(collection.parent, document) - self.assertEqual(collection.id, collection_id) - - api_client.list_collection_ids.assert_called_once_with( - document._document_path, page_size=page_size, metadata=client._rpc_metadata - ) - - def test_collections_wo_page_size(self): - self._collections_helper() - - def test_collections_w_page_size(self): - self._collections_helper(page_size=10) - - @mock.patch("google.cloud.firestore_v1beta1.document.Watch", autospec=True) - def test_on_snapshot(self, watch): - client = mock.Mock(_database_string="sprinklez", spec=["_database_string"]) - document = self._make_one("yellow", "mellow", client=client) - document.on_snapshot(None) - watch.for_document.assert_called_once() - - -class TestDocumentSnapshot(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.document import DocumentSnapshot - - return DocumentSnapshot - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def _make_reference(self, *args, **kwargs): - from google.cloud.firestore_v1beta1.document import DocumentReference - - return DocumentReference(*args, **kwargs) - - def _make_w_ref(self, ref_path=("a", "b"), data={}, exists=True): - client = mock.sentinel.client - reference = self._make_reference(*ref_path, client=client) - return self._make_one( - reference, - data, - exists, - mock.sentinel.read_time, - mock.sentinel.create_time, - mock.sentinel.update_time, - ) - - def test_constructor(self): - client = mock.sentinel.client - reference = self._make_reference("hi", "bye", client=client) - data = {"zoop": 83} - snapshot = self._make_one( - reference, - data, - True, - mock.sentinel.read_time, - mock.sentinel.create_time, - mock.sentinel.update_time, - ) - self.assertIs(snapshot._reference, reference) - self.assertEqual(snapshot._data, data) - self.assertIsNot(snapshot._data, data) # Make sure copied. 
- self.assertTrue(snapshot._exists) - self.assertIs(snapshot.read_time, mock.sentinel.read_time) - self.assertIs(snapshot.create_time, mock.sentinel.create_time) - self.assertIs(snapshot.update_time, mock.sentinel.update_time) - - def test___eq___other_type(self): - snapshot = self._make_w_ref() - other = object() - self.assertFalse(snapshot == other) - - def test___eq___different_reference_same_data(self): - snapshot = self._make_w_ref(("a", "b")) - other = self._make_w_ref(("c", "d")) - self.assertFalse(snapshot == other) - - def test___eq___same_reference_different_data(self): - snapshot = self._make_w_ref(("a", "b")) - other = self._make_w_ref(("a", "b"), {"foo": "bar"}) - self.assertFalse(snapshot == other) - - def test___eq___same_reference_same_data(self): - snapshot = self._make_w_ref(("a", "b"), {"foo": "bar"}) - other = self._make_w_ref(("a", "b"), {"foo": "bar"}) - self.assertTrue(snapshot == other) - - def test___hash__(self): - from google.protobuf import timestamp_pb2 - - client = mock.MagicMock() - client.__hash__.return_value = 234566789 - reference = self._make_reference("hi", "bye", client=client) - data = {"zoop": 83} - update_time = timestamp_pb2.Timestamp(seconds=123456, nanos=123456789) - snapshot = self._make_one( - reference, data, True, None, mock.sentinel.create_time, update_time - ) - self.assertEqual( - hash(snapshot), hash(reference) + hash(123456) + hash(123456789) - ) - - def test__client_property(self): - reference = self._make_reference( - "ok", "fine", "now", "fore", client=mock.sentinel.client - ) - snapshot = self._make_one(reference, {}, False, None, None, None) - self.assertIs(snapshot._client, mock.sentinel.client) - - def test_exists_property(self): - reference = mock.sentinel.reference - - snapshot1 = self._make_one(reference, {}, False, None, None, None) - self.assertFalse(snapshot1.exists) - snapshot2 = self._make_one(reference, {}, True, None, None, None) - self.assertTrue(snapshot2.exists) - - def test_id_property(self): - document_id = "around" - reference = self._make_reference( - "look", document_id, client=mock.sentinel.client - ) - snapshot = self._make_one(reference, {}, True, None, None, None) - self.assertEqual(snapshot.id, document_id) - self.assertEqual(reference.id, document_id) - - def test_reference_property(self): - snapshot = self._make_one(mock.sentinel.reference, {}, True, None, None, None) - self.assertIs(snapshot.reference, mock.sentinel.reference) - - def test_get(self): - data = {"one": {"bold": "move"}} - snapshot = self._make_one(None, data, True, None, None, None) - - first_read = snapshot.get("one") - second_read = snapshot.get("one") - self.assertEqual(first_read, data.get("one")) - self.assertIsNot(first_read, data.get("one")) - self.assertEqual(first_read, second_read) - self.assertIsNot(first_read, second_read) - - with self.assertRaises(KeyError): - snapshot.get("two") - - def test_nonexistent_snapshot(self): - snapshot = self._make_one(None, None, False, None, None, None) - self.assertIsNone(snapshot.get("one")) - - def test_to_dict(self): - data = {"a": 10, "b": ["definitely", "mutable"], "c": {"45": 50}} - snapshot = self._make_one(None, data, True, None, None, None) - as_dict = snapshot.to_dict() - self.assertEqual(as_dict, data) - self.assertIsNot(as_dict, data) - # Check that the data remains unchanged. 
- as_dict["b"].append("hi") - self.assertEqual(data, snapshot.to_dict()) - self.assertNotEqual(data, as_dict) - - def test_non_existent(self): - snapshot = self._make_one(None, None, False, None, None, None) - as_dict = snapshot.to_dict() - self.assertIsNone(as_dict) - - -class Test__get_document_path(unittest.TestCase): - @staticmethod - def _call_fut(client, path): - from google.cloud.firestore_v1beta1.document import _get_document_path - - return _get_document_path(client, path) - - def test_it(self): - project = "prah-jekt" - client = _make_client(project=project) - path = ("Some", "Document", "Child", "Shockument") - document_path = self._call_fut(client, path) - - expected = "projects/{}/databases/{}/documents/{}".format( - project, client._database, "/".join(path) - ) - self.assertEqual(document_path, expected) - - -class Test__consume_single_get(unittest.TestCase): - @staticmethod - def _call_fut(response_iterator): - from google.cloud.firestore_v1beta1.document import _consume_single_get - - return _consume_single_get(response_iterator) - - def test_success(self): - response_iterator = iter([mock.sentinel.result]) - result = self._call_fut(response_iterator) - self.assertIs(result, mock.sentinel.result) - - def test_failure_not_enough(self): - response_iterator = iter([]) - with self.assertRaises(ValueError): - self._call_fut(response_iterator) - - def test_failure_too_many(self): - response_iterator = iter([None, None]) - with self.assertRaises(ValueError): - self._call_fut(response_iterator) - - -class Test__first_write_result(unittest.TestCase): - @staticmethod - def _call_fut(write_results): - from google.cloud.firestore_v1beta1.document import _first_write_result - - return _first_write_result(write_results) - - def test_success(self): - from google.protobuf import timestamp_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - single_result = write_pb2.WriteResult( - update_time=timestamp_pb2.Timestamp(seconds=1368767504, nanos=458000123) - ) - write_results = [single_result] - result = self._call_fut(write_results) - self.assertIs(result, single_result) - - def test_failure_not_enough(self): - write_results = [] - with self.assertRaises(ValueError): - self._call_fut(write_results) - - def test_more_than_one(self): - from google.cloud.firestore_v1beta1.proto import write_pb2 - - result1 = write_pb2.WriteResult() - result2 = write_pb2.WriteResult() - write_results = [result1, result2] - result = self._call_fut(write_results) - self.assertIs(result, result1) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="project-project"): - from google.cloud.firestore_v1beta1.client import Client - - credentials = _make_credentials() - - with pytest.deprecated_call(): - return Client(project=project, credentials=credentials) diff --git a/firestore/tests/unit/v1beta1/test_field_path.py b/firestore/tests/unit/v1beta1/test_field_path.py deleted file mode 100644 index 22f314e612af..000000000000 --- a/firestore/tests/unit/v1beta1/test_field_path.py +++ /dev/null @@ -1,495 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import mock - - -class Test__tokenize_field_path(unittest.TestCase): - @staticmethod - def _call_fut(path): - from google.cloud.firestore_v1beta1 import field_path - - return field_path._tokenize_field_path(path) - - def _expect(self, path, split_path): - self.assertEqual(list(self._call_fut(path)), split_path) - - def test_w_empty(self): - self._expect("", []) - - def test_w_single_dot(self): - self._expect(".", ["."]) - - def test_w_single_simple(self): - self._expect("abc", ["abc"]) - - def test_w_single_quoted(self): - self._expect("`c*de`", ["`c*de`"]) - - def test_w_quoted_embedded_dot(self): - self._expect("`c*.de`", ["`c*.de`"]) - - def test_w_quoted_escaped_backtick(self): - self._expect(r"`c*\`de`", [r"`c*\`de`"]) - - def test_w_dotted_quoted(self): - self._expect("`*`.`~`", ["`*`", ".", "`~`"]) - - def test_w_dotted(self): - self._expect("a.b.`c*de`", ["a", ".", "b", ".", "`c*de`"]) - - def test_w_dotted_escaped(self): - self._expect("_0.`1`.`+2`", ["_0", ".", "`1`", ".", "`+2`"]) - - def test_w_unconsumed_characters(self): - path = "a~b" - with self.assertRaises(ValueError): - list(self._call_fut(path)) - - -class Test_split_field_path(unittest.TestCase): - @staticmethod - def _call_fut(path): - from google.cloud.firestore_v1beta1 import field_path - - return field_path.split_field_path(path) - - def test_w_single_dot(self): - with self.assertRaises(ValueError): - self._call_fut(".") - - def test_w_leading_dot(self): - with self.assertRaises(ValueError): - self._call_fut(".a.b.c") - - def test_w_trailing_dot(self): - with self.assertRaises(ValueError): - self._call_fut("a.b.") - - def test_w_missing_dot(self): - with self.assertRaises(ValueError): - self._call_fut("a`c*de`f") - - def test_w_half_quoted_field(self): - with self.assertRaises(ValueError): - self._call_fut("`c*de") - - def test_w_empty(self): - self.assertEqual(self._call_fut(""), []) - - def test_w_simple_field(self): - self.assertEqual(self._call_fut("a"), ["a"]) - - def test_w_dotted_field(self): - self.assertEqual(self._call_fut("a.b.cde"), ["a", "b", "cde"]) - - def test_w_quoted_field(self): - self.assertEqual(self._call_fut("a.b.`c*de`"), ["a", "b", "`c*de`"]) - - def test_w_quoted_field_escaped_backtick(self): - self.assertEqual(self._call_fut(r"`c*\`de`"), [r"`c*\`de`"]) - - -class Test_parse_field_path(unittest.TestCase): - @staticmethod - def _call_fut(path): - from google.cloud.firestore_v1beta1 import field_path - - return field_path.parse_field_path(path) - - def test_wo_escaped_names(self): - self.assertEqual(self._call_fut("a.b.c"), ["a", "b", "c"]) - - def test_w_escaped_backtick(self): - self.assertEqual(self._call_fut("`a\\`b`.c.d"), ["a`b", "c", "d"]) - - def test_w_escaped_backslash(self): - self.assertEqual(self._call_fut("`a\\\\b`.c.d"), ["a\\b", "c", "d"]) - - def test_w_first_name_escaped_wo_closing_backtick(self): - with self.assertRaises(ValueError): - self._call_fut("`a\\`b.c.d") - - -class Test_render_field_path(unittest.TestCase): - @staticmethod - def _call_fut(field_names): - from google.cloud.firestore_v1beta1 import field_path - - return 
field_path.render_field_path(field_names) - - def test_w_empty(self): - self.assertEqual(self._call_fut([]), "") - - def test_w_one_simple(self): - self.assertEqual(self._call_fut(["a"]), "a") - - def test_w_one_starts_w_digit(self): - self.assertEqual(self._call_fut(["0abc"]), "`0abc`") - - def test_w_one_w_non_alphanum(self): - self.assertEqual(self._call_fut(["a b c"]), "`a b c`") - - def test_w_one_w_backtick(self): - self.assertEqual(self._call_fut(["a`b"]), "`a\\`b`") - - def test_w_one_w_backslash(self): - self.assertEqual(self._call_fut(["a\\b"]), "`a\\\\b`") - - def test_multiple(self): - self.assertEqual(self._call_fut(["a", "b", "c"]), "a.b.c") - - -class Test_get_nested_value(unittest.TestCase): - - DATA = { - "top1": {"middle2": {"bottom3": 20, "bottom4": 22}, "middle5": True}, - "top6": b"\x00\x01 foo", - } - - @staticmethod - def _call_fut(path, data): - from google.cloud.firestore_v1beta1 import field_path - - return field_path.get_nested_value(path, data) - - def test_simple(self): - self.assertIs(self._call_fut("top1", self.DATA), self.DATA["top1"]) - - def test_nested(self): - self.assertIs( - self._call_fut("top1.middle2", self.DATA), self.DATA["top1"]["middle2"] - ) - self.assertIs( - self._call_fut("top1.middle2.bottom3", self.DATA), - self.DATA["top1"]["middle2"]["bottom3"], - ) - - def test_missing_top_level(self): - from google.cloud.firestore_v1beta1.field_path import _FIELD_PATH_MISSING_TOP - - field_path = "top8" - with self.assertRaises(KeyError) as exc_info: - self._call_fut(field_path, self.DATA) - - err_msg = _FIELD_PATH_MISSING_TOP.format(field_path) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - def test_missing_key(self): - from google.cloud.firestore_v1beta1.field_path import _FIELD_PATH_MISSING_KEY - - with self.assertRaises(KeyError) as exc_info: - self._call_fut("top1.middle2.nope", self.DATA) - - err_msg = _FIELD_PATH_MISSING_KEY.format("nope", "top1.middle2") - self.assertEqual(exc_info.exception.args, (err_msg,)) - - def test_bad_type(self): - from google.cloud.firestore_v1beta1.field_path import _FIELD_PATH_WRONG_TYPE - - with self.assertRaises(KeyError) as exc_info: - self._call_fut("top6.middle7", self.DATA) - - err_msg = _FIELD_PATH_WRONG_TYPE.format("top6", "middle7") - self.assertEqual(exc_info.exception.args, (err_msg,)) - - -class TestFieldPath(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1 import field_path - - return field_path.FieldPath - - def _make_one(self, *args): - klass = self._get_target_class() - return klass(*args) - - def test_ctor_w_none_in_part(self): - with self.assertRaises(ValueError): - self._make_one("a", None, "b") - - def test_ctor_w_empty_string_in_part(self): - with self.assertRaises(ValueError): - self._make_one("a", "", "b") - - def test_ctor_w_integer_part(self): - with self.assertRaises(ValueError): - self._make_one("a", 3, "b") - - def test_ctor_w_list(self): - parts = ["a", "b", "c"] - with self.assertRaises(ValueError): - self._make_one(parts) - - def test_ctor_w_tuple(self): - parts = ("a", "b", "c") - with self.assertRaises(ValueError): - self._make_one(parts) - - def test_ctor_w_iterable_part(self): - with self.assertRaises(ValueError): - self._make_one("a", ["a"], "b") - - def test_constructor_w_single_part(self): - field_path = self._make_one("a") - self.assertEqual(field_path.parts, ("a",)) - - def test_constructor_w_multiple_parts(self): - field_path = self._make_one("a", "b", "c") - self.assertEqual(field_path.parts, ("a", "b", "c")) - 
- def test_ctor_w_invalid_chars_in_part(self): - invalid_parts = ("~", "*", "/", "[", "]", ".") - for invalid_part in invalid_parts: - field_path = self._make_one(invalid_part) - self.assertEqual(field_path.parts, (invalid_part,)) - - def test_ctor_w_double_dots(self): - field_path = self._make_one("a..b") - self.assertEqual(field_path.parts, ("a..b",)) - - def test_ctor_w_unicode(self): - field_path = self._make_one("一", "二", "三") - self.assertEqual(field_path.parts, ("一", "二", "三")) - - def test_from_api_repr_w_empty_string(self): - api_repr = "" - with self.assertRaises(ValueError): - self._get_target_class().from_api_repr(api_repr) - - def test_from_api_repr_w_empty_field_name(self): - api_repr = "a..b" - with self.assertRaises(ValueError): - self._get_target_class().from_api_repr(api_repr) - - def test_from_api_repr_w_invalid_chars(self): - invalid_parts = ("~", "*", "/", "[", "]", ".") - for invalid_part in invalid_parts: - with self.assertRaises(ValueError): - self._get_target_class().from_api_repr(invalid_part) - - def test_from_api_repr_w_ascii_single(self): - api_repr = "a" - field_path = self._get_target_class().from_api_repr(api_repr) - self.assertEqual(field_path.parts, ("a",)) - - def test_from_api_repr_w_ascii_dotted(self): - api_repr = "a.b.c" - field_path = self._get_target_class().from_api_repr(api_repr) - self.assertEqual(field_path.parts, ("a", "b", "c")) - - def test_from_api_repr_w_non_ascii_dotted_non_quoted(self): - api_repr = "a.一" - with self.assertRaises(ValueError): - self._get_target_class().from_api_repr(api_repr) - - def test_from_api_repr_w_non_ascii_dotted_quoted(self): - api_repr = "a.`一`" - field_path = self._get_target_class().from_api_repr(api_repr) - self.assertEqual(field_path.parts, ("a", "一")) - - def test_from_string_w_empty_string(self): - path_string = "" - with self.assertRaises(ValueError): - self._get_target_class().from_string(path_string) - - def test_from_string_w_empty_field_name(self): - path_string = "a..b" - with self.assertRaises(ValueError): - self._get_target_class().from_string(path_string) - - def test_from_string_w_leading_dot(self): - path_string = ".b.c" - with self.assertRaises(ValueError): - self._get_target_class().from_string(path_string) - - def test_from_string_w_trailing_dot(self): - path_string = "a.b." 
- with self.assertRaises(ValueError): - self._get_target_class().from_string(path_string) - - def test_from_string_w_leading_invalid_chars(self): - invalid_paths = ("~", "*", "/", "[", "]") - for invalid_path in invalid_paths: - field_path = self._get_target_class().from_string(invalid_path) - self.assertEqual(field_path.parts, (invalid_path,)) - - def test_from_string_w_embedded_invalid_chars(self): - invalid_paths = ("a~b", "x*y", "f/g", "h[j", "k]l") - for invalid_path in invalid_paths: - with self.assertRaises(ValueError): - self._get_target_class().from_string(invalid_path) - - def test_from_string_w_ascii_single(self): - path_string = "a" - field_path = self._get_target_class().from_string(path_string) - self.assertEqual(field_path.parts, ("a",)) - - def test_from_string_w_ascii_dotted(self): - path_string = "a.b.c" - field_path = self._get_target_class().from_string(path_string) - self.assertEqual(field_path.parts, ("a", "b", "c")) - - def test_from_string_w_non_ascii_dotted(self): - path_string = "a.一" - field_path = self._get_target_class().from_string(path_string) - self.assertEqual(field_path.parts, ("a", "一")) - - def test___hash___w_single_part(self): - field_path = self._make_one("a") - self.assertEqual(hash(field_path), hash("a")) - - def test___hash___w_multiple_parts(self): - field_path = self._make_one("a", "b") - self.assertEqual(hash(field_path), hash("a.b")) - - def test___hash___w_escaped_parts(self): - field_path = self._make_one("a", "3") - self.assertEqual(hash(field_path), hash("a.`3`")) - - def test___eq___w_matching_type(self): - field_path = self._make_one("a", "b") - string_path = self._get_target_class().from_string("a.b") - self.assertEqual(field_path, string_path) - - def test___eq___w_non_matching_type(self): - field_path = self._make_one("a", "c") - other = mock.Mock() - other.parts = "a", "b" - self.assertNotEqual(field_path, other) - - def test___lt___w_matching_type(self): - field_path = self._make_one("a", "b") - string_path = self._get_target_class().from_string("a.c") - self.assertTrue(field_path < string_path) - - def test___lt___w_non_matching_type(self): - field_path = self._make_one("a", "b") - other = object() - # Python 2 doesn't raise TypeError here, but Python3 does. - self.assertIs(field_path.__lt__(other), NotImplemented) - - def test___add__(self): - path1 = "a123", "b456" - path2 = "c789", "d012" - path3 = "c789.d012" - field_path1 = self._make_one(*path1) - field_path1_string = self._make_one(*path1) - field_path2 = self._make_one(*path2) - field_path1 += field_path2 - field_path1_string += path3 - field_path2 = field_path2 + self._make_one(*path1) - self.assertEqual(field_path1, self._make_one(*(path1 + path2))) - self.assertEqual(field_path2, self._make_one(*(path2 + path1))) - self.assertEqual(field_path1_string, field_path1) - self.assertNotEqual(field_path1, field_path2) - with self.assertRaises(TypeError): - field_path1 + 305 - - def test_to_api_repr_a(self): - parts = "a" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "a") - - def test_to_api_repr_backtick(self): - parts = "`" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), r"`\``") - - def test_to_api_repr_dot(self): - parts = "." 
- field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "`.`") - - def test_to_api_repr_slash(self): - parts = "\\" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), r"`\\`") - - def test_to_api_repr_double_slash(self): - parts = r"\\" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), r"`\\\\`") - - def test_to_api_repr_underscore(self): - parts = "_33132" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "_33132") - - def test_to_api_repr_unicode_non_simple(self): - parts = "一" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "`一`") - - def test_to_api_repr_number_non_simple(self): - parts = "03" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "`03`") - - def test_to_api_repr_simple_with_dot(self): - field_path = self._make_one("a.b") - self.assertEqual(field_path.to_api_repr(), "`a.b`") - - def test_to_api_repr_non_simple_with_dot(self): - parts = "a.一" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "`a.一`") - - def test_to_api_repr_simple(self): - parts = "a0332432" - field_path = self._make_one(parts) - self.assertEqual(field_path.to_api_repr(), "a0332432") - - def test_to_api_repr_chain(self): - parts = "a", "`", "\\", "_3", "03", "a03", "\\\\", "a0332432", "一" - field_path = self._make_one(*parts) - self.assertEqual( - field_path.to_api_repr(), r"a.`\``.`\\`._3.`03`.a03.`\\\\`.a0332432.`一`" - ) - - def test_eq_or_parent_same(self): - field_path = self._make_one("a", "b") - other = self._make_one("a", "b") - self.assertTrue(field_path.eq_or_parent(other)) - - def test_eq_or_parent_prefix(self): - field_path = self._make_one("a", "b") - other = self._make_one("a", "b", "c") - self.assertTrue(field_path.eq_or_parent(other)) - self.assertTrue(other.eq_or_parent(field_path)) - - def test_eq_or_parent_no_prefix(self): - field_path = self._make_one("a", "b") - other = self._make_one("d", "e", "f") - self.assertFalse(field_path.eq_or_parent(other)) - self.assertFalse(other.eq_or_parent(field_path)) - - def test_lineage_empty(self): - field_path = self._make_one() - expected = set() - self.assertEqual(field_path.lineage(), expected) - - def test_lineage_single(self): - field_path = self._make_one("a") - expected = set() - self.assertEqual(field_path.lineage(), expected) - - def test_lineage_nested(self): - field_path = self._make_one("a", "b", "c") - expected = set([self._make_one("a"), self._make_one("a", "b")]) - self.assertEqual(field_path.lineage(), expected) diff --git a/firestore/tests/unit/v1beta1/test_order.py b/firestore/tests/unit/v1beta1/test_order.py deleted file mode 100644 index a68f3ae1b250..000000000000 --- a/firestore/tests/unit/v1beta1/test_order.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http:#www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import mock -import six -import unittest - -from google.cloud.firestore_v1beta1._helpers import encode_value, GeoPoint -from google.cloud.firestore_v1beta1.order import Order -from google.cloud.firestore_v1beta1.order import TypeOrder - -from google.cloud.firestore_v1beta1.proto import document_pb2 - -from google.protobuf import timestamp_pb2 - - -class TestOrder(unittest.TestCase): - - if six.PY2: - assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.order import Order - - return Order - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_order(self): - # Constants used to represent min/max values of storage types. - int_max_value = 2 ** 31 - 1 - int_min_value = -(2 ** 31) - float_min_value = 1.175494351 ** -38 - float_nan = float("nan") - inf = float("inf") - - groups = [None] * 65 - - groups[0] = [nullValue()] - - groups[1] = [_boolean_value(False)] - groups[2] = [_boolean_value(True)] - - # numbers - groups[3] = [_double_value(float_nan), _double_value(float_nan)] - groups[4] = [_double_value(-inf)] - groups[5] = [_int_value(int_min_value - 1)] - groups[6] = [_int_value(int_min_value)] - groups[7] = [_double_value(-1.1)] - # Integers and Doubles order the same. - groups[8] = [_int_value(-1), _double_value(-1.0)] - groups[9] = [_double_value(-float_min_value)] - # zeros all compare the same. - groups[10] = [ - _int_value(0), - _double_value(-0.0), - _double_value(0.0), - _double_value(+0.0), - ] - groups[11] = [_double_value(float_min_value)] - groups[12] = [_int_value(1), _double_value(1.0)] - groups[13] = [_double_value(1.1)] - groups[14] = [_int_value(int_max_value)] - groups[15] = [_int_value(int_max_value + 1)] - groups[16] = [_double_value(inf)] - - groups[17] = [_timestamp_value(123, 0)] - groups[18] = [_timestamp_value(123, 123)] - groups[19] = [_timestamp_value(345, 0)] - - # strings - groups[20] = [_string_value("")] - groups[21] = [_string_value("\u0000\ud7ff\ue000\uffff")] - groups[22] = [_string_value("(╯°□°)╯︵ ┻━┻")] - groups[23] = [_string_value("a")] - groups[24] = [_string_value("abc def")] - # latin small letter e + combining acute accent + latin small letter b - groups[25] = [_string_value("e\u0301b")] - groups[26] = [_string_value("æ")] - # latin small letter e with acute accent + latin small letter a - groups[27] = [_string_value("\u00e9a")] - - # blobs - groups[28] = [_blob_value(b"")] - groups[29] = [_blob_value(b"\x00")] - groups[30] = [_blob_value(b"\x00\x01\x02\x03\x04")] - groups[31] = [_blob_value(b"\x00\x01\x02\x04\x03")] - groups[32] = [_blob_value(b"\x7f")] - - # resource names - groups[33] = [_reference_value("projects/p1/databases/d1/documents/c1/doc1")] - groups[34] = [_reference_value("projects/p1/databases/d1/documents/c1/doc2")] - groups[35] = [ - _reference_value("projects/p1/databases/d1/documents/c1/doc2/c2/doc1") - ] - groups[36] = [ - _reference_value("projects/p1/databases/d1/documents/c1/doc2/c2/doc2") - ] - groups[37] = [_reference_value("projects/p1/databases/d1/documents/c10/doc1")] - groups[38] = [_reference_value("projects/p1/databases/d1/documents/c2/doc1")] - groups[39] = [_reference_value("projects/p2/databases/d2/documents/c1/doc1")] - groups[40] = [_reference_value("projects/p2/databases/d2/documents/c1-/doc1")] - groups[41] = [_reference_value("projects/p2/databases/d3/documents/c1-/doc1")] - - # geo points - groups[42] = [_geoPoint_value(-90, -180)] - groups[43] = 
[_geoPoint_value(-90, 0)] - groups[44] = [_geoPoint_value(-90, 180)] - groups[45] = [_geoPoint_value(0, -180)] - groups[46] = [_geoPoint_value(0, 0)] - groups[47] = [_geoPoint_value(0, 180)] - groups[48] = [_geoPoint_value(1, -180)] - groups[49] = [_geoPoint_value(1, 0)] - groups[50] = [_geoPoint_value(1, 180)] - groups[51] = [_geoPoint_value(90, -180)] - groups[52] = [_geoPoint_value(90, 0)] - groups[53] = [_geoPoint_value(90, 180)] - - # arrays - groups[54] = [_array_value()] - groups[55] = [_array_value(["bar"])] - groups[56] = [_array_value(["foo"])] - groups[57] = [_array_value(["foo", 0])] - groups[58] = [_array_value(["foo", 1])] - groups[59] = [_array_value(["foo", "0"])] - - # objects - groups[60] = [_object_value({"bar": 0})] - groups[61] = [_object_value({"bar": 0, "foo": 1})] - groups[62] = [_object_value({"bar": 1})] - groups[63] = [_object_value({"bar": 2})] - groups[64] = [_object_value({"bar": "0"})] - - target = self._make_one() - - for i in range(len(groups)): - for left in groups[i]: - for j in range(len(groups)): - for right in groups[j]: - expected = Order._compare_to(i, j) - - self.assertEqual( - target.compare(left, right), - expected, - "comparing L->R {} ({}) to {} ({})".format( - i, left, j, right - ), - ) - - expected = Order._compare_to(j, i) - self.assertEqual( - target.compare(right, left), - expected, - "comparing R->L {} ({}) to {} ({})".format( - j, right, i, left - ), - ) - - def test_typeorder_type_failure(self): - target = self._make_one() - left = mock.Mock() - left.WhichOneof.return_value = "imaginary-type" - - with self.assertRaisesRegex(ValueError, "Could not detect value"): - target.compare(left, mock.Mock()) - - def test_failure_to_find_type(self): - target = self._make_one() - left = mock.Mock() - left.WhichOneof.return_value = "imaginary-type" - right = mock.Mock() - # Patch from value to get to the deep compare. Since left is a bad type - # expect this to fail with value error. - with mock.patch.object(TypeOrder, "from_value") as to: - to.value = None - with self.assertRaisesRegex(ValueError, "'Unknown ``value_type``"): - target.compare(left, right) - - def test_compare_objects_different_keys(self): - left = _object_value({"foo": 0}) - right = _object_value({"bar": 0}) - - target = self._make_one() - target.compare(left, right) - - -def _boolean_value(b): - return encode_value(b) - - -def _double_value(d): - return encode_value(d) - - -def _int_value(l): - return encode_value(l) - - -def _string_value(s): - if not isinstance(s, six.text_type): - s = six.u(s) - return encode_value(s) - - -def _reference_value(r): - return document_pb2.Value(reference_value=r) - - -def _blob_value(b): - return encode_value(b) - - -def nullValue(): - return encode_value(None) - - -def _timestamp_value(seconds, nanos): - return document_pb2.Value( - timestamp_value=timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) - ) - - -def _geoPoint_value(latitude, longitude): - return encode_value(GeoPoint(latitude, longitude)) - - -def _array_value(values=[]): - return encode_value(values) - - -def _object_value(keysAndValues): - return encode_value(keysAndValues) diff --git a/firestore/tests/unit/v1beta1/test_query.py b/firestore/tests/unit/v1beta1/test_query.py deleted file mode 100644 index 455a56b7f7ec..000000000000 --- a/firestore/tests/unit/v1beta1/test_query.py +++ /dev/null @@ -1,1590 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import types -import unittest - -import mock -import pytest -import six - - -class TestQuery(unittest.TestCase): - - if six.PY2: - assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.query import Query - - return Query - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor_defaults(self): - query = self._make_one(mock.sentinel.parent) - self.assertIs(query._parent, mock.sentinel.parent) - self.assertIsNone(query._projection) - self.assertEqual(query._field_filters, ()) - self.assertEqual(query._orders, ()) - self.assertIsNone(query._limit) - self.assertIsNone(query._offset) - self.assertIsNone(query._start_at) - self.assertIsNone(query._end_at) - - def _make_one_all_fields(self, limit=9876, offset=12, skip_fields=(), parent=None): - kwargs = { - "projection": mock.sentinel.projection, - "field_filters": mock.sentinel.filters, - "orders": mock.sentinel.orders, - "limit": limit, - "offset": offset, - "start_at": mock.sentinel.start_at, - "end_at": mock.sentinel.end_at, - } - for field in skip_fields: - kwargs.pop(field) - if parent is None: - parent = mock.sentinel.parent - return self._make_one(parent, **kwargs) - - def test_constructor_explicit(self): - limit = 234 - offset = 56 - query = self._make_one_all_fields(limit=limit, offset=offset) - self.assertIs(query._parent, mock.sentinel.parent) - self.assertIs(query._projection, mock.sentinel.projection) - self.assertIs(query._field_filters, mock.sentinel.filters) - self.assertEqual(query._orders, mock.sentinel.orders) - self.assertEqual(query._limit, limit) - self.assertEqual(query._offset, offset) - self.assertIs(query._start_at, mock.sentinel.start_at) - self.assertIs(query._end_at, mock.sentinel.end_at) - - def test__client_property(self): - parent = mock.Mock(_client=mock.sentinel.client, spec=["_client"]) - query = self._make_one(parent) - self.assertIs(query._client, mock.sentinel.client) - - def test___eq___other_type(self): - client = self._make_one_all_fields() - other = object() - self.assertFalse(client == other) - - def test___eq___different_parent(self): - parent = mock.sentinel.parent - other_parent = mock.sentinel.other_parent - client = self._make_one_all_fields(parent=parent) - other = self._make_one_all_fields(parent=other_parent) - self.assertFalse(client == other) - - def test___eq___different_projection(self): - parent = mock.sentinel.parent - client = self._make_one_all_fields(parent=parent, skip_fields=("projection",)) - client._projection = mock.sentinel.projection - other = self._make_one_all_fields(parent=parent, skip_fields=("projection",)) - other._projection = mock.sentinel.other_projection - self.assertFalse(client == other) - - def test___eq___different_field_filters(self): - parent = mock.sentinel.parent - client = self._make_one_all_fields( - parent=parent, 
skip_fields=("field_filters",) - ) - client._field_filters = mock.sentinel.field_filters - other = self._make_one_all_fields(parent=parent, skip_fields=("field_filters",)) - other._field_filters = mock.sentinel.other_field_filters - self.assertFalse(client == other) - - def test___eq___different_orders(self): - parent = mock.sentinel.parent - client = self._make_one_all_fields(parent=parent, skip_fields=("orders",)) - client._orders = mock.sentinel.orders - other = self._make_one_all_fields(parent=parent, skip_fields=("orders",)) - other._orders = mock.sentinel.other_orders - self.assertFalse(client == other) - - def test___eq___different_limit(self): - parent = mock.sentinel.parent - client = self._make_one_all_fields(parent=parent, limit=10) - other = self._make_one_all_fields(parent=parent, limit=20) - self.assertFalse(client == other) - - def test___eq___different_offset(self): - parent = mock.sentinel.parent - client = self._make_one_all_fields(parent=parent, offset=10) - other = self._make_one_all_fields(parent=parent, offset=20) - self.assertFalse(client == other) - - def test___eq___different_start_at(self): - parent = mock.sentinel.parent - client = self._make_one_all_fields(parent=parent, skip_fields=("start_at",)) - client._start_at = mock.sentinel.start_at - other = self._make_one_all_fields(parent=parent, skip_fields=("start_at",)) - other._start_at = mock.sentinel.other_start_at - self.assertFalse(client == other) - - def test___eq___different_end_at(self): - parent = mock.sentinel.parent - client = self._make_one_all_fields(parent=parent, skip_fields=("end_at",)) - client._end_at = mock.sentinel.end_at - other = self._make_one_all_fields(parent=parent, skip_fields=("end_at",)) - other._end_at = mock.sentinel.other_end_at - self.assertFalse(client == other) - - def test___eq___hit(self): - client = self._make_one_all_fields() - other = self._make_one_all_fields() - self.assertTrue(client == other) - - def _compare_queries(self, query1, query2, attr_name): - attrs1 = query1.__dict__.copy() - attrs2 = query2.__dict__.copy() - - attrs1.pop(attr_name) - attrs2.pop(attr_name) - - # The only different should be in ``attr_name``. - self.assertEqual(len(attrs1), len(attrs2)) - for key, value in attrs1.items(): - self.assertIs(value, attrs2[key]) - - @staticmethod - def _make_projection_for_select(field_paths): - from google.cloud.firestore_v1beta1.proto import query_pb2 - - return query_pb2.StructuredQuery.Projection( - fields=[ - query_pb2.StructuredQuery.FieldReference(field_path=field_path) - for field_path in field_paths - ] - ) - - def test_select_invalid_path(self): - query = self._make_one(mock.sentinel.parent) - - with self.assertRaises(ValueError): - query.select(["*"]) - - def test_select(self): - query1 = self._make_one_all_fields() - - field_paths2 = ["foo", "bar"] - query2 = query1.select(field_paths2) - self.assertIsNot(query2, query1) - self.assertIsInstance(query2, self._get_target_class()) - self.assertEqual( - query2._projection, self._make_projection_for_select(field_paths2) - ) - self._compare_queries(query1, query2, "_projection") - - # Make sure it overrides. 
- field_paths3 = ["foo.baz"] - query3 = query2.select(field_paths3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual( - query3._projection, self._make_projection_for_select(field_paths3) - ) - self._compare_queries(query2, query3, "_projection") - - def test_where_invalid_path(self): - query = self._make_one(mock.sentinel.parent) - - with self.assertRaises(ValueError): - query.where("*", "==", 1) - - def test_where(self): - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - query = self._make_one_all_fields(skip_fields=("field_filters",)) - new_query = query.where("power.level", ">", 9000) - - self.assertIsNot(query, new_query) - self.assertIsInstance(new_query, self._get_target_class()) - self.assertEqual(len(new_query._field_filters), 1) - - field_pb = new_query._field_filters[0] - expected_pb = query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="power.level"), - op=enums.StructuredQuery.FieldFilter.Operator.GREATER_THAN, - value=document_pb2.Value(integer_value=9000), - ) - self.assertEqual(field_pb, expected_pb) - self._compare_queries(query, new_query, "_field_filters") - - def _where_unary_helper(self, value, op_enum, op_string="=="): - from google.cloud.firestore_v1beta1.proto import query_pb2 - - query = self._make_one_all_fields(skip_fields=("field_filters",)) - field_path = "feeeld" - new_query = query.where(field_path, op_string, value) - - self.assertIsNot(query, new_query) - self.assertIsInstance(new_query, self._get_target_class()) - self.assertEqual(len(new_query._field_filters), 1) - - field_pb = new_query._field_filters[0] - expected_pb = query_pb2.StructuredQuery.UnaryFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - op=op_enum, - ) - self.assertEqual(field_pb, expected_pb) - self._compare_queries(query, new_query, "_field_filters") - - def test_where_eq_null(self): - from google.cloud.firestore_v1beta1.gapic import enums - - op_enum = enums.StructuredQuery.UnaryFilter.Operator.IS_NULL - self._where_unary_helper(None, op_enum) - - def test_where_gt_null(self): - with self.assertRaises(ValueError): - self._where_unary_helper(None, 0, op_string=">") - - def test_where_eq_nan(self): - from google.cloud.firestore_v1beta1.gapic import enums - - op_enum = enums.StructuredQuery.UnaryFilter.Operator.IS_NAN - self._where_unary_helper(float("nan"), op_enum) - - def test_where_le_nan(self): - with self.assertRaises(ValueError): - self._where_unary_helper(float("nan"), 0, op_string="<=") - - def test_where_w_delete(self): - from google.cloud.firestore_v1beta1 import DELETE_FIELD - - with self.assertRaises(ValueError): - self._where_unary_helper(DELETE_FIELD, 0) - - def test_where_w_server_timestamp(self): - from google.cloud.firestore_v1beta1 import SERVER_TIMESTAMP - - with self.assertRaises(ValueError): - self._where_unary_helper(SERVER_TIMESTAMP, 0) - - def test_where_w_array_remove(self): - from google.cloud.firestore_v1beta1 import ArrayRemove - - with self.assertRaises(ValueError): - self._where_unary_helper(ArrayRemove([1, 3, 5]), 0) - - def test_where_w_array_union(self): - from google.cloud.firestore_v1beta1 import ArrayUnion - - with self.assertRaises(ValueError): - self._where_unary_helper(ArrayUnion([2, 4, 8]), 0) - - def test_order_by_invalid_path(self): - query = 
self._make_one(mock.sentinel.parent) - - with self.assertRaises(ValueError): - query.order_by("*") - - def test_order_by(self): - from google.cloud.firestore_v1beta1.gapic import enums - - klass = self._get_target_class() - query1 = self._make_one_all_fields(skip_fields=("orders",)) - - field_path2 = "a" - query2 = query1.order_by(field_path2) - self.assertIsNot(query2, query1) - self.assertIsInstance(query2, klass) - order_pb2 = _make_order_pb( - field_path2, enums.StructuredQuery.Direction.ASCENDING - ) - self.assertEqual(query2._orders, (order_pb2,)) - self._compare_queries(query1, query2, "_orders") - - # Make sure it appends to the orders. - field_path3 = "b" - query3 = query2.order_by(field_path3, direction=klass.DESCENDING) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, klass) - order_pb3 = _make_order_pb( - field_path3, enums.StructuredQuery.Direction.DESCENDING - ) - self.assertEqual(query3._orders, (order_pb2, order_pb3)) - self._compare_queries(query2, query3, "_orders") - - def test_limit(self): - query1 = self._make_one_all_fields() - - limit2 = 100 - query2 = query1.limit(limit2) - self.assertIsNot(query2, query1) - self.assertIsInstance(query2, self._get_target_class()) - self.assertEqual(query2._limit, limit2) - self._compare_queries(query1, query2, "_limit") - - # Make sure it overrides. - limit3 = 10 - query3 = query2.limit(limit3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._limit, limit3) - self._compare_queries(query2, query3, "_limit") - - def test_offset(self): - query1 = self._make_one_all_fields() - - offset2 = 23 - query2 = query1.offset(offset2) - self.assertIsNot(query2, query1) - self.assertIsInstance(query2, self._get_target_class()) - self.assertEqual(query2._offset, offset2) - self._compare_queries(query1, query2, "_offset") - - # Make sure it overrides. 
- offset3 = 35 - query3 = query2.offset(offset3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._offset, offset3) - self._compare_queries(query2, query3, "_offset") - - @staticmethod - def _make_collection(*path, **kw): - from google.cloud.firestore_v1beta1 import collection - - return collection.CollectionReference(*path, **kw) - - @staticmethod - def _make_docref(*path, **kw): - from google.cloud.firestore_v1beta1 import document - - return document.DocumentReference(*path, **kw) - - @staticmethod - def _make_snapshot(docref, values): - from google.cloud.firestore_v1beta1 import document - - return document.DocumentSnapshot(docref, values, True, None, None, None) - - def test__cursor_helper_w_dict(self): - values = {"a": 7, "b": "foo"} - query1 = self._make_one(mock.sentinel.parent) - query2 = query1._cursor_helper(values, True, True) - - self.assertIs(query2._parent, mock.sentinel.parent) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, query1._orders) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - self.assertIsNone(query2._end_at) - - cursor, before = query2._start_at - - self.assertEqual(cursor, values) - self.assertTrue(before) - - def test__cursor_helper_w_tuple(self): - values = (7, "foo") - query1 = self._make_one(mock.sentinel.parent) - query2 = query1._cursor_helper(values, False, True) - - self.assertIs(query2._parent, mock.sentinel.parent) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, query1._orders) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - self.assertIsNone(query2._end_at) - - cursor, before = query2._start_at - - self.assertEqual(cursor, list(values)) - self.assertFalse(before) - - def test__cursor_helper_w_list(self): - values = [7, "foo"] - query1 = self._make_one(mock.sentinel.parent) - query2 = query1._cursor_helper(values, True, False) - - self.assertIs(query2._parent, mock.sentinel.parent) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, query1._orders) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - self.assertIsNone(query2._start_at) - - cursor, before = query2._end_at - - self.assertEqual(cursor, values) - self.assertIsNot(cursor, values) - self.assertTrue(before) - - def test__cursor_helper_w_snapshot_wrong_collection(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("there", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = self._make_one(collection) - - with self.assertRaises(ValueError): - query._cursor_helper(snapshot, False, False) - - def test__cursor_helper_w_snapshot(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query1 = self._make_one(collection) - - query2 = query1._cursor_helper(snapshot, False, False) - - self.assertIs(query2._parent, collection) - self.assertIsNone(query2._projection) - self.assertEqual(query2._field_filters, ()) - self.assertEqual(query2._orders, ()) - self.assertIsNone(query2._limit) - self.assertIsNone(query2._offset) - self.assertIsNone(query2._start_at) - - cursor, before = query2._end_at - - self.assertIs(cursor, snapshot) - self.assertFalse(before) 
- - def test_start_at(self): - collection = self._make_collection("here") - query1 = self._make_one_all_fields(parent=collection, skip_fields=("orders",)) - query2 = query1.order_by("hi") - - document_fields3 = {"hi": "mom"} - query3 = query2.start_at(document_fields3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._start_at, (document_fields3, True)) - self._compare_queries(query2, query3, "_start_at") - - # Make sure it overrides. - query4 = query3.order_by("bye") - values5 = {"hi": "zap", "bye": 88} - docref = self._make_docref("here", "doc_id") - document_fields5 = self._make_snapshot(docref, values5) - query5 = query4.start_at(document_fields5) - self.assertIsNot(query5, query4) - self.assertIsInstance(query5, self._get_target_class()) - self.assertEqual(query5._start_at, (document_fields5, True)) - self._compare_queries(query4, query5, "_start_at") - - def test_start_after(self): - collection = self._make_collection("here") - query1 = self._make_one_all_fields(parent=collection, skip_fields=("orders",)) - query2 = query1.order_by("down") - - document_fields3 = {"down": 99.75} - query3 = query2.start_after(document_fields3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._start_at, (document_fields3, False)) - self._compare_queries(query2, query3, "_start_at") - - # Make sure it overrides. - query4 = query3.order_by("out") - values5 = {"down": 100.25, "out": b"\x00\x01"} - docref = self._make_docref("here", "doc_id") - document_fields5 = self._make_snapshot(docref, values5) - query5 = query4.start_after(document_fields5) - self.assertIsNot(query5, query4) - self.assertIsInstance(query5, self._get_target_class()) - self.assertEqual(query5._start_at, (document_fields5, False)) - self._compare_queries(query4, query5, "_start_at") - - def test_end_before(self): - collection = self._make_collection("here") - query1 = self._make_one_all_fields(parent=collection, skip_fields=("orders",)) - query2 = query1.order_by("down") - - document_fields3 = {"down": 99.75} - query3 = query2.end_before(document_fields3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._end_at, (document_fields3, True)) - self._compare_queries(query2, query3, "_end_at") - - # Make sure it overrides. - query4 = query3.order_by("out") - values5 = {"down": 100.25, "out": b"\x00\x01"} - docref = self._make_docref("here", "doc_id") - document_fields5 = self._make_snapshot(docref, values5) - query5 = query4.end_before(document_fields5) - self.assertIsNot(query5, query4) - self.assertIsInstance(query5, self._get_target_class()) - self.assertEqual(query5._end_at, (document_fields5, True)) - self._compare_queries(query4, query5, "_end_at") - self._compare_queries(query4, query5, "_end_at") - - def test_end_at(self): - collection = self._make_collection("here") - query1 = self._make_one_all_fields(parent=collection, skip_fields=("orders",)) - query2 = query1.order_by("hi") - - document_fields3 = {"hi": "mom"} - query3 = query2.end_at(document_fields3) - self.assertIsNot(query3, query2) - self.assertIsInstance(query3, self._get_target_class()) - self.assertEqual(query3._end_at, (document_fields3, False)) - self._compare_queries(query2, query3, "_end_at") - - # Make sure it overrides. 
- query4 = query3.order_by("bye") - values5 = {"hi": "zap", "bye": 88} - docref = self._make_docref("here", "doc_id") - document_fields5 = self._make_snapshot(docref, values5) - query5 = query4.end_at(document_fields5) - self.assertIsNot(query5, query4) - self.assertIsInstance(query5, self._get_target_class()) - self.assertEqual(query5._end_at, (document_fields5, False)) - self._compare_queries(query4, query5, "_end_at") - - def test__filters_pb_empty(self): - query = self._make_one(mock.sentinel.parent) - self.assertEqual(len(query._field_filters), 0) - self.assertIsNone(query._filters_pb()) - - def test__filters_pb_single(self): - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - query1 = self._make_one(mock.sentinel.parent) - query2 = query1.where("x.y", ">", 50.5) - filter_pb = query2._filters_pb() - expected_pb = query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="x.y"), - op=enums.StructuredQuery.FieldFilter.Operator.GREATER_THAN, - value=document_pb2.Value(double_value=50.5), - ) - ) - self.assertEqual(filter_pb, expected_pb) - - def test__filters_pb_multi(self): - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - query1 = self._make_one(mock.sentinel.parent) - query2 = query1.where("x.y", ">", 50.5) - query3 = query2.where("ABC", "==", 123) - - filter_pb = query3._filters_pb() - op_class = enums.StructuredQuery.FieldFilter.Operator - expected_pb = query_pb2.StructuredQuery.Filter( - composite_filter=query_pb2.StructuredQuery.CompositeFilter( - op=enums.StructuredQuery.CompositeFilter.Operator.AND, - filters=[ - query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference( - field_path="x.y" - ), - op=op_class.GREATER_THAN, - value=document_pb2.Value(double_value=50.5), - ) - ), - query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference( - field_path="ABC" - ), - op=op_class.EQUAL, - value=document_pb2.Value(integer_value=123), - ) - ), - ], - ) - ) - self.assertEqual(filter_pb, expected_pb) - - def test__normalize_projection_none(self): - query = self._make_one(mock.sentinel.parent) - self.assertIsNone(query._normalize_projection(None)) - - def test__normalize_projection_empty(self): - projection = self._make_projection_for_select([]) - query = self._make_one(mock.sentinel.parent) - normalized = query._normalize_projection(projection) - field_paths = [field_ref.field_path for field_ref in normalized.fields] - self.assertEqual(field_paths, ["__name__"]) - - def test__normalize_projection_non_empty(self): - projection = self._make_projection_for_select(["a", "b"]) - query = self._make_one(mock.sentinel.parent) - self.assertIs(query._normalize_projection(projection), projection) - - def test__normalize_orders_wo_orders_wo_cursors(self): - query = self._make_one(mock.sentinel.parent) - expected = [] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_w_orders_wo_cursors(self): - query = self._make_one(mock.sentinel.parent).order_by("a") - expected = [query._make_order("a", "ASCENDING")] - self.assertEqual(query._normalize_orders(), 
expected) - - def test__normalize_orders_wo_orders_w_snapshot_cursor(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = self._make_one(collection).start_at(snapshot) - expected = [query._make_order("__name__", "ASCENDING")] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_w_name_orders_w_snapshot_cursor(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = ( - self._make_one(collection) - .order_by("__name__", "DESCENDING") - .start_at(snapshot) - ) - expected = [query._make_order("__name__", "DESCENDING")] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_wo_orders_w_snapshot_cursor_w_neq_exists(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = ( - self._make_one(collection) - .where("c", "<=", 20) - .order_by("c", "DESCENDING") - .start_at(snapshot) - ) - expected = [ - query._make_order("c", "DESCENDING"), - query._make_order("__name__", "DESCENDING"), - ] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_orders_wo_orders_w_snapshot_cursor_w_neq_where(self): - values = {"a": 7, "b": "foo"} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - collection = self._make_collection("here") - query = self._make_one(collection).where("c", "<=", 20).end_at(snapshot) - expected = [ - query._make_order("c", "ASCENDING"), - query._make_order("__name__", "ASCENDING"), - ] - self.assertEqual(query._normalize_orders(), expected) - - def test__normalize_cursor_none(self): - query = self._make_one(mock.sentinel.parent) - self.assertIsNone(query._normalize_cursor(None, query._orders)) - - def test__normalize_cursor_no_order(self): - cursor = ([1], True) - query = self._make_one(mock.sentinel.parent) - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_as_list_mismatched_order(self): - cursor = ([1, 2], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_as_dict_mismatched_order(self): - cursor = ({"a": 1}, True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_w_delete(self): - from google.cloud.firestore_v1beta1 import DELETE_FIELD - - cursor = ([DELETE_FIELD], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_w_server_timestamp(self): - from google.cloud.firestore_v1beta1 import SERVER_TIMESTAMP - - cursor = ([SERVER_TIMESTAMP], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_w_array_remove(self): - from google.cloud.firestore_v1beta1 import ArrayRemove - - cursor = ([ArrayRemove([1, 3, 5])], True) 
- query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_w_array_union(self): - from google.cloud.firestore_v1beta1 import ArrayUnion - - cursor = ([ArrayUnion([2, 4, 8])], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - with self.assertRaises(ValueError): - query._normalize_cursor(cursor, query._orders) - - def test__normalize_cursor_as_list_hit(self): - cursor = ([1], True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True)) - - def test__normalize_cursor_as_dict_hit(self): - cursor = ({"b": 1}, True) - query = self._make_one(mock.sentinel.parent).order_by("b", "ASCENDING") - - self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True)) - - def test__normalize_cursor_as_snapshot_hit(self): - values = {"b": 1} - docref = self._make_docref("here", "doc_id") - snapshot = self._make_snapshot(docref, values) - cursor = (snapshot, True) - collection = self._make_collection("here") - query = self._make_one(collection).order_by("b", "ASCENDING") - - self.assertEqual(query._normalize_cursor(cursor, query._orders), ([1], True)) - - def test__normalize_cursor_w___name___w_reference(self): - db_string = "projects/my-project/database/(default)" - client = mock.Mock(spec=["_database_string"]) - client._database_string = db_string - parent = mock.Mock(spec=["_path", "_client"]) - parent._client = client - parent._path = ["C"] - query = self._make_one(parent).order_by("__name__", "ASCENDING") - docref = self._make_docref("here", "doc_id") - values = {"a": 7} - snapshot = self._make_snapshot(docref, values) - expected = docref - cursor = (snapshot, True) - - self.assertEqual( - query._normalize_cursor(cursor, query._orders), ([expected], True) - ) - - def test__normalize_cursor_w___name___wo_slash(self): - db_string = "projects/my-project/database/(default)" - client = mock.Mock(spec=["_database_string"]) - client._database_string = db_string - parent = mock.Mock(spec=["_path", "_client", "document"]) - parent._client = client - parent._path = ["C"] - document = parent.document.return_value = mock.Mock(spec=[]) - query = self._make_one(parent).order_by("__name__", "ASCENDING") - cursor = (["b"], True) - expected = document - - self.assertEqual( - query._normalize_cursor(cursor, query._orders), ([expected], True) - ) - parent.document.assert_called_once_with("b") - - def test__to_protobuf_all_fields(self): - from google.protobuf import wrappers_pb2 - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - parent = mock.Mock(id="cat", spec=["id"]) - query1 = self._make_one(parent) - query2 = query1.select(["X", "Y", "Z"]) - query3 = query2.where("Y", ">", 2.5) - query4 = query3.order_by("X") - query5 = query4.limit(17) - query6 = query5.offset(3) - query7 = query6.start_at({"X": 10}) - query8 = query7.end_at({"X": 25}) - - structured_query_pb = query8._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "select": query_pb2.StructuredQuery.Projection( - fields=[ - query_pb2.StructuredQuery.FieldReference(field_path=field_path) - for field_path in ["X", "Y", "Z"] - ] - ), - "where": query_pb2.StructuredQuery.Filter( - 
field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="Y"), - op=enums.StructuredQuery.FieldFilter.Operator.GREATER_THAN, - value=document_pb2.Value(double_value=2.5), - ) - ), - "order_by": [ - _make_order_pb("X", enums.StructuredQuery.Direction.ASCENDING) - ], - "start_at": query_pb2.Cursor( - values=[document_pb2.Value(integer_value=10)], before=True - ), - "end_at": query_pb2.Cursor(values=[document_pb2.Value(integer_value=25)]), - "offset": 3, - "limit": wrappers_pb2.Int32Value(value=17), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_select_only(self): - from google.cloud.firestore_v1beta1.proto import query_pb2 - - parent = mock.Mock(id="cat", spec=["id"]) - query1 = self._make_one(parent) - field_paths = ["a.b", "a.c", "d"] - query2 = query1.select(field_paths) - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "select": query_pb2.StructuredQuery.Projection( - fields=[ - query_pb2.StructuredQuery.FieldReference(field_path=field_path) - for field_path in field_paths - ] - ), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_where_only(self): - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - parent = mock.Mock(id="dog", spec=["id"]) - query1 = self._make_one(parent) - query2 = query1.where("a", "==", u"b") - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "where": query_pb2.StructuredQuery.Filter( - field_filter=query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="a"), - op=enums.StructuredQuery.FieldFilter.Operator.EQUAL, - value=document_pb2.Value(string_value=u"b"), - ) - ), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_order_by_only(self): - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import query_pb2 - - parent = mock.Mock(id="fish", spec=["id"]) - query1 = self._make_one(parent) - query2 = query1.order_by("abc") - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "order_by": [ - _make_order_pb("abc", enums.StructuredQuery.Direction.ASCENDING) - ], - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_start_at_only(self): - # NOTE: "only" is wrong since we must have ``order_by`` as well. 
- from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - parent = mock.Mock(id="phish", spec=["id"]) - query = self._make_one(parent).order_by("X.Y").start_after({"X": {"Y": u"Z"}}) - - structured_query_pb = query._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "order_by": [ - _make_order_pb("X.Y", enums.StructuredQuery.Direction.ASCENDING) - ], - "start_at": query_pb2.Cursor( - values=[document_pb2.Value(string_value=u"Z")] - ), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_end_at_only(self): - # NOTE: "only" is wrong since we must have ``order_by`` as well. - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - parent = mock.Mock(id="ghoti", spec=["id"]) - query = self._make_one(parent).order_by("a").end_at({"a": 88}) - - structured_query_pb = query._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "order_by": [ - _make_order_pb("a", enums.StructuredQuery.Direction.ASCENDING) - ], - "end_at": query_pb2.Cursor(values=[document_pb2.Value(integer_value=88)]), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_offset_only(self): - from google.cloud.firestore_v1beta1.proto import query_pb2 - - parent = mock.Mock(id="cartt", spec=["id"]) - query1 = self._make_one(parent) - offset = 14 - query2 = query1.offset(offset) - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "offset": offset, - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - self.assertEqual(structured_query_pb, expected_pb) - - def test__to_protobuf_limit_only(self): - from google.protobuf import wrappers_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - parent = mock.Mock(id="donut", spec=["id"]) - query1 = self._make_one(parent) - limit = 31 - query2 = query1.limit(limit) - - structured_query_pb = query2._to_protobuf() - query_kwargs = { - "from": [ - query_pb2.StructuredQuery.CollectionSelector(collection_id=parent.id) - ], - "limit": wrappers_pb2.Int32Value(value=limit), - } - expected_pb = query_pb2.StructuredQuery(**query_kwargs) - - self.assertEqual(structured_query_pb, expected_pb) - - def test_get_simple(self): - import warnings - - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("dee") - - # Add a dummy response to the minimal fake GAPIC. - _, expected_prefix = parent._parent_info() - name = "{}/sleep".format(expected_prefix) - data = {"snooze": 10} - response_pb = _make_query_response(name=name, data=data) - firestore_api.run_query.return_value = iter([response_pb]) - - # Execute the query and check the response. 
- query = self._make_one(parent) - - with warnings.catch_warnings(record=True) as warned: - get_response = query.get() - - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("dee", "sleep")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - # Verify the deprecation - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - def test_stream_simple(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("dee") - - # Add a dummy response to the minimal fake GAPIC. - _, expected_prefix = parent._parent_info() - name = "{}/sleep".format(expected_prefix) - data = {"snooze": 10} - response_pb = _make_query_response(name=name, data=data) - firestore_api.run_query.return_value = iter([response_pb]) - - # Execute the query and check the response. - query = self._make_one(parent) - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("dee", "sleep")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_with_transaction(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Create a real-ish transaction for this client. - transaction = client.transaction() - txn_id = b"\x00\x00\x01-work-\xf2" - transaction._id = txn_id - - # Make a **real** collection reference as parent. - parent = client.collection("declaration") - - # Add a dummy response to the minimal fake GAPIC. - parent_path, expected_prefix = parent._parent_info() - name = "{}/burger".format(expected_prefix) - data = {"lettuce": b"\xee\x87"} - response_pb = _make_query_response(name=name, data=data) - firestore_api.run_query.return_value = iter([response_pb]) - - # Execute the query and check the response. - query = self._make_one(parent) - get_response = query.stream(transaction=transaction) - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("declaration", "burger")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - def test_stream_no_results(self): - # Create a minimal fake GAPIC with a dummy response. 
- firestore_api = mock.Mock(spec=["run_query"]) - empty_response = _make_query_response() - run_query_response = iter([empty_response]) - firestore_api.run_query.return_value = run_query_response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("dah", "dah", "dum") - query = self._make_one(parent) - - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - self.assertEqual(list(get_response), []) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_second_response_in_empty_stream(self): - # Create a minimal fake GAPIC with a dummy response. - firestore_api = mock.Mock(spec=["run_query"]) - empty_response1 = _make_query_response() - empty_response2 = _make_query_response() - run_query_response = iter([empty_response1, empty_response2]) - firestore_api.run_query.return_value = run_query_response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("dah", "dah", "dum") - query = self._make_one(parent) - - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - self.assertEqual(list(get_response), []) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_with_skipped_results(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("talk", "and", "chew-gum") - - # Add two dummy responses to the minimal fake GAPIC. - _, expected_prefix = parent._parent_info() - response_pb1 = _make_query_response(skipped_results=1) - name = "{}/clock".format(expected_prefix) - data = {"noon": 12, "nested": {"bird": 10.5}} - response_pb2 = _make_query_response(name=name, data=data) - firestore_api.run_query.return_value = iter([response_pb1, response_pb2]) - - # Execute the query and check the response. - query = self._make_one(parent) - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("talk", "and", "chew-gum", "clock")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - def test_stream_empty_after_first_response(self): - # Create a minimal fake GAPIC. - firestore_api = mock.Mock(spec=["run_query"]) - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Make a **real** collection reference as parent. - parent = client.collection("charles") - - # Add two dummy responses to the minimal fake GAPIC. 
- _, expected_prefix = parent._parent_info() - name = "{}/bark".format(expected_prefix) - data = {"lee": "hoop"} - response_pb1 = _make_query_response(name=name, data=data) - response_pb2 = _make_query_response() - firestore_api.run_query.return_value = iter([response_pb1, response_pb2]) - - # Execute the query and check the response. - query = self._make_one(parent) - get_response = query.stream() - self.assertIsInstance(get_response, types.GeneratorType) - returned = list(get_response) - self.assertEqual(len(returned), 1) - snapshot = returned[0] - self.assertEqual(snapshot.reference._path, ("charles", "bark")) - self.assertEqual(snapshot.to_dict(), data) - - # Verify the mock call. - parent_path, _ = parent._parent_info() - firestore_api.run_query.assert_called_once_with( - parent_path, - query._to_protobuf(), - transaction=None, - metadata=client._rpc_metadata, - ) - - @mock.patch("google.cloud.firestore_v1beta1.query.Watch", autospec=True) - def test_on_snapshot(self, watch): - query = self._make_one(mock.sentinel.parent) - query.on_snapshot(None) - watch.for_query.assert_called_once() - - def test_comparator_no_ordering(self): - query = self._make_one(mock.sentinel.parent) - query._orders = [] - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument2") - - sort = query._comparator(doc1, doc2) - self.assertEqual(sort, -1) - - def test_comparator_no_ordering_same_id(self): - query = self._make_one(mock.sentinel.parent) - query._orders = [] - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument1") - - sort = query._comparator(doc1, doc2) - self.assertEqual(sort, 0) - - def test_comparator_ordering(self): - query = self._make_one(mock.sentinel.parent) - orderByMock = mock.Mock() - orderByMock.field.field_path = "last" - orderByMock.direction = 1 # ascending - query._orders = [orderByMock] - - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - doc1._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "secondlovelace"}, - } - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument2") - doc2._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "lovelace"}, - } - - sort = query._comparator(doc1, doc2) - self.assertEqual(sort, 1) - - def test_comparator_ordering_descending(self): - query = self._make_one(mock.sentinel.parent) - orderByMock = mock.Mock() - orderByMock.field.field_path = "last" - orderByMock.direction = -1 # descending - query._orders = [orderByMock] - - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - doc1._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "secondlovelace"}, - } - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument2") - doc2._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "lovelace"}, - } - - sort = query._comparator(doc1, doc2) - self.assertEqual(sort, -1) - - def test_comparator_missing_order_by_field_in_data_raises(self): - query = self._make_one(mock.sentinel.parent) - orderByMock = mock.Mock() - orderByMock.field.field_path = "last" - orderByMock.direction = 1 # ascending - query._orders = [orderByMock] - - doc1 = mock.Mock() - doc1.reference._path = ("col", "adocument1") - doc1._data = {} - doc2 = mock.Mock() - doc2.reference._path = ("col", "adocument2") - doc2._data = { - "first": {"stringValue": "Ada"}, - "last": {"stringValue": "lovelace"}, - } - - with 
self.assertRaisesRegex(ValueError, "Can only compare fields "): - query._comparator(doc1, doc2) - - -class Test__enum_from_op_string(unittest.TestCase): - @staticmethod - def _call_fut(op_string): - from google.cloud.firestore_v1beta1.query import _enum_from_op_string - - return _enum_from_op_string(op_string) - - def test_success(self): - from google.cloud.firestore_v1beta1.gapic import enums - - op_class = enums.StructuredQuery.FieldFilter.Operator - self.assertEqual(self._call_fut("<"), op_class.LESS_THAN) - self.assertEqual(self._call_fut("<="), op_class.LESS_THAN_OR_EQUAL) - self.assertEqual(self._call_fut("=="), op_class.EQUAL) - self.assertEqual(self._call_fut(">="), op_class.GREATER_THAN_OR_EQUAL) - self.assertEqual(self._call_fut(">"), op_class.GREATER_THAN) - self.assertEqual(self._call_fut("array_contains"), op_class.ARRAY_CONTAINS) - - def test_failure(self): - with self.assertRaises(ValueError): - self._call_fut("?") - - -class Test__isnan(unittest.TestCase): - @staticmethod - def _call_fut(value): - from google.cloud.firestore_v1beta1.query import _isnan - - return _isnan(value) - - def test_valid(self): - self.assertTrue(self._call_fut(float("nan"))) - - def test_invalid(self): - self.assertFalse(self._call_fut(51.5)) - self.assertFalse(self._call_fut(None)) - self.assertFalse(self._call_fut("str")) - self.assertFalse(self._call_fut(int)) - self.assertFalse(self._call_fut(1.0 + 1.0j)) - - -class Test__enum_from_direction(unittest.TestCase): - @staticmethod - def _call_fut(direction): - from google.cloud.firestore_v1beta1.query import _enum_from_direction - - return _enum_from_direction(direction) - - def test_success(self): - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.query import Query - - dir_class = enums.StructuredQuery.Direction - self.assertEqual(self._call_fut(Query.ASCENDING), dir_class.ASCENDING) - self.assertEqual(self._call_fut(Query.DESCENDING), dir_class.DESCENDING) - - # Ints pass through - self.assertEqual(self._call_fut(dir_class.ASCENDING), dir_class.ASCENDING) - self.assertEqual(self._call_fut(dir_class.DESCENDING), dir_class.DESCENDING) - - def test_failure(self): - with self.assertRaises(ValueError): - self._call_fut("neither-ASCENDING-nor-DESCENDING") - - -class Test__filter_pb(unittest.TestCase): - @staticmethod - def _call_fut(field_or_unary): - from google.cloud.firestore_v1beta1.query import _filter_pb - - return _filter_pb(field_or_unary) - - def test_unary(self): - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import query_pb2 - - unary_pb = query_pb2.StructuredQuery.UnaryFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="a.b.c"), - op=enums.StructuredQuery.UnaryFilter.Operator.IS_NULL, - ) - filter_pb = self._call_fut(unary_pb) - expected_pb = query_pb2.StructuredQuery.Filter(unary_filter=unary_pb) - self.assertEqual(filter_pb, expected_pb) - - def test_field(self): - from google.cloud.firestore_v1beta1.gapic import enums - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import query_pb2 - - field_filter_pb = query_pb2.StructuredQuery.FieldFilter( - field=query_pb2.StructuredQuery.FieldReference(field_path="XYZ"), - op=enums.StructuredQuery.FieldFilter.Operator.GREATER_THAN, - value=document_pb2.Value(double_value=90.75), - ) - filter_pb = self._call_fut(field_filter_pb) - expected_pb = query_pb2.StructuredQuery.Filter(field_filter=field_filter_pb) - 
self.assertEqual(filter_pb, expected_pb) - - def test_bad_type(self): - with self.assertRaises(ValueError): - self._call_fut(None) - - -class Test__cursor_pb(unittest.TestCase): - @staticmethod - def _call_fut(cursor_pair): - from google.cloud.firestore_v1beta1.query import _cursor_pb - - return _cursor_pb(cursor_pair) - - def test_no_pair(self): - self.assertIsNone(self._call_fut(None)) - - def test_success(self): - from google.cloud.firestore_v1beta1.proto import query_pb2 - from google.cloud.firestore_v1beta1 import _helpers - - data = [1.5, 10, True] - cursor_pair = data, True - - cursor_pb = self._call_fut(cursor_pair) - - expected_pb = query_pb2.Cursor( - values=[_helpers.encode_value(value) for value in data], before=True - ) - self.assertEqual(cursor_pb, expected_pb) - - -class Test__query_response_to_snapshot(unittest.TestCase): - @staticmethod - def _call_fut(response_pb, collection, expected_prefix): - from google.cloud.firestore_v1beta1.query import _query_response_to_snapshot - - return _query_response_to_snapshot(response_pb, collection, expected_prefix) - - def test_empty(self): - response_pb = _make_query_response() - snapshot = self._call_fut(response_pb, None, None) - self.assertIsNone(snapshot) - - def test_after_offset(self): - skipped_results = 410 - response_pb = _make_query_response(skipped_results=skipped_results) - snapshot = self._call_fut(response_pb, None, None) - self.assertIsNone(snapshot) - - def test_response(self): - from google.cloud.firestore_v1beta1.document import DocumentSnapshot - - client = _make_client() - collection = client.collection("a", "b", "c") - _, expected_prefix = collection._parent_info() - - # Create name for the protobuf. - doc_id = "gigantic" - name = "{}/{}".format(expected_prefix, doc_id) - data = {"a": 901, "b": True} - response_pb = _make_query_response(name=name, data=data) - - snapshot = self._call_fut(response_pb, collection, expected_prefix) - self.assertIsInstance(snapshot, DocumentSnapshot) - expected_path = collection._path + (doc_id,) - self.assertEqual(snapshot.reference._path, expected_path) - self.assertEqual(snapshot.to_dict(), data) - self.assertTrue(snapshot.exists) - self.assertEqual(snapshot.read_time, response_pb.read_time) - self.assertEqual(snapshot.create_time, response_pb.document.create_time) - self.assertEqual(snapshot.update_time, response_pb.document.update_time) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="project-project"): - from google.cloud.firestore_v1beta1.client import Client - - credentials = _make_credentials() - - with pytest.deprecated_call(): - return Client(project=project, credentials=credentials) - - -def _make_order_pb(field_path, direction): - from google.cloud.firestore_v1beta1.proto import query_pb2 - - return query_pb2.StructuredQuery.Order( - field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), - direction=direction, - ) - - -def _make_query_response(**kwargs): - # kwargs supported are ``skipped_results``, ``name`` and ``data`` - from google.cloud.firestore_v1beta1.proto import document_pb2 - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.firestore_v1beta1 import _helpers - - now = datetime.datetime.utcnow() - read_time = _datetime_to_pb_timestamp(now) - kwargs["read_time"] = read_time - - name = kwargs.pop("name", None) - data = kwargs.pop("data", None) - if 
name is not None and data is not None: - document_pb = document_pb2.Document( - name=name, fields=_helpers.encode_dict(data) - ) - delta = datetime.timedelta(seconds=100) - update_time = _datetime_to_pb_timestamp(now - delta) - create_time = _datetime_to_pb_timestamp(now - 2 * delta) - document_pb.update_time.CopyFrom(update_time) - document_pb.create_time.CopyFrom(create_time) - - kwargs["document"] = document_pb - - return firestore_pb2.RunQueryResponse(**kwargs) diff --git a/firestore/tests/unit/v1beta1/test_transaction.py b/firestore/tests/unit/v1beta1/test_transaction.py deleted file mode 100644 index 1797007495f5..000000000000 --- a/firestore/tests/unit/v1beta1/test_transaction.py +++ /dev/null @@ -1,990 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import mock -import pytest - - -class TestTransaction(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.transaction import Transaction - - return Transaction - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor_defaults(self): - from google.cloud.firestore_v1beta1.transaction import MAX_ATTEMPTS - - transaction = self._make_one(mock.sentinel.client) - self.assertIs(transaction._client, mock.sentinel.client) - self.assertEqual(transaction._write_pbs, []) - self.assertEqual(transaction._max_attempts, MAX_ATTEMPTS) - self.assertFalse(transaction._read_only) - self.assertIsNone(transaction._id) - - def test_constructor_explicit(self): - transaction = self._make_one( - mock.sentinel.client, max_attempts=10, read_only=True - ) - self.assertIs(transaction._client, mock.sentinel.client) - self.assertEqual(transaction._write_pbs, []) - self.assertEqual(transaction._max_attempts, 10) - self.assertTrue(transaction._read_only) - self.assertIsNone(transaction._id) - - def test__add_write_pbs_failure(self): - from google.cloud.firestore_v1beta1.transaction import _WRITE_READ_ONLY - - batch = self._make_one(mock.sentinel.client, read_only=True) - self.assertEqual(batch._write_pbs, []) - with self.assertRaises(ValueError) as exc_info: - batch._add_write_pbs([mock.sentinel.write]) - - self.assertEqual(exc_info.exception.args, (_WRITE_READ_ONLY,)) - self.assertEqual(batch._write_pbs, []) - - def test__add_write_pbs(self): - batch = self._make_one(mock.sentinel.client) - self.assertEqual(batch._write_pbs, []) - batch._add_write_pbs([mock.sentinel.write]) - self.assertEqual(batch._write_pbs, [mock.sentinel.write]) - - def test__options_protobuf_read_only(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - - transaction = self._make_one(mock.sentinel.client, read_only=True) - options_pb = transaction._options_protobuf(None) - expected_pb = common_pb2.TransactionOptions( - read_only=common_pb2.TransactionOptions.ReadOnly() - ) - self.assertEqual(options_pb, expected_pb) - - def test__options_protobuf_read_only_retry(self): - from 
google.cloud.firestore_v1beta1.transaction import _CANT_RETRY_READ_ONLY - - transaction = self._make_one(mock.sentinel.client, read_only=True) - retry_id = b"illuminate" - - with self.assertRaises(ValueError) as exc_info: - transaction._options_protobuf(retry_id) - - self.assertEqual(exc_info.exception.args, (_CANT_RETRY_READ_ONLY,)) - - def test__options_protobuf_read_write(self): - transaction = self._make_one(mock.sentinel.client) - options_pb = transaction._options_protobuf(None) - self.assertIsNone(options_pb) - - def test__options_protobuf_on_retry(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - - transaction = self._make_one(mock.sentinel.client) - retry_id = b"hocus-pocus" - options_pb = transaction._options_protobuf(retry_id) - expected_pb = common_pb2.TransactionOptions( - read_write=common_pb2.TransactionOptions.ReadWrite( - retry_transaction=retry_id - ) - ) - self.assertEqual(options_pb, expected_pb) - - def test_in_progress_property(self): - transaction = self._make_one(mock.sentinel.client) - self.assertFalse(transaction.in_progress) - transaction._id = b"not-none-bites" - self.assertTrue(transaction.in_progress) - - def test_id_property(self): - transaction = self._make_one(mock.sentinel.client) - transaction._id = mock.sentinel.eye_dee - self.assertIs(transaction.id, mock.sentinel.eye_dee) - - def test__begin(self): - from google.cloud.firestore_v1beta1.gapic import firestore_client - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - txn_id = b"to-begin" - response = firestore_pb2.BeginTransactionResponse(transaction=txn_id) - firestore_api.begin_transaction.return_value = response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a transaction and ``begin()`` it. - transaction = self._make_one(client) - self.assertIsNone(transaction._id) - - ret_val = transaction._begin() - self.assertIsNone(ret_val) - self.assertEqual(transaction._id, txn_id) - - # Verify the called mock. - firestore_api.begin_transaction.assert_called_once_with( - client._database_string, options_=None, metadata=client._rpc_metadata - ) - - def test__begin_failure(self): - from google.cloud.firestore_v1beta1.transaction import _CANT_BEGIN - - client = _make_client() - transaction = self._make_one(client) - transaction._id = b"not-none" - - with self.assertRaises(ValueError) as exc_info: - transaction._begin() - - err_msg = _CANT_BEGIN.format(transaction._id) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - def test__clean_up(self): - transaction = self._make_one(mock.sentinel.client) - transaction._write_pbs.extend( - [mock.sentinel.write_pb1, mock.sentinel.write_pb2] - ) - transaction._id = b"not-this-time-my-friend" - - ret_val = transaction._clean_up() - self.assertIsNone(ret_val) - - self.assertEqual(transaction._write_pbs, []) - self.assertIsNone(transaction._id) - - def test__rollback(self): - from google.protobuf import empty_pb2 - from google.cloud.firestore_v1beta1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - firestore_api.rollback.return_value = empty_pb2.Empty() - - # Attach the fake GAPIC to a real client. 
- client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a transaction and roll it back. - transaction = self._make_one(client) - txn_id = b"to-be-r\x00lled" - transaction._id = txn_id - ret_val = transaction._rollback() - self.assertIsNone(ret_val) - self.assertIsNone(transaction._id) - - # Verify the called mock. - firestore_api.rollback.assert_called_once_with( - client._database_string, txn_id, metadata=client._rpc_metadata - ) - - def test__rollback_not_allowed(self): - from google.cloud.firestore_v1beta1.transaction import _CANT_ROLLBACK - - client = _make_client() - transaction = self._make_one(client) - self.assertIsNone(transaction._id) - - with self.assertRaises(ValueError) as exc_info: - transaction._rollback() - - self.assertEqual(exc_info.exception.args, (_CANT_ROLLBACK,)) - - def test__rollback_failure(self): - from google.api_core import exceptions - from google.cloud.firestore_v1beta1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy failure. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - exc = exceptions.InternalServerError("Fire during rollback.") - firestore_api.rollback.side_effect = exc - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a transaction and roll it back. - transaction = self._make_one(client) - txn_id = b"roll-bad-server" - transaction._id = txn_id - - with self.assertRaises(exceptions.InternalServerError) as exc_info: - transaction._rollback() - - self.assertIs(exc_info.exception, exc) - self.assertIsNone(transaction._id) - self.assertEqual(transaction._write_pbs, []) - - # Verify the called mock. - firestore_api.rollback.assert_called_once_with( - client._database_string, txn_id, metadata=client._rpc_metadata - ) - - def test__commit(self): - from google.cloud.firestore_v1beta1.gapic import firestore_client - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult()] - ) - firestore_api.commit.return_value = commit_response - - # Attach the fake GAPIC to a real client. - client = _make_client("phone-joe") - client._firestore_api_internal = firestore_api - - # Actually make a transaction with some mutations and call _commit(). - transaction = self._make_one(client) - txn_id = b"under-over-thru-woods" - transaction._id = txn_id - document = client.document("zap", "galaxy", "ship", "space") - transaction.set(document, {"apple": 4.5}) - write_pbs = transaction._write_pbs[::] - - write_results = transaction._commit() - self.assertEqual(write_results, list(commit_response.write_results)) - # Make sure transaction has no more "changes". - self.assertIsNone(transaction._id) - self.assertEqual(transaction._write_pbs, []) - - # Verify the mocks. 
- firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - def test__commit_not_allowed(self): - from google.cloud.firestore_v1beta1.transaction import _CANT_COMMIT - - transaction = self._make_one(mock.sentinel.client) - self.assertIsNone(transaction._id) - with self.assertRaises(ValueError) as exc_info: - transaction._commit() - - self.assertEqual(exc_info.exception.args, (_CANT_COMMIT,)) - - def test__commit_failure(self): - from google.api_core import exceptions - from google.cloud.firestore_v1beta1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy failure. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - exc = exceptions.InternalServerError("Fire during commit.") - firestore_api.commit.side_effect = exc - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - # Actually make a transaction with some mutations and call _commit(). - transaction = self._make_one(client) - txn_id = b"beep-fail-commit" - transaction._id = txn_id - transaction.create(client.document("up", "down"), {"water": 1.0}) - transaction.delete(client.document("up", "left")) - write_pbs = transaction._write_pbs[::] - - with self.assertRaises(exceptions.InternalServerError) as exc_info: - transaction._commit() - - self.assertIs(exc_info.exception, exc) - self.assertEqual(transaction._id, txn_id) - self.assertEqual(transaction._write_pbs, write_pbs) - - # Verify the called mock. - firestore_api.commit.assert_called_once_with( - client._database_string, - write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - -class Test_Transactional(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.transaction import _Transactional - - return _Transactional - - def _make_one(self, *args, **kwargs): - klass = self._get_target_class() - return klass(*args, **kwargs) - - def test_constructor(self): - wrapped = self._make_one(mock.sentinel.callable_) - self.assertIs(wrapped.to_wrap, mock.sentinel.callable_) - self.assertIsNone(wrapped.current_id) - self.assertIsNone(wrapped.retry_id) - - def test__reset(self): - wrapped = self._make_one(mock.sentinel.callable_) - wrapped.current_id = b"not-none" - wrapped.retry_id = b"also-not" - - ret_val = wrapped._reset() - self.assertIsNone(ret_val) - - self.assertIsNone(wrapped.current_id) - self.assertIsNone(wrapped.retry_id) - - def test__pre_commit_success(self): - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"totes-began" - transaction = _make_transaction(txn_id) - result = wrapped._pre_commit(transaction, "pos", key="word") - self.assertIs(result, mock.sentinel.result) - - self.assertEqual(transaction._id, txn_id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. 
- to_wrap.assert_called_once_with(transaction, "pos", key="word") - firestore_api = transaction._client._firestore_api - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_not_called() - - def test__pre_commit_retry_id_already_set_success(self): - from google.cloud.firestore_v1beta1.proto import common_pb2 - - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - txn_id1 = b"already-set" - wrapped.retry_id = txn_id1 - - txn_id2 = b"ok-here-too" - transaction = _make_transaction(txn_id2) - result = wrapped._pre_commit(transaction) - self.assertIs(result, mock.sentinel.result) - - self.assertEqual(transaction._id, txn_id2) - self.assertEqual(wrapped.current_id, txn_id2) - self.assertEqual(wrapped.retry_id, txn_id1) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction) - firestore_api = transaction._client._firestore_api - options_ = common_pb2.TransactionOptions( - read_write=common_pb2.TransactionOptions.ReadWrite( - retry_transaction=txn_id1 - ) - ) - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=options_, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_not_called() - - def test__pre_commit_failure(self): - exc = RuntimeError("Nope not today.") - to_wrap = mock.Mock(side_effect=exc, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"gotta-fail" - transaction = _make_transaction(txn_id) - with self.assertRaises(RuntimeError) as exc_info: - wrapped._pre_commit(transaction, 10, 20) - self.assertIs(exc_info.exception, exc) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction, 10, 20) - firestore_api = transaction._client._firestore_api - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_called_once_with( - transaction._client._database_string, - txn_id, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.commit.assert_not_called() - - def test__pre_commit_failure_with_rollback_failure(self): - from google.api_core import exceptions - - exc1 = ValueError("I will not be only failure.") - to_wrap = mock.Mock(side_effect=exc1, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"both-will-fail" - transaction = _make_transaction(txn_id) - # Actually force the ``rollback`` to fail as well. - exc2 = exceptions.InternalServerError("Rollback blues.") - firestore_api = transaction._client._firestore_api - firestore_api.rollback.side_effect = exc2 - - # Try to ``_pre_commit`` - with self.assertRaises(exceptions.InternalServerError) as exc_info: - wrapped._pre_commit(transaction, a="b", c="zebra") - self.assertIs(exc_info.exception, exc2) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. 
- to_wrap.assert_called_once_with(transaction, a="b", c="zebra") - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_called_once_with( - transaction._client._database_string, - txn_id, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.commit.assert_not_called() - - def test__maybe_commit_success(self): - wrapped = self._make_one(mock.sentinel.callable_) - - txn_id = b"nyet" - transaction = _make_transaction(txn_id) - transaction._id = txn_id # We won't call ``begin()``. - succeeded = wrapped._maybe_commit(transaction) - self.assertTrue(succeeded) - - # On success, _id is reset. - self.assertIsNone(transaction._id) - - # Verify mocks. - firestore_api = transaction._client._firestore_api - firestore_api.begin_transaction.assert_not_called() - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test__maybe_commit_failure_read_only(self): - from google.api_core import exceptions - - wrapped = self._make_one(mock.sentinel.callable_) - - txn_id = b"failed" - transaction = _make_transaction(txn_id, read_only=True) - transaction._id = txn_id # We won't call ``begin()``. - wrapped.current_id = txn_id # We won't call ``_pre_commit()``. - wrapped.retry_id = txn_id # We won't call ``_pre_commit()``. - - # Actually force the ``commit`` to fail (use ABORTED, but cannot - # retry since read-only). - exc = exceptions.Aborted("Read-only did a bad.") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = exc - - with self.assertRaises(exceptions.Aborted) as exc_info: - wrapped._maybe_commit(transaction) - self.assertIs(exc_info.exception, exc) - - self.assertEqual(transaction._id, txn_id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - firestore_api.begin_transaction.assert_not_called() - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test__maybe_commit_failure_can_retry(self): - from google.api_core import exceptions - - wrapped = self._make_one(mock.sentinel.callable_) - - txn_id = b"failed-but-retry" - transaction = _make_transaction(txn_id) - transaction._id = txn_id # We won't call ``begin()``. - wrapped.current_id = txn_id # We won't call ``_pre_commit()``. - wrapped.retry_id = txn_id # We won't call ``_pre_commit()``. - - # Actually force the ``commit`` to fail. - exc = exceptions.Aborted("Read-write did a bad.") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = exc - - succeeded = wrapped._maybe_commit(transaction) - self.assertFalse(succeeded) - - self.assertEqual(transaction._id, txn_id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. 
- firestore_api.begin_transaction.assert_not_called() - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test__maybe_commit_failure_cannot_retry(self): - from google.api_core import exceptions - - wrapped = self._make_one(mock.sentinel.callable_) - - txn_id = b"failed-but-not-retryable" - transaction = _make_transaction(txn_id) - transaction._id = txn_id # We won't call ``begin()``. - wrapped.current_id = txn_id # We won't call ``_pre_commit()``. - wrapped.retry_id = txn_id # We won't call ``_pre_commit()``. - - # Actually force the ``commit`` to fail. - exc = exceptions.InternalServerError("Real bad thing") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = exc - - with self.assertRaises(exceptions.InternalServerError) as exc_info: - wrapped._maybe_commit(transaction) - self.assertIs(exc_info.exception, exc) - - self.assertEqual(transaction._id, txn_id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - firestore_api.begin_transaction.assert_not_called() - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test___call__success_first_attempt(self): - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"whole-enchilada" - transaction = _make_transaction(txn_id) - result = wrapped(transaction, "a", b="c") - self.assertIs(result, mock.sentinel.result) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction, "a", b="c") - firestore_api = transaction._client._firestore_api - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_not_called() - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - def test___call__success_second_attempt(self): - from google.api_core import exceptions - from google.cloud.firestore_v1beta1.proto import common_pb2 - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"whole-enchilada" - transaction = _make_transaction(txn_id) - - # Actually force the ``commit`` to fail on first / succeed on second. - exc = exceptions.Aborted("Contention junction.") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = [ - exc, - firestore_pb2.CommitResponse(write_results=[write_pb2.WriteResult()]), - ] - - # Call the __call__-able ``wrapped``. - result = wrapped(transaction, "a", b="c") - self.assertIs(result, mock.sentinel.result) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. 
- wrapped_call = mock.call(transaction, "a", b="c") - self.assertEqual(to_wrap.mock_calls, [wrapped_call, wrapped_call]) - firestore_api = transaction._client._firestore_api - db_str = transaction._client._database_string - options_ = common_pb2.TransactionOptions( - read_write=common_pb2.TransactionOptions.ReadWrite(retry_transaction=txn_id) - ) - self.assertEqual( - firestore_api.begin_transaction.mock_calls, - [ - mock.call( - db_str, options_=None, metadata=transaction._client._rpc_metadata - ), - mock.call( - db_str, - options_=options_, - metadata=transaction._client._rpc_metadata, - ), - ], - ) - firestore_api.rollback.assert_not_called() - commit_call = mock.call( - db_str, [], transaction=txn_id, metadata=transaction._client._rpc_metadata - ) - self.assertEqual(firestore_api.commit.mock_calls, [commit_call, commit_call]) - - def test___call__failure(self): - from google.api_core import exceptions - from google.cloud.firestore_v1beta1.transaction import _EXCEED_ATTEMPTS_TEMPLATE - - to_wrap = mock.Mock(return_value=mock.sentinel.result, spec=[]) - wrapped = self._make_one(to_wrap) - - txn_id = b"only-one-shot" - transaction = _make_transaction(txn_id, max_attempts=1) - - # Actually force the ``commit`` to fail. - exc = exceptions.Aborted("Contention just once.") - firestore_api = transaction._client._firestore_api - firestore_api.commit.side_effect = exc - - # Call the __call__-able ``wrapped``. - with self.assertRaises(ValueError) as exc_info: - wrapped(transaction, "here", there=1.5) - - err_msg = _EXCEED_ATTEMPTS_TEMPLATE.format(transaction._max_attempts) - self.assertEqual(exc_info.exception.args, (err_msg,)) - - self.assertIsNone(transaction._id) - self.assertEqual(wrapped.current_id, txn_id) - self.assertEqual(wrapped.retry_id, txn_id) - - # Verify mocks. - to_wrap.assert_called_once_with(transaction, "here", there=1.5) - firestore_api.begin_transaction.assert_called_once_with( - transaction._client._database_string, - options_=None, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.rollback.assert_called_once_with( - transaction._client._database_string, - txn_id, - metadata=transaction._client._rpc_metadata, - ) - firestore_api.commit.assert_called_once_with( - transaction._client._database_string, - [], - transaction=txn_id, - metadata=transaction._client._rpc_metadata, - ) - - -class Test_transactional(unittest.TestCase): - @staticmethod - def _call_fut(to_wrap): - from google.cloud.firestore_v1beta1.transaction import transactional - - return transactional(to_wrap) - - def test_it(self): - from google.cloud.firestore_v1beta1.transaction import _Transactional - - wrapped = self._call_fut(mock.sentinel.callable_) - self.assertIsInstance(wrapped, _Transactional) - self.assertIs(wrapped.to_wrap, mock.sentinel.callable_) - - -class Test__commit_with_retry(unittest.TestCase): - @staticmethod - def _call_fut(client, write_pbs, transaction_id): - from google.cloud.firestore_v1beta1.transaction import _commit_with_retry - - return _commit_with_retry(client, write_pbs, transaction_id) - - @mock.patch("google.cloud.firestore_v1beta1.transaction._sleep") - def test_success_first_attempt(self, _sleep): - from google.cloud.firestore_v1beta1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - - # Attach the fake GAPIC to a real client. 
- client = _make_client("summer") - client._firestore_api_internal = firestore_api - - # Call function and check result. - txn_id = b"cheeeeeez" - commit_response = self._call_fut(client, mock.sentinel.write_pbs, txn_id) - self.assertIs(commit_response, firestore_api.commit.return_value) - - # Verify mocks used. - _sleep.assert_not_called() - firestore_api.commit.assert_called_once_with( - client._database_string, - mock.sentinel.write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - @mock.patch( - "google.cloud.firestore_v1beta1.transaction._sleep", side_effect=[2.0, 4.0] - ) - def test_success_third_attempt(self, _sleep): - from google.api_core import exceptions - from google.cloud.firestore_v1beta1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - # Make sure the first two requests fail and the third succeeds. - firestore_api.commit.side_effect = [ - exceptions.ServiceUnavailable("Server sleepy."), - exceptions.ServiceUnavailable("Server groggy."), - mock.sentinel.commit_response, - ] - - # Attach the fake GAPIC to a real client. - client = _make_client("outside") - client._firestore_api_internal = firestore_api - - # Call function and check result. - txn_id = b"the-world\x00" - commit_response = self._call_fut(client, mock.sentinel.write_pbs, txn_id) - self.assertIs(commit_response, mock.sentinel.commit_response) - - # Verify mocks used. - self.assertEqual(_sleep.call_count, 2) - _sleep.assert_any_call(1.0) - _sleep.assert_any_call(2.0) - # commit() called same way 3 times. - commit_call = mock.call( - client._database_string, - mock.sentinel.write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - self.assertEqual( - firestore_api.commit.mock_calls, [commit_call, commit_call, commit_call] - ) - - @mock.patch("google.cloud.firestore_v1beta1.transaction._sleep") - def test_failure_first_attempt(self, _sleep): - from google.api_core import exceptions - from google.cloud.firestore_v1beta1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - # Make sure the first request fails with an un-retryable error. - exc = exceptions.ResourceExhausted("We ran out of fries.") - firestore_api.commit.side_effect = exc - - # Attach the fake GAPIC to a real client. - client = _make_client("peanut-butter") - client._firestore_api_internal = firestore_api - - # Call function and check result. - txn_id = b"\x08\x06\x07\x05\x03\x00\x09-jenny" - with self.assertRaises(exceptions.ResourceExhausted) as exc_info: - self._call_fut(client, mock.sentinel.write_pbs, txn_id) - - self.assertIs(exc_info.exception, exc) - - # Verify mocks used. - _sleep.assert_not_called() - firestore_api.commit.assert_called_once_with( - client._database_string, - mock.sentinel.write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - - @mock.patch("google.cloud.firestore_v1beta1.transaction._sleep", return_value=2.0) - def test_failure_second_attempt(self, _sleep): - from google.api_core import exceptions - from google.cloud.firestore_v1beta1.gapic import firestore_client - - # Create a minimal fake GAPIC with a dummy result. - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - # Make sure the first request fails retry-able and second - # fails non-retryable. 
- exc1 = exceptions.ServiceUnavailable("Come back next time.") - exc2 = exceptions.InternalServerError("Server on fritz.") - firestore_api.commit.side_effect = [exc1, exc2] - - # Attach the fake GAPIC to a real client. - client = _make_client("peanut-butter") - client._firestore_api_internal = firestore_api - - # Call function and check result. - txn_id = b"the-journey-when-and-where-well-go" - with self.assertRaises(exceptions.InternalServerError) as exc_info: - self._call_fut(client, mock.sentinel.write_pbs, txn_id) - - self.assertIs(exc_info.exception, exc2) - - # Verify mocks used. - _sleep.assert_called_once_with(1.0) - # commit() called same way 2 times. - commit_call = mock.call( - client._database_string, - mock.sentinel.write_pbs, - transaction=txn_id, - metadata=client._rpc_metadata, - ) - self.assertEqual(firestore_api.commit.mock_calls, [commit_call, commit_call]) - - -class Test__sleep(unittest.TestCase): - @staticmethod - def _call_fut(current_sleep, **kwargs): - from google.cloud.firestore_v1beta1.transaction import _sleep - - return _sleep(current_sleep, **kwargs) - - @mock.patch("random.uniform", return_value=5.5) - @mock.patch("time.sleep", return_value=None) - def test_defaults(self, sleep, uniform): - curr_sleep = 10.0 - self.assertLessEqual(uniform.return_value, curr_sleep) - - new_sleep = self._call_fut(curr_sleep) - self.assertEqual(new_sleep, 2.0 * curr_sleep) - - uniform.assert_called_once_with(0.0, curr_sleep) - sleep.assert_called_once_with(uniform.return_value) - - @mock.patch("random.uniform", return_value=10.5) - @mock.patch("time.sleep", return_value=None) - def test_explicit(self, sleep, uniform): - curr_sleep = 12.25 - self.assertLessEqual(uniform.return_value, curr_sleep) - - multiplier = 1.5 - new_sleep = self._call_fut(curr_sleep, max_sleep=100.0, multiplier=multiplier) - self.assertEqual(new_sleep, multiplier * curr_sleep) - - uniform.assert_called_once_with(0.0, curr_sleep) - sleep.assert_called_once_with(uniform.return_value) - - @mock.patch("random.uniform", return_value=6.75) - @mock.patch("time.sleep", return_value=None) - def test_exceeds_max(self, sleep, uniform): - curr_sleep = 20.0 - self.assertLessEqual(uniform.return_value, curr_sleep) - - max_sleep = 38.5 - new_sleep = self._call_fut(curr_sleep, max_sleep=max_sleep, multiplier=2.0) - self.assertEqual(new_sleep, max_sleep) - - uniform.assert_called_once_with(0.0, curr_sleep) - sleep.assert_called_once_with(uniform.return_value) - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_client(project="feral-tom-cat"): - from google.cloud.firestore_v1beta1.client import Client - - credentials = _make_credentials() - - with pytest.deprecated_call(): - return Client(project=project, credentials=credentials) - - -def _make_transaction(txn_id, **txn_kwargs): - from google.protobuf import empty_pb2 - from google.cloud.firestore_v1beta1.gapic import firestore_client - from google.cloud.firestore_v1beta1.proto import firestore_pb2 - from google.cloud.firestore_v1beta1.proto import write_pb2 - from google.cloud.firestore_v1beta1.transaction import Transaction - - # Create a fake GAPIC ... - firestore_api = mock.create_autospec( - firestore_client.FirestoreClient, instance=True - ) - # ... with a dummy ``BeginTransactionResponse`` result ... - begin_response = firestore_pb2.BeginTransactionResponse(transaction=txn_id) - firestore_api.begin_transaction.return_value = begin_response - # ... 
and a dummy ``Rollback`` result ... - firestore_api.rollback.return_value = empty_pb2.Empty() - # ... and a dummy ``Commit`` result. - commit_response = firestore_pb2.CommitResponse( - write_results=[write_pb2.WriteResult()] - ) - firestore_api.commit.return_value = commit_response - - # Attach the fake GAPIC to a real client. - client = _make_client() - client._firestore_api_internal = firestore_api - - return Transaction(client, **txn_kwargs) diff --git a/firestore/tests/unit/v1beta1/test_transforms.py b/firestore/tests/unit/v1beta1/test_transforms.py deleted file mode 100644 index 0f549ae07565..000000000000 --- a/firestore/tests/unit/v1beta1/test_transforms.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - - -class Test_ValueList(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.firestore_v1beta1.transforms import _ValueList - - return _ValueList - - def _make_one(self, values): - return self._get_target_class()(values) - - def test_ctor_w_non_list_non_tuple(self): - invalid_values = (None, u"phred", b"DEADBEEF", 123, {}, object()) - for invalid_value in invalid_values: - with self.assertRaises(ValueError): - self._make_one(invalid_value) - - def test_ctor_w_empty(self): - with self.assertRaises(ValueError): - self._make_one([]) - - def test_ctor_w_non_empty_list(self): - values = ["phred", "bharney"] - inst = self._make_one(values) - self.assertEqual(inst.values, values) - - def test_ctor_w_non_empty_tuple(self): - values = ("phred", "bharney") - inst = self._make_one(values) - self.assertEqual(inst.values, list(values)) - - def test___eq___other_type(self): - values = ("phred", "bharney") - inst = self._make_one(values) - other = object() - self.assertFalse(inst == other) - - def test___eq___different_values(self): - values = ("phred", "bharney") - other_values = ("wylma", "bhetty") - inst = self._make_one(values) - other = self._make_one(other_values) - self.assertFalse(inst == other) - - def test___eq___same_values(self): - values = ("phred", "bharney") - inst = self._make_one(values) - other = self._make_one(values) - self.assertTrue(inst == other) diff --git a/firestore/tests/unit/v1beta1/test_watch.py b/firestore/tests/unit/v1beta1/test_watch.py deleted file mode 100644 index 6d8ba5a040bf..000000000000 --- a/firestore/tests/unit/v1beta1/test_watch.py +++ /dev/null @@ -1,832 +0,0 @@ -import datetime -import unittest -import mock -from google.cloud.firestore_v1beta1.proto import firestore_pb2 - - -class TestWatchDocTree(unittest.TestCase): - def _makeOne(self): - from google.cloud.firestore_v1beta1.watch import WatchDocTree - - return WatchDocTree() - - def test_insert_and_keys(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - self.assertEqual(sorted(inst.keys()), ["a", "b"]) - - def test_remove_and_keys(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - inst = 
inst.remove("a") - self.assertEqual(sorted(inst.keys()), ["b"]) - - def test_insert_and_find(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - val = inst.find("a") - self.assertEqual(val.value, 2) - - def test___len__(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - self.assertEqual(len(inst), 2) - - def test___iter__(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - inst = inst.insert("a", 2) - self.assertEqual(sorted(list(inst)), ["a", "b"]) - - def test___contains__(self): - inst = self._makeOne() - inst = inst.insert("b", 1) - self.assertTrue("b" in inst) - self.assertFalse("a" in inst) - - -class TestDocumentChange(unittest.TestCase): - def _makeOne(self, type, document, old_index, new_index): - from google.cloud.firestore_v1beta1.watch import DocumentChange - - return DocumentChange(type, document, old_index, new_index) - - def test_ctor(self): - inst = self._makeOne("type", "document", "old_index", "new_index") - self.assertEqual(inst.type, "type") - self.assertEqual(inst.document, "document") - self.assertEqual(inst.old_index, "old_index") - self.assertEqual(inst.new_index, "new_index") - - -class TestWatchResult(unittest.TestCase): - def _makeOne(self, snapshot, name, change_type): - from google.cloud.firestore_v1beta1.watch import WatchResult - - return WatchResult(snapshot, name, change_type) - - def test_ctor(self): - inst = self._makeOne("snapshot", "name", "change_type") - self.assertEqual(inst.snapshot, "snapshot") - self.assertEqual(inst.name, "name") - self.assertEqual(inst.change_type, "change_type") - - -class Test_maybe_wrap_exception(unittest.TestCase): - def _callFUT(self, exc): - from google.cloud.firestore_v1beta1.watch import _maybe_wrap_exception - - return _maybe_wrap_exception(exc) - - def test_is_grpc_error(self): - import grpc - from google.api_core.exceptions import GoogleAPICallError - - exc = grpc.RpcError() - result = self._callFUT(exc) - self.assertEqual(result.__class__, GoogleAPICallError) - - def test_is_not_grpc_error(self): - exc = ValueError() - result = self._callFUT(exc) - self.assertEqual(result.__class__, ValueError) - - -class Test_document_watch_comparator(unittest.TestCase): - def _callFUT(self, doc1, doc2): - from google.cloud.firestore_v1beta1.watch import document_watch_comparator - - return document_watch_comparator(doc1, doc2) - - def test_same_doc(self): - result = self._callFUT(1, 1) - self.assertEqual(result, 0) - - def test_diff_doc(self): - self.assertRaises(AssertionError, self._callFUT, 1, 2) - - -class TestWatch(unittest.TestCase): - def _makeOne( - self, - document_reference=None, - firestore=None, - target=None, - comparator=None, - snapshot_callback=None, - snapshot_class=None, - reference_class=None, - ): # pragma: NO COVER - from google.cloud.firestore_v1beta1.watch import Watch - - if document_reference is None: - document_reference = DummyDocumentReference() - if firestore is None: - firestore = DummyFirestore() - if target is None: - WATCH_TARGET_ID = 0x5079 # "Py" - target = {"documents": {"documents": ["/"]}, "target_id": WATCH_TARGET_ID} - if comparator is None: - comparator = self._document_watch_comparator - if snapshot_callback is None: - snapshot_callback = self._snapshot_callback - if snapshot_class is None: - snapshot_class = DummyDocumentSnapshot - if reference_class is None: - reference_class = DummyDocumentReference - inst = Watch( - document_reference, - firestore, - target, - comparator, - snapshot_callback, - 
snapshot_class, - reference_class, - BackgroundConsumer=DummyBackgroundConsumer, - ResumableBidiRpc=DummyRpc, - ) - return inst - - def setUp(self): - self.snapshotted = None - - def _document_watch_comparator(self, doc1, doc2): # pragma: NO COVER - return 0 - - def _snapshot_callback(self, docs, changes, read_time): - self.snapshotted = (docs, changes, read_time) - - def test_ctor(self): - inst = self._makeOne() - self.assertTrue(inst._consumer.started) - self.assertTrue(inst._rpc.callbacks, [inst._on_rpc_done]) - - def test__on_rpc_done(self): - inst = self._makeOne() - threading = DummyThreading() - with mock.patch("google.cloud.firestore_v1beta1.watch.threading", threading): - inst._on_rpc_done(True) - from google.cloud.firestore_v1beta1.watch import _RPC_ERROR_THREAD_NAME - - self.assertTrue(threading.threads[_RPC_ERROR_THREAD_NAME].started) - - def test_close(self): - inst = self._makeOne() - inst.close() - self.assertEqual(inst._consumer, None) - self.assertEqual(inst._rpc, None) - self.assertTrue(inst._closed) - - def test_close_already_closed(self): - inst = self._makeOne() - inst._closed = True - old_consumer = inst._consumer - inst.close() - self.assertEqual(inst._consumer, old_consumer) - - def test_close_inactive(self): - inst = self._makeOne() - old_consumer = inst._consumer - old_consumer.is_active = False - inst.close() - self.assertEqual(old_consumer.stopped, False) - - def test_unsubscribe(self): - inst = self._makeOne() - inst.unsubscribe() - self.assertTrue(inst._rpc is None) - - def test_for_document(self): - from google.cloud.firestore_v1beta1.watch import Watch - - docref = DummyDocumentReference() - snapshot_callback = self._snapshot_callback - snapshot_class_instance = DummyDocumentSnapshot - document_reference_class_instance = DummyDocumentReference - modulename = "google.cloud.firestore_v1beta1.watch" - with mock.patch("%s.Watch.ResumableBidiRpc" % modulename, DummyRpc): - with mock.patch( - "%s.Watch.BackgroundConsumer" % modulename, DummyBackgroundConsumer - ): - inst = Watch.for_document( - docref, - snapshot_callback, - snapshot_class_instance, - document_reference_class_instance, - ) - self.assertTrue(inst._consumer.started) - self.assertTrue(inst._rpc.callbacks, [inst._on_rpc_done]) - - def test_for_query(self): - from google.cloud.firestore_v1beta1.watch import Watch - - snapshot_callback = self._snapshot_callback - snapshot_class_instance = DummyDocumentSnapshot - document_reference_class_instance = DummyDocumentReference - modulename = "google.cloud.firestore_v1beta1.watch" - pb2 = DummyPb2() - with mock.patch("%s.firestore_pb2" % modulename, pb2): - with mock.patch("%s.Watch.ResumableBidiRpc" % modulename, DummyRpc): - with mock.patch( - "%s.Watch.BackgroundConsumer" % modulename, DummyBackgroundConsumer - ): - query = DummyQuery() - inst = Watch.for_query( - query, - snapshot_callback, - snapshot_class_instance, - document_reference_class_instance, - ) - self.assertTrue(inst._consumer.started) - self.assertTrue(inst._rpc.callbacks, [inst._on_rpc_done]) - self.assertEqual(inst._targets["query"], "dummy query target") - - def test_on_snapshot_target_no_change_no_target_ids_not_current(self): - inst = self._makeOne() - proto = DummyProto() - inst.on_snapshot(proto) # nothing to assert, no mutations, no rtnval - - def test_on_snapshot_target_no_change_no_target_ids_current(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change.read_time = 1 - inst.current = True - - def push(read_time, next_resume_token): - inst._read_time = read_time 
- inst._next_resume_token = next_resume_token - - inst.push = push - inst.on_snapshot(proto) - self.assertEqual(inst._read_time, 1) - self.assertEqual(inst._next_resume_token, None) - - def test_on_snapshot_target_add(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change.target_change_type = firestore_pb2.TargetChange.ADD - proto.target_change.target_ids = [1] # not "Py" - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertEqual(str(exc.exception), "Unexpected target ID 1 sent by server") - - def test_on_snapshot_target_remove(self): - inst = self._makeOne() - proto = DummyProto() - target_change = proto.target_change - target_change.target_change_type = firestore_pb2.TargetChange.REMOVE - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertEqual(str(exc.exception), "Error 1: hi") - - def test_on_snapshot_target_remove_nocause(self): - inst = self._makeOne() - proto = DummyProto() - target_change = proto.target_change - target_change.cause = None - target_change.target_change_type = firestore_pb2.TargetChange.REMOVE - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertEqual(str(exc.exception), "Error 13: internal error") - - def test_on_snapshot_target_reset(self): - inst = self._makeOne() - - def reset(): - inst._docs_reset = True - - inst._reset_docs = reset - proto = DummyProto() - target_change = proto.target_change - target_change.target_change_type = firestore_pb2.TargetChange.RESET - inst.on_snapshot(proto) - self.assertTrue(inst._docs_reset) - - def test_on_snapshot_target_current(self): - inst = self._makeOne() - inst.current = False - proto = DummyProto() - target_change = proto.target_change - target_change.target_change_type = firestore_pb2.TargetChange.CURRENT - inst.on_snapshot(proto) - self.assertTrue(inst.current) - - def test_on_snapshot_target_unknown(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change.target_change_type = "unknown" - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertTrue(inst._consumer is None) - self.assertTrue(inst._rpc is None) - self.assertEqual(str(exc.exception), "Unknown target change type: unknown ") - - def test_on_snapshot_document_change_removed(self): - from google.cloud.firestore_v1beta1.watch import WATCH_TARGET_ID, ChangeType - - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change.removed_target_ids = [WATCH_TARGET_ID] - - class DummyDocument: - name = "fred" - - proto.document_change.document = DummyDocument() - inst.on_snapshot(proto) - self.assertTrue(inst.change_map["fred"] is ChangeType.REMOVED) - - def test_on_snapshot_document_change_changed(self): - from google.cloud.firestore_v1beta1.watch import WATCH_TARGET_ID - - inst = self._makeOne() - - proto = DummyProto() - proto.target_change = "" - proto.document_change.target_ids = [WATCH_TARGET_ID] - - class DummyDocument: - name = "fred" - fields = {} - create_time = None - update_time = None - - proto.document_change.document = DummyDocument() - inst.on_snapshot(proto) - self.assertEqual(inst.change_map["fred"].data, {}) - - def test_on_snapshot_document_change_changed_docname_db_prefix(self): - # TODO: Verify the current behavior. The change map currently contains - # the db-prefixed document name and not the bare document name. 
- from google.cloud.firestore_v1beta1.watch import WATCH_TARGET_ID - - inst = self._makeOne() - - proto = DummyProto() - proto.target_change = "" - proto.document_change.target_ids = [WATCH_TARGET_ID] - - class DummyDocument: - name = "abc://foo/documents/fred" - fields = {} - create_time = None - update_time = None - - proto.document_change.document = DummyDocument() - inst._firestore._database_string = "abc://foo" - inst.on_snapshot(proto) - self.assertEqual(inst.change_map["abc://foo/documents/fred"].data, {}) - - def test_on_snapshot_document_change_neither_changed_nor_removed(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change.target_ids = [] - - inst.on_snapshot(proto) - self.assertTrue(not inst.change_map) - - def test_on_snapshot_document_removed(self): - from google.cloud.firestore_v1beta1.watch import ChangeType - - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change = "" - - class DummyRemove(object): - document = "fred" - - remove = DummyRemove() - proto.document_remove = remove - proto.document_delete = "" - inst.on_snapshot(proto) - self.assertTrue(inst.change_map["fred"] is ChangeType.REMOVED) - - def test_on_snapshot_filter_update(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change = "" - proto.document_remove = "" - proto.document_delete = "" - - class DummyFilter(object): - count = 999 - - proto.filter = DummyFilter() - - def reset(): - inst._docs_reset = True - - inst._reset_docs = reset - inst.on_snapshot(proto) - self.assertTrue(inst._docs_reset) - - def test_on_snapshot_filter_update_no_size_change(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change = "" - proto.document_remove = "" - proto.document_delete = "" - - class DummyFilter(object): - count = 0 - - proto.filter = DummyFilter() - inst._docs_reset = False - - inst.on_snapshot(proto) - self.assertFalse(inst._docs_reset) - - def test_on_snapshot_unknown_listen_type(self): - inst = self._makeOne() - proto = DummyProto() - proto.target_change = "" - proto.document_change = "" - proto.document_remove = "" - proto.document_delete = "" - proto.filter = "" - with self.assertRaises(Exception) as exc: - inst.on_snapshot(proto) - self.assertTrue( - str(exc.exception).startswith("Unknown listen response type"), - str(exc.exception), - ) - - def test_push_callback_called_no_changes(self): - import pytz - - class DummyReadTime(object): - seconds = 1534858278 - - inst = self._makeOne() - inst.push(DummyReadTime, "token") - self.assertEqual( - self.snapshotted, - ([], [], datetime.datetime.fromtimestamp(DummyReadTime.seconds, pytz.utc)), - ) - self.assertTrue(inst.has_pushed) - self.assertEqual(inst.resume_token, "token") - - def test_push_already_pushed(self): - class DummyReadTime(object): - seconds = 1534858278 - - inst = self._makeOne() - inst.has_pushed = True - inst.push(DummyReadTime, "token") - self.assertEqual(self.snapshotted, None) - self.assertTrue(inst.has_pushed) - self.assertEqual(inst.resume_token, "token") - - def test__current_size_empty(self): - inst = self._makeOne() - result = inst._current_size() - self.assertEqual(result, 0) - - def test__current_size_docmap_has_one(self): - inst = self._makeOne() - inst.doc_map["a"] = 1 - result = inst._current_size() - self.assertEqual(result, 1) - - def test__affects_target_target_id_None(self): - inst = self._makeOne() - self.assertTrue(inst._affects_target(None, 
[])) - - def test__affects_target_current_id_in_target_ids(self): - inst = self._makeOne() - self.assertTrue(inst._affects_target([1], 1)) - - def test__affects_target_current_id_not_in_target_ids(self): - inst = self._makeOne() - self.assertFalse(inst._affects_target([1], 2)) - - def test__extract_changes_doc_removed(self): - from google.cloud.firestore_v1beta1.watch import ChangeType - - inst = self._makeOne() - changes = {"name": ChangeType.REMOVED} - doc_map = {"name": True} - results = inst._extract_changes(doc_map, changes, None) - self.assertEqual(results, (["name"], [], [])) - - def test__extract_changes_doc_removed_docname_not_in_docmap(self): - from google.cloud.firestore_v1beta1.watch import ChangeType - - inst = self._makeOne() - changes = {"name": ChangeType.REMOVED} - doc_map = {} - results = inst._extract_changes(doc_map, changes, None) - self.assertEqual(results, ([], [], [])) - - def test__extract_changes_doc_updated(self): - inst = self._makeOne() - - class Dummy(object): - pass - - doc = Dummy() - snapshot = Dummy() - changes = {"name": snapshot} - doc_map = {"name": doc} - results = inst._extract_changes(doc_map, changes, 1) - self.assertEqual(results, ([], [], [snapshot])) - self.assertEqual(snapshot.read_time, 1) - - def test__extract_changes_doc_updated_read_time_is_None(self): - inst = self._makeOne() - - class Dummy(object): - pass - - doc = Dummy() - snapshot = Dummy() - snapshot.read_time = None - changes = {"name": snapshot} - doc_map = {"name": doc} - results = inst._extract_changes(doc_map, changes, None) - self.assertEqual(results, ([], [], [snapshot])) - self.assertEqual(snapshot.read_time, None) - - def test__extract_changes_doc_added(self): - inst = self._makeOne() - - class Dummy(object): - pass - - snapshot = Dummy() - changes = {"name": snapshot} - doc_map = {} - results = inst._extract_changes(doc_map, changes, 1) - self.assertEqual(results, ([], [snapshot], [])) - self.assertEqual(snapshot.read_time, 1) - - def test__extract_changes_doc_added_read_time_is_None(self): - inst = self._makeOne() - - class Dummy(object): - pass - - snapshot = Dummy() - snapshot.read_time = None - changes = {"name": snapshot} - doc_map = {} - results = inst._extract_changes(doc_map, changes, None) - self.assertEqual(results, ([], [snapshot], [])) - self.assertEqual(snapshot.read_time, None) - - def test__compute_snapshot_doctree_and_docmap_disagree_about_length(self): - inst = self._makeOne() - doc_tree = {} - doc_map = {None: None} - self.assertRaises( - AssertionError, inst._compute_snapshot, doc_tree, doc_map, None, None, None - ) - - def test__compute_snapshot_operation_relative_ordering(self): - from google.cloud.firestore_v1beta1.watch import WatchDocTree - - doc_tree = WatchDocTree() - - class DummyDoc(object): - update_time = mock.sentinel - - deleted_doc = DummyDoc() - added_doc = DummyDoc() - added_doc._document_path = "/added" - updated_doc = DummyDoc() - updated_doc._document_path = "/updated" - doc_tree = doc_tree.insert(deleted_doc, None) - doc_tree = doc_tree.insert(updated_doc, None) - doc_map = {"/deleted": deleted_doc, "/updated": updated_doc} - added_snapshot = DummyDocumentSnapshot(added_doc, None, True, None, None, None) - added_snapshot.reference = added_doc - updated_snapshot = DummyDocumentSnapshot( - updated_doc, None, True, None, None, None - ) - updated_snapshot.reference = updated_doc - delete_changes = ["/deleted"] - add_changes = [added_snapshot] - update_changes = [updated_snapshot] - inst = self._makeOne() - updated_tree, updated_map, 
applied_changes = inst._compute_snapshot( - doc_tree, doc_map, delete_changes, add_changes, update_changes - ) - # TODO: Verify that the assertion here is correct. - self.assertEqual( - updated_map, {"/updated": updated_snapshot, "/added": added_snapshot} - ) - - def test__compute_snapshot_modify_docs_updated_doc_no_timechange(self): - from google.cloud.firestore_v1beta1.watch import WatchDocTree - - doc_tree = WatchDocTree() - - class DummyDoc(object): - pass - - updated_doc_v1 = DummyDoc() - updated_doc_v1.update_time = 1 - updated_doc_v1._document_path = "/updated" - updated_doc_v2 = DummyDoc() - updated_doc_v2.update_time = 1 - updated_doc_v2._document_path = "/updated" - doc_tree = doc_tree.insert("/updated", updated_doc_v1) - doc_map = {"/updated": updated_doc_v1} - updated_snapshot = DummyDocumentSnapshot( - updated_doc_v2, None, True, None, None, 1 - ) - delete_changes = [] - add_changes = [] - update_changes = [updated_snapshot] - inst = self._makeOne() - updated_tree, updated_map, applied_changes = inst._compute_snapshot( - doc_tree, doc_map, delete_changes, add_changes, update_changes - ) - self.assertEqual(updated_map, doc_map) # no change - - def test__reset_docs(self): - from google.cloud.firestore_v1beta1.watch import ChangeType - - inst = self._makeOne() - inst.change_map = {None: None} - from google.cloud.firestore_v1beta1.watch import WatchDocTree - - doc = DummyDocumentReference("doc") - doc_tree = WatchDocTree() - snapshot = DummyDocumentSnapshot(doc, None, True, None, None, None) - snapshot.reference = doc - doc_tree = doc_tree.insert(snapshot, None) - inst.doc_tree = doc_tree - inst._reset_docs() - self.assertEqual(inst.change_map, {"/doc": ChangeType.REMOVED}) - self.assertEqual(inst.resume_token, None) - self.assertFalse(inst.current) - - -class DummyFirestoreStub(object): - def Listen(self): # pragma: NO COVER - pass - - -class DummyFirestoreClient(object): - def __init__(self): - self.transport = mock.Mock(_stubs={"firestore_stub": DummyFirestoreStub()}) - - -class DummyDocumentReference(object): - def __init__(self, *document_path, **kw): - if "client" not in kw: - self._client = DummyFirestore() - else: - self._client = kw["client"] - - self._path = document_path - self._document_path = "/" + "/".join(document_path) - self.__dict__.update(kw) - - -class DummyQuery(object): # pragma: NO COVER - def __init__(self, **kw): - if "client" not in kw: - self._client = DummyFirestore() - else: - self._client = kw["client"] - - if "comparator" not in kw: - # don't really do the comparison, just return 0 (equal) for all - self._comparator = lambda x, y: 1 - else: - self._comparator = kw["comparator"] - - def _to_protobuf(self): - return "" - - -class DummyFirestore(object): - _firestore_api = DummyFirestoreClient() - _database_string = "abc://bar/" - _rpc_metadata = None - - def document(self, *document_path): # pragma: NO COVER - if len(document_path) == 1: - path = document_path[0].split("/") - else: - path = document_path - - return DummyDocumentReference(*path, client=self) - - -class DummyDocumentSnapshot(object): - # def __init__(self, **kw): - # self.__dict__.update(kw) - def __init__(self, reference, data, exists, read_time, create_time, update_time): - self.reference = reference - self.data = data - self.exists = exists - self.read_time = read_time - self.create_time = create_time - self.update_time = update_time - - def __str__(self): - return "%s-%s" % (self.reference._document_path, self.read_time) - - def __hash__(self): - return hash(str(self)) - - -class 
DummyBackgroundConsumer(object): - started = False - stopped = False - is_active = True - - def __init__(self, rpc, on_snapshot): - self._rpc = rpc - self.on_snapshot = on_snapshot - - def start(self): - self.started = True - - def stop(self): - self.stopped = True - self.is_active = False - - -class DummyThread(object): - started = False - - def __init__(self, name, target, kwargs): - self.name = name - self.target = target - self.kwargs = kwargs - - def start(self): - self.started = True - - -class DummyThreading(object): - def __init__(self): - self.threads = {} - - def Thread(self, name, target, kwargs): - thread = DummyThread(name, target, kwargs) - self.threads[name] = thread - return thread - - -class DummyRpc(object): - def __init__(self, listen, initial_request, should_recover, metadata=None): - self.listen = listen - self.initial_request = initial_request - self.should_recover = should_recover - self.closed = False - self.callbacks = [] - self._metadata = metadata - - def add_done_callback(self, callback): - self.callbacks.append(callback) - - def close(self): - self.closed = True - - -class DummyCause(object): - code = 1 - message = "hi" - - -class DummyChange(object): - def __init__(self): - self.target_ids = [] - self.removed_target_ids = [] - self.read_time = 0 - self.target_change_type = firestore_pb2.TargetChange.NO_CHANGE - self.resume_token = None - self.cause = DummyCause() - - -class DummyProto(object): - def __init__(self): - self.target_change = DummyChange() - self.document_change = DummyChange() - - -class DummyTarget(object): - def QueryTarget(self, **kw): - self.kw = kw - return "dummy query target" - - -class DummyPb2(object): - - Target = DummyTarget() - - def ListenRequest(self, **kw): - pass diff --git a/firestore/tests/unit/v1beta1/testdata/create-all-transforms.textproto b/firestore/tests/unit/v1beta1/testdata/create-all-transforms.textproto deleted file mode 100644 index bbdf19e4df4a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-all-transforms.textproto +++ /dev/null @@ -1,64 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can be created with any amount of transforms. 
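A minimal sketch of the behaviour this fixture describes, assuming the public google.cloud.firestore client rather than the v1beta1 internals being deleted here: plain fields stay in the update write, while every transform sentinel is stripped out and sent as a field transform on the same document.

    from google.cloud import firestore

    client = firestore.Client()  # assumes default project/credentials
    doc_ref = client.collection("C").document("d")

    # One create() mixing plain data with three transform sentinels.
    doc_ref.create(
        {
            "a": 1,                                 # stays in the update write
            "b": firestore.SERVER_TIMESTAMP,        # set_to_server_value: REQUEST_TIME
            "c": firestore.ArrayUnion([1, 2, 3]),   # append_missing_elements
            "d": firestore.ArrayRemove([4, 5, 6]),  # remove_all_from_array
        }
    )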
- -description: "create: all transforms in a single call" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": [\"ArrayUnion\", 1, 2, 3], \"d\": [\"ArrayRemove\", 4, 5, 6]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - field_transforms: < - field_path: "c" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "d" - remove_all_from_array: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-multi.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayremove-multi.textproto deleted file mode 100644 index f80d65b2381a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-multi.textproto +++ /dev/null @@ -1,61 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ArrayRemove field. Since all the ArrayRemove -# fields are removed, the only field in the update is "a". - -description: "create: multiple ArrayRemove fields" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3], \"c\": {\"d\": [\"ArrayRemove\", 4, 5, 6]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "c.d" - remove_all_from_array: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-nested.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayremove-nested.textproto deleted file mode 100644 index 97756c306c18..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-nested.textproto +++ /dev/null @@ -1,48 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ArrayRemove value can occur at any depth. In this case, the transform applies -# to the field path "b.c". Since "c" is removed from the update, "b" becomes -# empty, so it is also removed from the update. 
- -description: "create: nested ArrayRemove field" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": [\"ArrayRemove\", 1, 2, 3]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayremove-noarray-nested.textproto deleted file mode 100644 index 4ec0cb3b9376..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ArrayRemove. Firestore transforms don't support array indexing. - -description: "create: ArrayRemove cannot be anywhere inside an array value" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": [\"ArrayRemove\", 1, 2, 3]}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayremove-noarray.textproto deleted file mode 100644 index 969b8d9dd84e..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayRemove must be the value of a field. Firestore transforms don't support -# array indexing. - -description: "create: ArrayRemove cannot be in an array value" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-with-st.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayremove-with-st.textproto deleted file mode 100644 index b6ea3224de73..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayremove-with-st.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. It may not appear in -# an ArrayUnion. 
- -description: "create: The ServerTimestamp sentinel cannot be in an ArrayUnion" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [\"ArrayRemove\", 1, \"ServerTimestamp\", 3]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayremove.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayremove.textproto deleted file mode 100644 index e8e4bb3980db..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayremove.textproto +++ /dev/null @@ -1,47 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with ArrayRemove is removed from the data in the update operation. Instead -# it appears in a separate Transform operation. - -description: "create: ArrayRemove with data" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-multi.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayunion-multi.textproto deleted file mode 100644 index ec3cb72f5b1b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-multi.textproto +++ /dev/null @@ -1,61 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ArrayUnion field. Since all the ArrayUnion -# fields are removed, the only field in the update is "a". 
- -description: "create: multiple ArrayUnion fields" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3], \"c\": {\"d\": [\"ArrayUnion\", 4, 5, 6]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "c.d" - append_missing_elements: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-nested.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayunion-nested.textproto deleted file mode 100644 index e6e81bc1d7a2..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-nested.textproto +++ /dev/null @@ -1,48 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ArrayUnion value can occur at any depth. In this case, the transform applies -# to the field path "b.c". Since "c" is removed from the update, "b" becomes -# empty, so it is also removed from the update. - -description: "create: nested ArrayUnion field" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": [\"ArrayUnion\", 1, 2, 3]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayunion-noarray-nested.textproto deleted file mode 100644 index 4c0afe443048..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ArrayUnion. Firestore transforms don't support array indexing. 
- -description: "create: ArrayUnion cannot be anywhere inside an array value" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": [\"ArrayUnion\", 1, 2, 3]}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayunion-noarray.textproto deleted file mode 100644 index 7b791fa4154d..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayUnion must be the value of a field. Firestore transforms don't support -# array indexing. - -description: "create: ArrayUnion cannot be in an array value" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-with-st.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayunion-with-st.textproto deleted file mode 100644 index a1bf4a90d1c4..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayunion-with-st.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. It may not appear in -# an ArrayUnion. - -description: "create: The ServerTimestamp sentinel cannot be in an ArrayUnion" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [\"ArrayUnion\", 1, \"ServerTimestamp\", 3]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-arrayunion.textproto b/firestore/tests/unit/v1beta1/testdata/create-arrayunion.textproto deleted file mode 100644 index 98cb6ad8acb1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-arrayunion.textproto +++ /dev/null @@ -1,47 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with ArrayUnion is removed from the data in the update operation. Instead -# it appears in a separate Transform operation. 
- -description: "create: ArrayUnion with data" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-basic.textproto b/firestore/tests/unit/v1beta1/testdata/create-basic.textproto deleted file mode 100644 index 433ffda72704..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-basic.textproto +++ /dev/null @@ -1,27 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A simple call, resulting in a single update operation. - -description: "create: basic" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-complex.textproto b/firestore/tests/unit/v1beta1/testdata/create-complex.textproto deleted file mode 100644 index 00a994e204a2..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-complex.textproto +++ /dev/null @@ -1,61 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A call to a write method with complicated input data. - -description: "create: complex" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - array_value: < - values: < - integer_value: 1 - > - values: < - double_value: 2.5 - > - > - > - > - fields: < - key: "b" - value: < - map_value: < - fields: < - key: "c" - value: < - array_value: < - values: < - string_value: "three" - > - values: < - map_value: < - fields: < - key: "d" - value: < - boolean_value: true - > - > - > - > - > - > - > - > - > - > - > - current_document: < - exists: false - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-del-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/create-del-noarray-nested.textproto deleted file mode 100644 index 60694e137163..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-del-noarray-nested.textproto +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. 
- -# The Delete sentinel must be the value of a field. Deletes are implemented by -# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not -# support array indexing. - -description: "create: Delete cannot be anywhere inside an array value" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-del-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/create-del-noarray.textproto deleted file mode 100644 index 5731be1c7357..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-del-noarray.textproto +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a field. Deletes are implemented by -# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not -# support array indexing. - -description: "create: Delete cannot be in an array value" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, \"Delete\"]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-empty.textproto b/firestore/tests/unit/v1beta1/testdata/create-empty.textproto deleted file mode 100644 index 2b6fec7efafd..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-empty.textproto +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - - -description: "create: creating or setting an empty map" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - > - current_document: < - exists: false - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-nodel.textproto b/firestore/tests/unit/v1beta1/testdata/create-nodel.textproto deleted file mode 100644 index c878814b1128..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-nodel.textproto +++ /dev/null @@ -1,11 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel cannot be used in Create, or in Set without a Merge option. - -description: "create: Delete cannot appear in data" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"Delete\"}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-nosplit.textproto b/firestore/tests/unit/v1beta1/testdata/create-nosplit.textproto deleted file mode 100644 index e9e1ee2755f5..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-nosplit.textproto +++ /dev/null @@ -1,40 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Create and Set treat their map keys literally. They do not split on dots. 
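A short sketch of the distinction these fixtures encode, assuming the public google.cloud.firestore client: create() and set() keep map keys literal, while update() treats dotted keys as field paths.

    from google.cloud import firestore

    client = firestore.Client()
    doc_ref = client.collection("C").document("d")

    # set() stores a literal top-level key named "a.b".
    doc_ref.set({"a.b": {"c.d": 1}, "e": 2})

    # update() reads "a.b" as a field path: field "b" nested inside map "a".
    doc_ref.update({"a.b": 1})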
- -description: "create: don\342\200\231t split on dots" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a.b" - value: < - map_value: < - fields: < - key: "c.d" - value: < - integer_value: 1 - > - > - > - > - > - fields: < - key: "e" - value: < - integer_value: 2 - > - > - > - current_document: < - exists: false - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-special-chars.textproto b/firestore/tests/unit/v1beta1/testdata/create-special-chars.textproto deleted file mode 100644 index 3a7acd3075de..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-special-chars.textproto +++ /dev/null @@ -1,41 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Create and Set treat their map keys literally. They do not escape special -# characters. - -description: "create: non-alpha characters in map keys" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{ \"*\": { \".\": 1 }, \"~\": 2 }" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "*" - value: < - map_value: < - fields: < - key: "." - value: < - integer_value: 1 - > - > - > - > - > - fields: < - key: "~" - value: < - integer_value: 2 - > - > - > - current_document: < - exists: false - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-st-alone.textproto b/firestore/tests/unit/v1beta1/testdata/create-st-alone.textproto deleted file mode 100644 index 9803a676bbe0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-st-alone.textproto +++ /dev/null @@ -1,26 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ServerTimestamps, then no update operation -# should be produced. - -description: "create: ServerTimestamp alone" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - set_to_server_value: REQUEST_TIME - > - > - current_document: < - exists: false - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-st-multi.textproto b/firestore/tests/unit/v1beta1/testdata/create-st-multi.textproto deleted file mode 100644 index cb3db480999a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-st-multi.textproto +++ /dev/null @@ -1,41 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ServerTimestamp field. Since all the -# ServerTimestamp fields are removed, the only field in the update is "a". 
- -description: "create: multiple ServerTimestamp fields" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - field_transforms: < - field_path: "c.d" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-st-nested.textproto b/firestore/tests/unit/v1beta1/testdata/create-st-nested.textproto deleted file mode 100644 index 6bc03e8e7ca0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-st-nested.textproto +++ /dev/null @@ -1,38 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A ServerTimestamp value can occur at any depth. In this case, the transform -# applies to the field path "b.c". Since "c" is removed from the update, "b" -# becomes empty, so it is also removed from the update. - -description: "create: nested ServerTimestamp field" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-st-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/create-st-noarray-nested.textproto deleted file mode 100644 index 0cec0aebd4bf..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-st-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. - -description: "create: ServerTimestamp cannot be anywhere inside an array value" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-st-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/create-st-noarray.textproto deleted file mode 100644 index 56d91c2cfb5a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-st-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. 
- -# The ServerTimestamp sentinel must be the value of a field. Firestore transforms -# don't support array indexing. - -description: "create: ServerTimestamp cannot be in an array value" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-st-with-empty-map.textproto b/firestore/tests/unit/v1beta1/testdata/create-st-with-empty-map.textproto deleted file mode 100644 index 37e7e074abec..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-st-with-empty-map.textproto +++ /dev/null @@ -1,45 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# When a ServerTimestamp and a map both reside inside a map, the ServerTimestamp -# should be stripped out but the empty map should remain. - -description: "create: ServerTimestamp beside an empty map" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": {\"b\": {}, \"c\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - map_value: < - fields: < - key: "b" - value: < - map_value: < - > - > - > - > - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/create-st.textproto b/firestore/tests/unit/v1beta1/testdata/create-st.textproto deleted file mode 100644 index ddfc6a177e16..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/create-st.textproto +++ /dev/null @@ -1,39 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with the special ServerTimestamp sentinel is removed from the data in the -# update operation. Instead it appears in a separate Transform operation. Note -# that in these tests, the string "ServerTimestamp" should be replaced with the -# special ServerTimestamp value. - -description: "create: ServerTimestamp with data" -create: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - current_document: < - exists: false - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/delete-exists-precond.textproto b/firestore/tests/unit/v1beta1/testdata/delete-exists-precond.textproto deleted file mode 100644 index c9cf2ddea4e6..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/delete-exists-precond.textproto +++ /dev/null @@ -1,21 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Delete supports an exists precondition. - -description: "delete: delete with exists precondition" -delete: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - precondition: < - exists: true - > - request: < - database: "projects/projectID/databases/(default)" - writes: < - delete: "projects/projectID/databases/(default)/documents/C/d" - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/delete-no-precond.textproto b/firestore/tests/unit/v1beta1/testdata/delete-no-precond.textproto deleted file mode 100644 index a396cdb8c4a1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/delete-no-precond.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ordinary Delete call. - -description: "delete: delete without precondition" -delete: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - request: < - database: "projects/projectID/databases/(default)" - writes: < - delete: "projects/projectID/databases/(default)/documents/C/d" - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/delete-time-precond.textproto b/firestore/tests/unit/v1beta1/testdata/delete-time-precond.textproto deleted file mode 100644 index 5798f5f3b2fc..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/delete-time-precond.textproto +++ /dev/null @@ -1,25 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Delete supports a last-update-time precondition. - -description: "delete: delete with last-update-time precondition" -delete: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - precondition: < - update_time: < - seconds: 42 - > - > - request: < - database: "projects/projectID/databases/(default)" - writes: < - delete: "projects/projectID/databases/(default)/documents/C/d" - current_document: < - update_time: < - seconds: 42 - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/get-basic.textproto b/firestore/tests/unit/v1beta1/testdata/get-basic.textproto deleted file mode 100644 index 2a448168255b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/get-basic.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A call to DocumentRef.Get. - -description: "get: get a document" -get: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - request: < - name: "projects/projectID/databases/(default)/documents/C/d" - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-add-mod-del-add.textproto b/firestore/tests/unit/v1beta1/testdata/listen-add-mod-del-add.textproto deleted file mode 100644 index 1aa8dcbc3645..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-add-mod-del-add.textproto +++ /dev/null @@ -1,246 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. 
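Editor's aside: the delete fixtures just above (no precondition, exists, last-update-time) are reachable through Client.write_option in the Python client. A sketch with placeholder values:

import datetime

from google.cloud import firestore

client = firestore.Client()
doc = client.collection("C").document("d")

# Unconditional delete.
doc.delete()

# Delete only if the document currently exists.
doc.delete(option=client.write_option(exists=True))

# Delete only if the document was last updated at exactly this time.
ts = datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc)
doc.delete(option=client.write_option(last_update_time=ts))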
- -# Various changes to a single document. - -description: "listen: add a doc, modify it, delete it, then add it again" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - responses: < - document_delete: < - document: "projects/projectID/databases/(default)/documents/C/d1" - > - > - responses: < - target_change: < - read_time: < - seconds: 3 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - read_time: < - seconds: 4 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - read_time: < - seconds: 1 - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - changes: < - kind: MODIFIED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - > - read_time: < - seconds: 2 - > - > - snapshots: < - changes: < - kind: REMOVED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - new_index: -1 - > - read_time: < - seconds: 3 - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - old_index: -1 - > - read_time: < - seconds: 4 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-add-one.textproto b/firestore/tests/unit/v1beta1/testdata/listen-add-one.textproto deleted file mode 100644 index 2ad1d8e976da..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-add-one.textproto +++ /dev/null @@ -1,79 +0,0 @@ -# 
DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Snapshot with a single document. - -description: "listen: add a doc" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - read_time: < - seconds: 2 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-add-three.textproto b/firestore/tests/unit/v1beta1/testdata/listen-add-three.textproto deleted file mode 100644 index ac846f76260d..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-add-three.textproto +++ /dev/null @@ -1,190 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A snapshot with three documents. The documents are sorted first by the "a" -# field, then by their path. The changes are ordered the same way. 
- -description: "listen: add three documents" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 2 - > - read_time: < - seconds: 2 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-doc-remove.textproto b/firestore/tests/unit/v1beta1/testdata/listen-doc-remove.textproto deleted file mode 100644 index 975200f97363..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-doc-remove.textproto +++ /dev/null @@ -1,115 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The DocumentRemove response behaves exactly like DocumentDelete. 
- -description: "listen: DocumentRemove behaves like DocumentDelete" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - responses: < - document_remove: < - document: "projects/projectID/databases/(default)/documents/C/d1" - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - read_time: < - seconds: 1 - > - > - snapshots: < - changes: < - kind: REMOVED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - new_index: -1 - > - read_time: < - seconds: 2 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-empty.textproto b/firestore/tests/unit/v1beta1/testdata/listen-empty.textproto deleted file mode 100644 index 4d04b79096c7..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-empty.textproto +++ /dev/null @@ -1,25 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There are no changes, so the snapshot should be empty. - -description: "listen: no changes; empty snapshot" -listen: < - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - snapshots: < - read_time: < - seconds: 1 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-filter-nop.textproto b/firestore/tests/unit/v1beta1/testdata/listen-filter-nop.textproto deleted file mode 100644 index 48fd72d3ae12..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-filter-nop.textproto +++ /dev/null @@ -1,247 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A Filter response whose count matches the size of the current state (docs in -# last snapshot + docs added - docs deleted) is a no-op. 
- -description: "listen: Filter response with same size is a no-op" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_delete: < - document: "projects/projectID/databases/(default)/documents/C/d1" - > - > - responses: < - filter: < - count: 2 - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 1 - > - read_time: < - seconds: 1 - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: REMOVED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: 1 - new_index: -1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 1 - > - read_time: < - seconds: 2 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-multi-docs.textproto b/firestore/tests/unit/v1beta1/testdata/listen-multi-docs.textproto deleted file mode 100644 index 8778acc3d1e9..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-multi-docs.textproto +++ /dev/null 
@@ -1,524 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Changes should be ordered with deletes first, then additions, then mods, each in -# query order. Old indices refer to the immediately previous state, not the -# previous snapshot - -description: "listen: multiple documents, added, deleted and updated" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d4" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d5" - fields: < - key: "a" - value: < - integer_value: 4 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_delete: < - document: "projects/projectID/databases/(default)/documents/C/d3" - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: -1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - target_ids: 1 - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d6" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_delete: < - document: "projects/projectID/databases/(default)/documents/C/d2" - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d4" - fields: < - key: "a" - value: < - integer_value: -2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - read_time: < - seconds: 4 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: 
"projects/projectID/databases/(default)/documents/C/d4" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d4" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 2 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 3 - > - read_time: < - seconds: 2 - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d4" - fields: < - key: "a" - value: < - integer_value: -2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: -1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d6" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d5" - fields: < - key: "a" - value: < - integer_value: 4 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: REMOVED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - new_index: -1 - > - changes: < - kind: REMOVED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - new_index: -1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d6" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 2 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d5" - fields: < - key: "a" - value: < - integer_value: 4 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 3 - > - changes: < - kind: MODIFIED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d4" - fields: < - key: "a" - value: < - integer_value: -2 - > - > - 
create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - > - changes: < - kind: MODIFIED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: -1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - old_index: 1 - new_index: 1 - > - read_time: < - seconds: 4 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-nocurrent.textproto b/firestore/tests/unit/v1beta1/testdata/listen-nocurrent.textproto deleted file mode 100644 index 24239b6456f9..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-nocurrent.textproto +++ /dev/null @@ -1,141 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the watch state is not marked CURRENT, no snapshot is issued. - -description: "listen: no snapshot if we don't see CURRENT" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - old_index: -1 - new_index: 1 - > - read_time: < - seconds: 2 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-nomod.textproto b/firestore/tests/unit/v1beta1/testdata/listen-nomod.textproto deleted file mode 100644 index 2a99edc350c8..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-nomod.textproto +++ /dev/null @@ -1,143 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Document updates are recognized by a change in the update time, not the data. -# This shouldn't actually happen. It is just a test of the update logic. 
- -description: "listen: add a doc, then change it but without changing its update time" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - responses: < - document_delete: < - document: "projects/projectID/databases/(default)/documents/C/d1" - > - > - responses: < - target_change: < - read_time: < - seconds: 3 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - read_time: < - seconds: 1 - > - > - snapshots: < - changes: < - kind: REMOVED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - new_index: -1 - > - read_time: < - seconds: 3 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-removed-target-ids.textproto b/firestore/tests/unit/v1beta1/testdata/listen-removed-target-ids.textproto deleted file mode 100644 index 1e8ead2d8048..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-removed-target-ids.textproto +++ /dev/null @@ -1,131 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A DocumentChange with the watch target ID in the removed_target_ids field is the -# same as deleting a document. - -description: "listen: DocumentChange with removed_target_id is like a delete." 
-listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - removed_target_ids: 1 - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - read_time: < - seconds: 1 - > - > - snapshots: < - changes: < - kind: REMOVED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - new_index: -1 - > - read_time: < - seconds: 2 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-reset.textproto b/firestore/tests/unit/v1beta1/testdata/listen-reset.textproto deleted file mode 100644 index 89a75df2783a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-reset.textproto +++ /dev/null @@ -1,382 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A RESET message turns off the CURRENT state, and marks all documents as deleted. - -# If a document appeared on the stream but was never part of a snapshot ("d3" in -# this test), a reset will make it disappear completely. - -# For a snapshot to happen at a NO_CHANGE reponse, we need to have both seen a -# CURRENT response, and have a change from the previous snapshot. Here, after the -# reset, we see the same version of d2 again. That doesn't result in a snapshot. 
- -description: "listen: RESET turns off CURRENT" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: RESET - > - > - responses: < - target_change: < - read_time: < - seconds: 2 - > - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - read_time: < - seconds: 3 - > - > - > - responses: < - target_change: < - target_change_type: RESET - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - read_time: < - seconds: 4 - > - > - > - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - read_time: < - seconds: 5 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - old_index: -1 - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - new_index: 1 - > - read_time: < - seconds: 1 - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - 
create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - changes: < - kind: REMOVED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 2 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: 1 - new_index: -1 - > - changes: < - kind: MODIFIED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - > - read_time: < - seconds: 3 - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d2" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 3 - > - > - docs: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d3" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 2 - > - > - old_index: -1 - new_index: 1 - > - read_time: < - seconds: 5 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-target-add-nop.textproto b/firestore/tests/unit/v1beta1/testdata/listen-target-add-nop.textproto deleted file mode 100644 index 3fa7cce56e27..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-target-add-nop.textproto +++ /dev/null @@ -1,88 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A TargetChange_ADD response must have the same watch target ID. - -description: "listen: TargetChange_ADD is a no-op if it has the same target ID" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - target_change_type: ADD - target_ids: 1 - read_time: < - seconds: 2 - > - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - snapshots: < - docs: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - changes: < - kind: ADDED - doc: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - old_index: -1 - > - read_time: < - seconds: 1 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-target-add-wrong-id.textproto b/firestore/tests/unit/v1beta1/testdata/listen-target-add-wrong-id.textproto deleted file mode 100644 index 87544637b50b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-target-add-wrong-id.textproto +++ /dev/null @@ -1,50 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A TargetChange_ADD response must have the same watch target ID. - -description: "listen: TargetChange_ADD is an error if it has a different target ID" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - target_change_type: ADD - target_ids: 2 - read_time: < - seconds: 2 - > - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/listen-target-remove.textproto b/firestore/tests/unit/v1beta1/testdata/listen-target-remove.textproto deleted file mode 100644 index f34b0890c3f0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/listen-target-remove.textproto +++ /dev/null @@ -1,46 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A TargetChange_REMOVE response should never be sent. - -description: "listen: TargetChange_REMOVE should not appear" -listen: < - responses: < - document_change: < - document: < - name: "projects/projectID/databases/(default)/documents/C/d1" - fields: < - key: "a" - value: < - integer_value: 3 - > - > - create_time: < - seconds: 1 - > - update_time: < - seconds: 1 - > - > - target_ids: 1 - > - > - responses: < - target_change: < - target_change_type: CURRENT - > - > - responses: < - target_change: < - target_change_type: REMOVE - > - > - responses: < - target_change: < - read_time: < - seconds: 1 - > - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-arrayremove-cursor.textproto b/firestore/tests/unit/v1beta1/testdata/query-arrayremove-cursor.textproto deleted file mode 100644 index 3c926da963e6..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-arrayremove-cursor.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayRemove is not permitted in queries. - -description: "query: ArrayRemove in cursor method" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - end_before: < - json_values: "[\"ArrayRemove\", 1, 2, 3]" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-arrayremove-where.textproto b/firestore/tests/unit/v1beta1/testdata/query-arrayremove-where.textproto deleted file mode 100644 index 000b76350e01..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-arrayremove-where.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayRemove is not permitted in queries. 
- -description: "query: ArrayRemove in Where" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "==" - json_value: "[\"ArrayRemove\", 1, 2, 3]" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-arrayunion-cursor.textproto b/firestore/tests/unit/v1beta1/testdata/query-arrayunion-cursor.textproto deleted file mode 100644 index e8a61104d1b3..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-arrayunion-cursor.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayUnion is not permitted in queries. - -description: "query: ArrayUnion in cursor method" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - end_before: < - json_values: "[\"ArrayUnion\", 1, 2, 3]" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-arrayunion-where.textproto b/firestore/tests/unit/v1beta1/testdata/query-arrayunion-where.textproto deleted file mode 100644 index 94923134e2b1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-arrayunion-where.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayUnion is not permitted in queries. - -description: "query: ArrayUnion in Where" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "==" - json_value: "[\"ArrayUnion\", 1, 2, 3]" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-bad-NaN.textproto b/firestore/tests/unit/v1beta1/testdata/query-bad-NaN.textproto deleted file mode 100644 index 6806dd04ab27..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-bad-NaN.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# You can only compare NaN for equality. - -description: "query: where clause with non-== comparison with NaN" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "<" - json_value: "\"NaN\"" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-bad-null.textproto b/firestore/tests/unit/v1beta1/testdata/query-bad-null.textproto deleted file mode 100644 index 7fdfb3f2b5dd..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-bad-null.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# You can only compare Null for equality. 
- -description: "query: where clause with non-== comparison with Null" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: ">" - json_value: "null" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-order.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-order.textproto deleted file mode 100644 index bab8601e8d6c..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-order.textproto +++ /dev/null @@ -1,68 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# When a document snapshot is used, the client appends a __name__ order-by clause -# with the direction of the last order-by clause. - -description: "query: cursor methods with a document snapshot, existing orderBy" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - order_by: < - path: < - field: "b" - > - direction: "desc" - > - > - clauses: < - start_after: < - doc_snapshot: < - path: "projects/projectID/databases/(default)/documents/C/D" - json_data: "{\"a\": 7, \"b\": 8}" - > - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "a" - > - direction: ASCENDING - > - order_by: < - field: < - field_path: "b" - > - direction: DESCENDING - > - order_by: < - field: < - field_path: "__name__" - > - direction: DESCENDING - > - start_at: < - values: < - integer_value: 7 - > - values: < - integer_value: 8 - > - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-orderby-name.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-orderby-name.textproto deleted file mode 100644 index d0ce3df45a2f..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-orderby-name.textproto +++ /dev/null @@ -1,76 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If there is an existing orderBy clause on __name__, no changes are made to the -# list of orderBy clauses. 
- -description: "query: cursor method, doc snapshot, existing orderBy __name__" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "desc" - > - > - clauses: < - order_by: < - path: < - field: "__name__" - > - direction: "asc" - > - > - clauses: < - start_at: < - doc_snapshot: < - path: "projects/projectID/databases/(default)/documents/C/D" - json_data: "{\"a\": 7, \"b\": 8}" - > - > - > - clauses: < - end_at: < - doc_snapshot: < - path: "projects/projectID/databases/(default)/documents/C/D" - json_data: "{\"a\": 7, \"b\": 8}" - > - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "a" - > - direction: DESCENDING - > - order_by: < - field: < - field_path: "__name__" - > - direction: ASCENDING - > - start_at: < - values: < - integer_value: 7 - > - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D" - > - before: true - > - end_at: < - values: < - integer_value: 7 - > - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-eq.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-eq.textproto deleted file mode 100644 index 8b1e217df5f2..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-eq.textproto +++ /dev/null @@ -1,53 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A Where clause using equality doesn't change the implicit orderBy clauses. - -description: "query: cursor methods with a document snapshot and an equality where clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "==" - json_value: "3" - > - > - clauses: < - end_at: < - doc_snapshot: < - path: "projects/projectID/databases/(default)/documents/C/D" - json_data: "{\"a\": 7, \"b\": 8}" - > - > - > - query: < - from: < - collection_id: "C" - > - where: < - field_filter: < - field: < - field_path: "a" - > - op: EQUAL - value: < - integer_value: 3 - > - > - > - order_by: < - field: < - field_path: "__name__" - > - direction: ASCENDING - > - end_at: < - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-neq-orderby.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-neq-orderby.textproto deleted file mode 100644 index a69edfc50d11..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-neq-orderby.textproto +++ /dev/null @@ -1,72 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If there is an OrderBy clause, the inequality Where clause does not result in a -# new OrderBy clause. 
We still add a __name__ OrderBy clause - -description: "query: cursor method, doc snapshot, inequality where clause, and existing orderBy clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "desc" - > - > - clauses: < - where: < - path: < - field: "a" - > - op: "<" - json_value: "4" - > - > - clauses: < - start_at: < - doc_snapshot: < - path: "projects/projectID/databases/(default)/documents/C/D" - json_data: "{\"a\": 7, \"b\": 8}" - > - > - > - query: < - from: < - collection_id: "C" - > - where: < - field_filter: < - field: < - field_path: "a" - > - op: LESS_THAN - value: < - integer_value: 4 - > - > - > - order_by: < - field: < - field_path: "a" - > - direction: DESCENDING - > - order_by: < - field: < - field_path: "__name__" - > - direction: DESCENDING - > - start_at: < - values: < - integer_value: 7 - > - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D" - > - before: true - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-neq.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-neq.textproto deleted file mode 100644 index 871dd0ba3392..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap-where-neq.textproto +++ /dev/null @@ -1,64 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A Where clause with an inequality results in an OrderBy clause on that clause's -# path, if there are no other OrderBy clauses. - -description: "query: cursor method with a document snapshot and an inequality where clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "<=" - json_value: "3" - > - > - clauses: < - end_before: < - doc_snapshot: < - path: "projects/projectID/databases/(default)/documents/C/D" - json_data: "{\"a\": 7, \"b\": 8}" - > - > - > - query: < - from: < - collection_id: "C" - > - where: < - field_filter: < - field: < - field_path: "a" - > - op: LESS_THAN_OR_EQUAL - value: < - integer_value: 3 - > - > - > - order_by: < - field: < - field_path: "a" - > - direction: ASCENDING - > - order_by: < - field: < - field_path: "__name__" - > - direction: ASCENDING - > - end_at: < - values: < - integer_value: 7 - > - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D" - > - before: true - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap.textproto deleted file mode 100644 index 184bffc2d326..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-docsnap.textproto +++ /dev/null @@ -1,34 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# When a document snapshot is used, the client appends a __name__ order-by clause. 
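As the generated comment immediately above notes, a document snapshot used as a cursor makes the client append a __name__ order-by clause. For orientation only, a minimal Python sketch of the equivalent call with the google-cloud-firestore client (collection "C", document "D", and field "a" are placeholders taken from the deleted test data; this is an illustrative sketch, not part of the change):

    from google.cloud import firestore

    client = firestore.Client()  # assumes default credentials and project
    coll = client.collection("C")

    snapshot = coll.document("D").get()

    # Using a DocumentSnapshot as the cursor; per the deleted expectations, the
    # client appends a __name__ order-by (following the last order_by direction)
    # and fills the cursor values from the snapshot.
    query = coll.order_by("a").start_after(snapshot)
    for doc in query.stream():
        print(doc.id, doc.to_dict())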
- -description: "query: cursor methods with a document snapshot" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - start_at: < - doc_snapshot: < - path: "projects/projectID/databases/(default)/documents/C/D" - json_data: "{\"a\": 7, \"b\": 8}" - > - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "__name__" - > - direction: ASCENDING - > - start_at: < - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D" - > - before: true - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-endbefore-empty-map.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-endbefore-empty-map.textproto deleted file mode 100644 index c197d23afe16..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-endbefore-empty-map.textproto +++ /dev/null @@ -1,41 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Cursor methods are allowed to use empty maps with EndBefore. It should result in -# an empty map in the query. - -description: "query: EndBefore with explicit empty map" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - end_before: < - json_values: "{}" - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "a" - > - direction: ASCENDING - > - end_at: < - values: < - map_value: < - > - > - before: true - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-endbefore-empty.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-endbefore-empty.textproto deleted file mode 100644 index a41775abf074..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-endbefore-empty.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Cursor methods are not allowed to use empty values with EndBefore. It should -# result in an error. - -description: "query: EndBefore with empty values" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - end_before: < - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-no-order.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-no-order.textproto deleted file mode 100644 index fb999ddabb0f..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-no-order.textproto +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If a cursor method with a list of values is provided, there must be at least as -# many explicit orderBy clauses as values. 
- -description: "query: cursor method without orderBy" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - start_at: < - json_values: "2" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-startat-empty-map.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-startat-empty-map.textproto deleted file mode 100644 index 557aca2c9194..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-startat-empty-map.textproto +++ /dev/null @@ -1,41 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Cursor methods are allowed to use empty maps with StartAt. It should result in -# an empty map in the query. - -description: "query: StartAt with explicit empty map" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - start_at: < - json_values: "{}" - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "a" - > - direction: ASCENDING - > - start_at: < - values: < - map_value: < - > - > - before: true - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-startat-empty.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-startat-empty.textproto deleted file mode 100644 index e0c54d98a6cc..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-startat-empty.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Cursor methods are not allowed to use empty values with StartAt. It should -# result in an error. - -description: "query: StartAt with empty values" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - start_at: < - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-1a.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-1a.textproto deleted file mode 100644 index bb08ab7d4d5b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-1a.textproto +++ /dev/null @@ -1,50 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Cursor methods take the same number of values as there are OrderBy clauses. 
- -description: "query: StartAt/EndBefore with values" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - start_at: < - json_values: "7" - > - > - clauses: < - end_before: < - json_values: "9" - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "a" - > - direction: ASCENDING - > - start_at: < - values: < - integer_value: 7 - > - before: true - > - end_at: < - values: < - integer_value: 9 - > - before: true - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-1b.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-1b.textproto deleted file mode 100644 index 41e69e9e6f14..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-1b.textproto +++ /dev/null @@ -1,48 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Cursor methods take the same number of values as there are OrderBy clauses. - -description: "query: StartAfter/EndAt with values" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - start_after: < - json_values: "7" - > - > - clauses: < - end_at: < - json_values: "9" - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "a" - > - direction: ASCENDING - > - start_at: < - values: < - integer_value: 7 - > - > - end_at: < - values: < - integer_value: 9 - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-2.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-2.textproto deleted file mode 100644 index 8e37ad0035fa..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-2.textproto +++ /dev/null @@ -1,71 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Cursor methods take the same number of values as there are OrderBy clauses. - -description: "query: Start/End with two values" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - order_by: < - path: < - field: "b" - > - direction: "desc" - > - > - clauses: < - start_at: < - json_values: "7" - json_values: "8" - > - > - clauses: < - end_at: < - json_values: "9" - json_values: "10" - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "a" - > - direction: ASCENDING - > - order_by: < - field: < - field_path: "b" - > - direction: DESCENDING - > - start_at: < - values: < - integer_value: 7 - > - values: < - integer_value: 8 - > - before: true - > - end_at: < - values: < - integer_value: 9 - > - values: < - integer_value: 10 - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-docid.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-docid.textproto deleted file mode 100644 index 91af3486c998..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-docid.textproto +++ /dev/null @@ -1,50 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Cursor values corresponding to a __name__ field take the document path relative -# to the query's collection. - -description: "query: cursor methods with __name__" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "__name__" - > - direction: "asc" - > - > - clauses: < - start_after: < - json_values: "\"D1\"" - > - > - clauses: < - end_before: < - json_values: "\"D2\"" - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "__name__" - > - direction: ASCENDING - > - start_at: < - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D1" - > - > - end_at: < - values: < - reference_value: "projects/projectID/databases/(default)/documents/C/D2" - > - before: true - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-last-wins.textproto b/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-last-wins.textproto deleted file mode 100644 index 9e8fbb19f336..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-cursor-vals-last-wins.textproto +++ /dev/null @@ -1,60 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# When multiple Start* or End* calls occur, the values of the last one are used. - -description: "query: cursor methods, last one wins" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - start_after: < - json_values: "1" - > - > - clauses: < - start_at: < - json_values: "2" - > - > - clauses: < - end_at: < - json_values: "3" - > - > - clauses: < - end_before: < - json_values: "4" - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "a" - > - direction: ASCENDING - > - start_at: < - values: < - integer_value: 2 - > - before: true - > - end_at: < - values: < - integer_value: 4 - > - before: true - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-del-cursor.textproto b/firestore/tests/unit/v1beta1/testdata/query-del-cursor.textproto deleted file mode 100644 index c9d4adb7c5dc..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-del-cursor.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Sentinel values are not permitted in queries. - -description: "query: Delete in cursor method" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - end_before: < - json_values: "\"Delete\"" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-del-where.textproto b/firestore/tests/unit/v1beta1/testdata/query-del-where.textproto deleted file mode 100644 index 8e92529492ea..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-del-where.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Sentinel values are not permitted in queries. - -description: "query: Delete in Where" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "==" - json_value: "\"Delete\"" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-invalid-operator.textproto b/firestore/tests/unit/v1beta1/testdata/query-invalid-operator.textproto deleted file mode 100644 index e580c64a759f..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-invalid-operator.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The != operator is not supported. - -description: "query: invalid operator in Where clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "!=" - json_value: "4" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-invalid-path-order.textproto b/firestore/tests/unit/v1beta1/testdata/query-invalid-path-order.textproto deleted file mode 100644 index e0a72057620c..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-invalid-path-order.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The path has an empty component. - -description: "query: invalid path in OrderBy clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "*" - field: "" - > - direction: "asc" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-invalid-path-select.textproto b/firestore/tests/unit/v1beta1/testdata/query-invalid-path-select.textproto deleted file mode 100644 index 944f984f7fa9..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-invalid-path-select.textproto +++ /dev/null @@ -1,18 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The path has an empty component. - -description: "query: invalid path in Where clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - select: < - fields: < - field: "*" - field: "" - > - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-invalid-path-where.textproto b/firestore/tests/unit/v1beta1/testdata/query-invalid-path-where.textproto deleted file mode 100644 index 527923b09799..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-invalid-path-where.textproto +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The path has an empty component. 
- -description: "query: invalid path in Where clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "*" - field: "" - > - op: "==" - json_value: "4" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-offset-limit-last-wins.textproto b/firestore/tests/unit/v1beta1/testdata/query-offset-limit-last-wins.textproto deleted file mode 100644 index dc301f439e8d..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-offset-limit-last-wins.textproto +++ /dev/null @@ -1,30 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# With multiple Offset or Limit clauses, the last one wins. - -description: "query: multiple Offset and Limit clauses" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - offset: 2 - > - clauses: < - limit: 3 - > - clauses: < - limit: 4 - > - clauses: < - offset: 5 - > - query: < - from: < - collection_id: "C" - > - offset: 5 - limit: < - value: 4 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-offset-limit.textproto b/firestore/tests/unit/v1beta1/testdata/query-offset-limit.textproto deleted file mode 100644 index 136d9d46a615..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-offset-limit.textproto +++ /dev/null @@ -1,24 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Offset and Limit clauses. - -description: "query: Offset and Limit clauses" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - offset: 2 - > - clauses: < - limit: 3 - > - query: < - from: < - collection_id: "C" - > - offset: 2 - limit: < - value: 3 - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-order.textproto b/firestore/tests/unit/v1beta1/testdata/query-order.textproto deleted file mode 100644 index 7ed4c4ead840..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-order.textproto +++ /dev/null @@ -1,42 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Multiple OrderBy clauses combine. - -description: "query: basic OrderBy clauses" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "b" - > - direction: "asc" - > - > - clauses: < - order_by: < - path: < - field: "a" - > - direction: "desc" - > - > - query: < - from: < - collection_id: "C" - > - order_by: < - field: < - field_path: "b" - > - direction: ASCENDING - > - order_by: < - field: < - field_path: "a" - > - direction: DESCENDING - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-select-empty.textproto b/firestore/tests/unit/v1beta1/testdata/query-select-empty.textproto deleted file mode 100644 index def8b55ac515..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-select-empty.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An empty Select clause selects just the document ID. 
- -description: "query: empty Select clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - select: < - > - > - query: < - select: < - fields: < - field_path: "__name__" - > - > - from: < - collection_id: "C" - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-select-last-wins.textproto b/firestore/tests/unit/v1beta1/testdata/query-select-last-wins.textproto deleted file mode 100644 index bd78d09eb9b8..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-select-last-wins.textproto +++ /dev/null @@ -1,36 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The last Select clause is the only one used. - -description: "query: two Select clauses" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - select: < - fields: < - field: "a" - > - fields: < - field: "b" - > - > - > - clauses: < - select: < - fields: < - field: "c" - > - > - > - query: < - select: < - fields: < - field_path: "c" - > - > - from: < - collection_id: "C" - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-select.textproto b/firestore/tests/unit/v1beta1/testdata/query-select.textproto deleted file mode 100644 index 15e11249730c..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-select.textproto +++ /dev/null @@ -1,32 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ordinary Select clause. - -description: "query: Select clause with some fields" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - select: < - fields: < - field: "a" - > - fields: < - field: "b" - > - > - > - query: < - select: < - fields: < - field_path: "a" - > - fields: < - field_path: "b" - > - > - from: < - collection_id: "C" - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-st-cursor.textproto b/firestore/tests/unit/v1beta1/testdata/query-st-cursor.textproto deleted file mode 100644 index 66885d0dd5dc..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-st-cursor.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Sentinel values are not permitted in queries. - -description: "query: ServerTimestamp in cursor method" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - order_by: < - path: < - field: "a" - > - direction: "asc" - > - > - clauses: < - end_before: < - json_values: "\"ServerTimestamp\"" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-st-where.textproto b/firestore/tests/unit/v1beta1/testdata/query-st-where.textproto deleted file mode 100644 index 05da28d54291..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-st-where.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Sentinel values are not permitted in queries. 
- -description: "query: ServerTimestamp in Where" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "==" - json_value: "\"ServerTimestamp\"" - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-where-2.textproto b/firestore/tests/unit/v1beta1/testdata/query-where-2.textproto deleted file mode 100644 index 1034463079e1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-where-2.textproto +++ /dev/null @@ -1,59 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Multiple Where clauses are combined into a composite filter. - -description: "query: two Where clauses" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: ">=" - json_value: "5" - > - > - clauses: < - where: < - path: < - field: "b" - > - op: "<" - json_value: "\"foo\"" - > - > - query: < - from: < - collection_id: "C" - > - where: < - composite_filter: < - op: AND - filters: < - field_filter: < - field: < - field_path: "a" - > - op: GREATER_THAN_OR_EQUAL - value: < - integer_value: 5 - > - > - > - filters: < - field_filter: < - field: < - field_path: "b" - > - op: LESS_THAN - value: < - string_value: "foo" - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-where-NaN.textproto b/firestore/tests/unit/v1beta1/testdata/query-where-NaN.textproto deleted file mode 100644 index 4a97ca7dde1f..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-where-NaN.textproto +++ /dev/null @@ -1,31 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A Where clause that tests for equality with NaN results in a unary filter. - -description: "query: a Where clause comparing to NaN" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "==" - json_value: "\"NaN\"" - > - > - query: < - from: < - collection_id: "C" - > - where: < - unary_filter: < - op: IS_NAN - field: < - field_path: "a" - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-where-null.textproto b/firestore/tests/unit/v1beta1/testdata/query-where-null.textproto deleted file mode 100644 index 1869c60c72aa..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-where-null.textproto +++ /dev/null @@ -1,31 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A Where clause that tests for equality with null results in a unary filter. 
- -description: "query: a Where clause comparing to null" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: "==" - json_value: "null" - > - > - query: < - from: < - collection_id: "C" - > - where: < - unary_filter: < - op: IS_NULL - field: < - field_path: "a" - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-where.textproto b/firestore/tests/unit/v1beta1/testdata/query-where.textproto deleted file mode 100644 index 045c2befab88..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-where.textproto +++ /dev/null @@ -1,34 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A simple Where clause. - -description: "query: Where clause" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - where: < - path: < - field: "a" - > - op: ">" - json_value: "5" - > - > - query: < - from: < - collection_id: "C" - > - where: < - field_filter: < - field: < - field_path: "a" - > - op: GREATER_THAN - value: < - integer_value: 5 - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/query-wrong-collection.textproto b/firestore/tests/unit/v1beta1/testdata/query-wrong-collection.textproto deleted file mode 100644 index ad6f353d5fc9..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/query-wrong-collection.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If a document snapshot is passed to a Start*/End* method, it must be in the same -# collection as the query. - -description: "query: doc snapshot with wrong collection in cursor method" -query: < - coll_path: "projects/projectID/databases/(default)/documents/C" - clauses: < - end_before: < - doc_snapshot: < - path: "projects/projectID/databases/(default)/documents/C2/D" - json_data: "{\"a\": 7, \"b\": 8}" - > - > - > - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-all-transforms.textproto b/firestore/tests/unit/v1beta1/testdata/set-all-transforms.textproto deleted file mode 100644 index bf18f9a5b12a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-all-transforms.textproto +++ /dev/null @@ -1,61 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can be created with any amount of transforms. 
- -description: "set: all transforms in a single call" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": [\"ArrayUnion\", 1, 2, 3], \"d\": [\"ArrayRemove\", 4, 5, 6]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - field_transforms: < - field_path: "c" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "d" - remove_all_from_array: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-multi.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayremove-multi.textproto deleted file mode 100644 index 9b62fe191953..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-multi.textproto +++ /dev/null @@ -1,58 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ArrayRemove field. Since all the ArrayRemove -# fields are removed, the only field in the update is "a". - -description: "set: multiple ArrayRemove fields" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3], \"c\": {\"d\": [\"ArrayRemove\", 4, 5, 6]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "c.d" - remove_all_from_array: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayremove-nested.textproto deleted file mode 100644 index 617609c5a39e..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-nested.textproto +++ /dev/null @@ -1,45 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ArrayRemove value can occur at any depth. In this case, the transform applies -# to the field path "b.c". Since "c" is removed from the update, "b" becomes -# empty, so it is also removed from the update. 
- -description: "set: nested ArrayRemove field" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": [\"ArrayRemove\", 1, 2, 3]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayremove-noarray-nested.textproto deleted file mode 100644 index 2efa34a59f19..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ArrayRemove. Firestore transforms don't support array indexing. - -description: "set: ArrayRemove cannot be anywhere inside an array value" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": [\"ArrayRemove\", 1, 2, 3]}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayremove-noarray.textproto deleted file mode 100644 index e7aa209ea22b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayRemove must be the value of a field. Firestore transforms don't support -# array indexing. - -description: "set: ArrayRemove cannot be in an array value" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-with-st.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayremove-with-st.textproto deleted file mode 100644 index 353025b59ff5..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayremove-with-st.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. It may not appear in -# an ArrayUnion. 
- -description: "set: The ServerTimestamp sentinel cannot be in an ArrayUnion" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [\"ArrayRemove\", 1, \"ServerTimestamp\", 3]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayremove.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayremove.textproto deleted file mode 100644 index 8aa6b60d0156..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayremove.textproto +++ /dev/null @@ -1,44 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with ArrayRemove is removed from the data in the update operation. Instead -# it appears in a separate Transform operation. - -description: "set: ArrayRemove with data" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-multi.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayunion-multi.textproto deleted file mode 100644 index e515bfa8d188..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-multi.textproto +++ /dev/null @@ -1,58 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ArrayUnion field. Since all the ArrayUnion -# fields are removed, the only field in the update is "a". - -description: "set: multiple ArrayUnion fields" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3], \"c\": {\"d\": [\"ArrayUnion\", 4, 5, 6]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "c.d" - append_missing_elements: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayunion-nested.textproto deleted file mode 100644 index f8abeb0d0004..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-nested.textproto +++ /dev/null @@ -1,45 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ArrayUnion value can occur at any depth. In this case, the transform applies -# to the field path "b.c". Since "c" is removed from the update, "b" becomes -# empty, so it is also removed from the update. - -description: "set: nested ArrayUnion field" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": [\"ArrayUnion\", 1, 2, 3]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayunion-noarray-nested.textproto deleted file mode 100644 index 2b4170f431a3..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ArrayUnion. Firestore transforms don't support array indexing. - -description: "set: ArrayUnion cannot be anywhere inside an array value" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": [\"ArrayUnion\", 1, 2, 3]}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayunion-noarray.textproto deleted file mode 100644 index e08af3a07f14..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayUnion must be the value of a field. Firestore transforms don't support -# array indexing. - -description: "set: ArrayUnion cannot be in an array value" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-with-st.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayunion-with-st.textproto deleted file mode 100644 index 37a7a132e750..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayunion-with-st.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. It may not appear in -# an ArrayUnion. 
- -description: "set: The ServerTimestamp sentinel cannot be in an ArrayUnion" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [\"ArrayUnion\", 1, \"ServerTimestamp\", 3]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-arrayunion.textproto b/firestore/tests/unit/v1beta1/testdata/set-arrayunion.textproto deleted file mode 100644 index 4751e0c0e322..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-arrayunion.textproto +++ /dev/null @@ -1,44 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with ArrayUnion is removed from the data in the update operation. Instead -# it appears in a separate Transform operation. - -description: "set: ArrayUnion with data" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-basic.textproto b/firestore/tests/unit/v1beta1/testdata/set-basic.textproto deleted file mode 100644 index e9b292e3cdc3..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-basic.textproto +++ /dev/null @@ -1,24 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A simple call, resulting in a single update operation. - -description: "set: basic" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-complex.textproto b/firestore/tests/unit/v1beta1/testdata/set-complex.textproto deleted file mode 100644 index 6ec19500a2d0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-complex.textproto +++ /dev/null @@ -1,58 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A call to a write method with complicated input data. 
- -description: "set: complex" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - array_value: < - values: < - integer_value: 1 - > - values: < - double_value: 2.5 - > - > - > - > - fields: < - key: "b" - value: < - map_value: < - fields: < - key: "c" - value: < - array_value: < - values: < - string_value: "three" - > - values: < - map_value: < - fields: < - key: "d" - value: < - boolean_value: true - > - > - > - > - > - > - > - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-del-merge-alone.textproto b/firestore/tests/unit/v1beta1/testdata/set-del-merge-alone.textproto deleted file mode 100644 index 811ab8dfe7bb..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-del-merge-alone.textproto +++ /dev/null @@ -1,28 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A Delete sentinel can appear with a merge option. If the delete paths are the -# only ones to be merged, then no document is sent, just an update mask. - -description: "set-merge: Delete with merge" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "b" - field: "c" - > - > - json_data: "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - > - update_mask: < - field_paths: "b.c" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-del-merge.textproto b/firestore/tests/unit/v1beta1/testdata/set-del-merge.textproto deleted file mode 100644 index b8d8631051e7..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-del-merge.textproto +++ /dev/null @@ -1,37 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A Delete sentinel can appear with a merge option. - -description: "set-merge: Delete with merge" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "a" - > - fields: < - field: "b" - field: "c" - > - > - json_data: "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b.c" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-del-mergeall.textproto b/firestore/tests/unit/v1beta1/testdata/set-del-mergeall.textproto deleted file mode 100644 index af1e84524bca..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-del-mergeall.textproto +++ /dev/null @@ -1,31 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A Delete sentinel can appear with a mergeAll option. 
- -description: "set: Delete with MergeAll" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - all: true - > - json_data: "{\"a\": 1, \"b\": {\"c\": \"Delete\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b.c" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-del-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-del-noarray-nested.textproto deleted file mode 100644 index bbf6a3d00af3..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-del-noarray-nested.textproto +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a field. Deletes are implemented by -# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not -# support array indexing. - -description: "set: Delete cannot be anywhere inside an array value" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-del-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/set-del-noarray.textproto deleted file mode 100644 index 07fc6497dc35..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-del-noarray.textproto +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a field. Deletes are implemented by -# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not -# support array indexing. - -description: "set: Delete cannot be in an array value" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, \"Delete\"]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-del-nomerge.textproto b/firestore/tests/unit/v1beta1/testdata/set-del-nomerge.textproto deleted file mode 100644 index cb6ef4f85870..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-del-nomerge.textproto +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The client signals an error if the Delete sentinel is in the input data, but not -# selected by a merge option, because this is most likely a programming bug. - -description: "set-merge: Delete cannot appear in an unmerged field" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "a" - > - > - json_data: "{\"a\": 1, \"b\": \"Delete\"}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-del-nonleaf.textproto b/firestore/tests/unit/v1beta1/testdata/set-del-nonleaf.textproto deleted file mode 100644 index 54f22d95c521..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-del-nonleaf.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If a Delete is part of the value at a merge path, then the user is confused: -# their merge path says "replace this entire value" but their Delete says "delete -# this part of the value". This should be an error, just as if they specified -# Delete in a Set with no merge. - -description: "set-merge: Delete cannot appear as part of a merge path" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "h" - > - > - json_data: "{\"h\": {\"g\": \"Delete\"}}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-del-wo-merge.textproto b/firestore/tests/unit/v1beta1/testdata/set-del-wo-merge.textproto deleted file mode 100644 index 29196628bfd8..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-del-wo-merge.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Without a merge option, Set replaces the document with the input data. A Delete -# sentinel in the data makes no sense in this case. - -description: "set: Delete cannot appear unless a merge option is specified" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"Delete\"}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-empty.textproto b/firestore/tests/unit/v1beta1/testdata/set-empty.textproto deleted file mode 100644 index c2b73d3ff933..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-empty.textproto +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - - -description: "set: creating or setting an empty map" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-merge-fp.textproto b/firestore/tests/unit/v1beta1/testdata/set-merge-fp.textproto deleted file mode 100644 index 68690f6f1633..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-merge-fp.textproto +++ /dev/null @@ -1,40 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A merge with fields that use special characters. 
- -description: "set-merge: Merge with FieldPaths" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "*" - field: "~" - > - > - json_data: "{\"*\": {\"~\": true}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "*" - value: < - map_value: < - fields: < - key: "~" - value: < - boolean_value: true - > - > - > - > - > - > - update_mask: < - field_paths: "`*`.`~`" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-merge-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-merge-nested.textproto deleted file mode 100644 index 0d1282818d76..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-merge-nested.textproto +++ /dev/null @@ -1,41 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A merge option where the field is not at top level. Only fields mentioned in the -# option are present in the update operation. - -description: "set-merge: Merge with a nested field" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "h" - field: "g" - > - > - json_data: "{\"h\": {\"g\": 4, \"f\": 5}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "h" - value: < - map_value: < - fields: < - key: "g" - value: < - integer_value: 4 - > - > - > - > - > - > - update_mask: < - field_paths: "h.g" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-merge-nonleaf.textproto b/firestore/tests/unit/v1beta1/testdata/set-merge-nonleaf.textproto deleted file mode 100644 index ca41cb03402d..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-merge-nonleaf.textproto +++ /dev/null @@ -1,46 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If a field path is in a merge option, the value at that path replaces the stored -# value. That is true even if the value is complex. - -description: "set-merge: Merge field is not a leaf" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "h" - > - > - json_data: "{\"h\": {\"f\": 5, \"g\": 6}, \"e\": 7}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "h" - value: < - map_value: < - fields: < - key: "f" - value: < - integer_value: 5 - > - > - fields: < - key: "g" - value: < - integer_value: 6 - > - > - > - > - > - > - update_mask: < - field_paths: "h" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-merge-prefix.textproto b/firestore/tests/unit/v1beta1/testdata/set-merge-prefix.textproto deleted file mode 100644 index 1e2c2c50226e..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-merge-prefix.textproto +++ /dev/null @@ -1,21 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. 
- -# The prefix would make the other path meaningless, so this is probably a -# programming error. - -description: "set-merge: One merge path cannot be the prefix of another" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "a" - > - fields: < - field: "a" - field: "b" - > - > - json_data: "{\"a\": {\"b\": 1}}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-merge-present.textproto b/firestore/tests/unit/v1beta1/testdata/set-merge-present.textproto deleted file mode 100644 index f6665de5cdc3..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-merge-present.textproto +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The client signals an error if a merge option mentions a path that is not in the -# input data. - -description: "set-merge: Merge fields must all be present in data" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "b" - > - fields: < - field: "a" - > - > - json_data: "{\"a\": 1}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-merge.textproto b/firestore/tests/unit/v1beta1/testdata/set-merge.textproto deleted file mode 100644 index 279125253cb1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-merge.textproto +++ /dev/null @@ -1,32 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Fields in the input data but not in a merge option are pruned. - -description: "set-merge: Merge with a field" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "a" - > - > - json_data: "{\"a\": 1, \"b\": 2}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-mergeall-empty.textproto b/firestore/tests/unit/v1beta1/testdata/set-mergeall-empty.textproto deleted file mode 100644 index 16df8a22bed3..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-mergeall-empty.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# This is a valid call that can be used to ensure a document exists. - -description: "set: MergeAll can be specified with empty data." -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - all: true - > - json_data: "{}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - > - update_mask: < - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-mergeall-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-mergeall-nested.textproto deleted file mode 100644 index 1fbc6973cd28..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-mergeall-nested.textproto +++ /dev/null @@ -1,45 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# MergeAll with nested fields results in an update mask that includes entries for -# all the leaf fields. - -description: "set: MergeAll with nested fields" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - all: true - > - json_data: "{\"h\": { \"g\": 3, \"f\": 4 }}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "h" - value: < - map_value: < - fields: < - key: "f" - value: < - integer_value: 4 - > - > - fields: < - key: "g" - value: < - integer_value: 3 - > - > - > - > - > - > - update_mask: < - field_paths: "h.f" - field_paths: "h.g" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-mergeall.textproto b/firestore/tests/unit/v1beta1/testdata/set-mergeall.textproto deleted file mode 100644 index cb2ebc52bc06..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-mergeall.textproto +++ /dev/null @@ -1,37 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The MergeAll option with a simple piece of data. - -description: "set: MergeAll" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - all: true - > - json_data: "{\"a\": 1, \"b\": 2}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - fields: < - key: "b" - value: < - integer_value: 2 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-nodel.textproto b/firestore/tests/unit/v1beta1/testdata/set-nodel.textproto deleted file mode 100644 index 0fb887d461be..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-nodel.textproto +++ /dev/null @@ -1,11 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel cannot be used in Create, or in Set without a Merge option. - -description: "set: Delete cannot appear in data" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"Delete\"}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-nosplit.textproto b/firestore/tests/unit/v1beta1/testdata/set-nosplit.textproto deleted file mode 100644 index 0ff3fadcf4ba..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-nosplit.textproto +++ /dev/null @@ -1,37 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Create and Set treat their map keys literally. They do not split on dots. 
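A sketch of the distinction the comment above draws, assuming the google-cloud-firestore Python client; setup and document path are illustrative.

    from google.cloud import firestore

    doc = firestore.Client().collection("C").document("d")
    # set() keeps map keys literal: this writes a field literally named "a.b"
    # whose value is a map with a key literally named "c.d".
    doc.set({"a.b": {"c.d": 1}, "e": 2})
    # update(), by contrast, splits top-level dotted keys into field paths.
    doc.update({"a.b.c": 7})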
- -description: "set: don\342\200\231t split on dots" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a.b" - value: < - map_value: < - fields: < - key: "c.d" - value: < - integer_value: 1 - > - > - > - > - > - fields: < - key: "e" - value: < - integer_value: 2 - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-special-chars.textproto b/firestore/tests/unit/v1beta1/testdata/set-special-chars.textproto deleted file mode 100644 index f4122c9f004c..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-special-chars.textproto +++ /dev/null @@ -1,38 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Create and Set treat their map keys literally. They do not escape special -# characters. - -description: "set: non-alpha characters in map keys" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{ \"*\": { \".\": 1 }, \"~\": 2 }" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "*" - value: < - map_value: < - fields: < - key: "." - value: < - integer_value: 1 - > - > - > - > - > - fields: < - key: "~" - value: < - integer_value: 2 - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-alone-mergeall.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-alone-mergeall.textproto deleted file mode 100644 index 16ce4cfbd913..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-alone-mergeall.textproto +++ /dev/null @@ -1,26 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ServerTimestamps, then no update operation -# should be produced. - -description: "set: ServerTimestamp alone with MergeAll" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - all: true - > - json_data: "{\"a\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-alone.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-alone.textproto deleted file mode 100644 index 6ce46d7f1ab5..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-alone.textproto +++ /dev/null @@ -1,28 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ServerTimestamps, then an update operation -# with an empty map should be produced. 
- -description: "set: ServerTimestamp alone" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-merge-both.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-merge-both.textproto deleted file mode 100644 index 5cc7bbc9efbf..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-merge-both.textproto +++ /dev/null @@ -1,45 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Just as when no merge option is specified, ServerTimestamp sentinel values are -# removed from the data in the update operation and become transforms. - -description: "set-merge: ServerTimestamp with Merge of both fields" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "a" - > - fields: < - field: "b" - > - > - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-merge-nonleaf-alone.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-merge-nonleaf-alone.textproto deleted file mode 100644 index f513b6c804c5..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-merge-nonleaf-alone.textproto +++ /dev/null @@ -1,37 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If a field path is in a merge option, the value at that path replaces the stored -# value. If the value has only ServerTimestamps, they become transforms and we -# clear the value by including the field path in the update mask. 
- -description: "set-merge: non-leaf merge field with ServerTimestamp alone" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "h" - > - > - json_data: "{\"h\": {\"g\": \"ServerTimestamp\"}, \"e\": 7}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - > - update_mask: < - field_paths: "h" - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "h.g" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-merge-nonleaf.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-merge-nonleaf.textproto deleted file mode 100644 index e53e7e2682eb..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-merge-nonleaf.textproto +++ /dev/null @@ -1,49 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If a field path is in a merge option, the value at that path replaces the stored -# value, and ServerTimestamps inside that value become transforms as usual. - -description: "set-merge: non-leaf merge field with ServerTimestamp" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "h" - > - > - json_data: "{\"h\": {\"f\": 5, \"g\": \"ServerTimestamp\"}, \"e\": 7}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "h" - value: < - map_value: < - fields: < - key: "f" - value: < - integer_value: 5 - > - > - > - > - > - > - update_mask: < - field_paths: "h" - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "h.g" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-merge-nowrite.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-merge-nowrite.textproto deleted file mode 100644 index 3222230dc510..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-merge-nowrite.textproto +++ /dev/null @@ -1,28 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If all the fields in the merge option have ServerTimestamp values, then no -# update operation is produced, only a transform. - -description: "set-merge: If no ordinary values in Merge, no write" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "b" - > - > - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-mergeall.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-mergeall.textproto deleted file mode 100644 index b8c53a566fdd..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-mergeall.textproto +++ /dev/null @@ -1,40 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Just as when no merge option is specified, ServerTimestamp sentinel values are -# removed from the data in the update operation and become transforms. - -description: "set: ServerTimestamp with MergeAll" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - all: true - > - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-multi.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-multi.textproto deleted file mode 100644 index 375ec18d68fd..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-multi.textproto +++ /dev/null @@ -1,38 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ServerTimestamp field. Since all the -# ServerTimestamp fields are removed, the only field in the update is "a". - -description: "set: multiple ServerTimestamp fields" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - field_transforms: < - field_path: "c.d" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-nested.textproto deleted file mode 100644 index abfd2e8fd874..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-nested.textproto +++ /dev/null @@ -1,35 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A ServerTimestamp value can occur at any depth. In this case, the transform -# applies to the field path "b.c". Since "c" is removed from the update, "b" -# becomes empty, so it is also removed from the update. 
- -description: "set: nested ServerTimestamp field" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-noarray-nested.textproto deleted file mode 100644 index 241d79151a42..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. - -description: "set: ServerTimestamp cannot be anywhere inside an array value" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-noarray.textproto deleted file mode 100644 index 591fb0343854..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. Firestore transforms -# don't support array indexing. - -description: "set: ServerTimestamp cannot be in an array value" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-nomerge.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-nomerge.textproto deleted file mode 100644 index 20c0ae1fbb0e..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-nomerge.textproto +++ /dev/null @@ -1,33 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the ServerTimestamp value is not mentioned in a merge option, then it is -# pruned from the data but does not result in a transform. 
- -description: "set-merge: If is ServerTimestamp not in Merge, no transform" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - option: < - fields: < - field: "a" - > - > - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st-with-empty-map.textproto b/firestore/tests/unit/v1beta1/testdata/set-st-with-empty-map.textproto deleted file mode 100644 index 5e187983f995..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st-with-empty-map.textproto +++ /dev/null @@ -1,42 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# When a ServerTimestamp and a map both reside inside a map, the ServerTimestamp -# should be stripped out but the empty map should remain. - -description: "set: ServerTimestamp beside an empty map" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": {\"b\": {}, \"c\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - map_value: < - fields: < - key: "b" - value: < - map_value: < - > - > - > - > - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/set-st.textproto b/firestore/tests/unit/v1beta1/testdata/set-st.textproto deleted file mode 100644 index 8bceddceeacc..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/set-st.textproto +++ /dev/null @@ -1,36 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with the special ServerTimestamp sentinel is removed from the data in the -# update operation. Instead it appears in a separate Transform operation. Note -# that in these tests, the string "ServerTimestamp" should be replaced with the -# special ServerTimestamp value. 
- -description: "set: ServerTimestamp with data" -set: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/test-suite.binproto b/firestore/tests/unit/v1beta1/testdata/test-suite.binproto deleted file mode 100644 index 6e3ce3973752..000000000000 Binary files a/firestore/tests/unit/v1beta1/testdata/test-suite.binproto and /dev/null differ diff --git a/firestore/tests/unit/v1beta1/testdata/update-all-transforms.textproto b/firestore/tests/unit/v1beta1/testdata/update-all-transforms.textproto deleted file mode 100644 index 225cc61e405e..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-all-transforms.textproto +++ /dev/null @@ -1,67 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can be created with any amount of transforms. - -description: "update: all transforms in a single call" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": [\"ArrayUnion\", 1, 2, 3], \"d\": [\"ArrayRemove\", 4, 5, 6]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - field_transforms: < - field_path: "c" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "d" - remove_all_from_array: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-alone.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayremove-alone.textproto deleted file mode 100644 index 8c79a31d5052..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-alone.textproto +++ /dev/null @@ -1,36 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ArrayRemove, then no update operation should -# be produced. 
- -description: "update: ArrayRemove alone" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [\"ArrayRemove\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-multi.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayremove-multi.textproto deleted file mode 100644 index 2362b6e09458..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-multi.textproto +++ /dev/null @@ -1,69 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ArrayRemove field. Since all the ArrayRemove -# fields are removed, the only field in the update is "a". - -# b is not in the mask because it will be set in the transform. c must be in the -# mask: it should be replaced entirely. The transform will set c.d to the -# timestamp, but the update will delete the rest of c. - -description: "update: multiple ArrayRemove fields" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3], \"c\": {\"d\": [\"ArrayRemove\", 4, 5, 6]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "c" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "c.d" - remove_all_from_array: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayremove-nested.textproto deleted file mode 100644 index 143790179eaf..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-nested.textproto +++ /dev/null @@ -1,52 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ArrayRemove value can occur at any depth. In this case, the transform applies -# to the field path "b.c". Since "c" is removed from the update, "b" becomes -# empty, so it is also removed from the update. 
- -description: "update: nested ArrayRemove field" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": [\"ArrayRemove\", 1, 2, 3]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayremove-noarray-nested.textproto deleted file mode 100644 index 04eca965c688..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ArrayRemove. Firestore transforms don't support array indexing. - -description: "update: ArrayRemove cannot be anywhere inside an array value" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": [\"ArrayRemove\", 1, 2, 3]}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayremove-noarray.textproto deleted file mode 100644 index bbd27bf017e1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayRemove must be the value of a field. Firestore transforms don't support -# array indexing. - -description: "update: ArrayRemove cannot be in an array value" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-with-st.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayremove-with-st.textproto deleted file mode 100644 index 4888b44f1c01..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayremove-with-st.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. It may not appear in -# an ArrayUnion. 
- -description: "update: The ServerTimestamp sentinel cannot be in an ArrayUnion" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [\"ArrayRemove\", 1, \"ServerTimestamp\", 3]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayremove.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayremove.textproto deleted file mode 100644 index 3b767cf486c3..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayremove.textproto +++ /dev/null @@ -1,50 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with ArrayRemove is removed from the data in the update operation. Instead -# it appears in a separate Transform operation. - -description: "update: ArrayRemove with data" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayRemove\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-alone.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayunion-alone.textproto deleted file mode 100644 index ec12818da74c..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-alone.textproto +++ /dev/null @@ -1,36 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ArrayUnion, then no update operation should -# be produced. - -description: "update: ArrayUnion alone" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [\"ArrayUnion\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-multi.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayunion-multi.textproto deleted file mode 100644 index 8edf6a3af046..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-multi.textproto +++ /dev/null @@ -1,69 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ArrayUnion field. Since all the ArrayUnion -# fields are removed, the only field in the update is "a". - -# b is not in the mask because it will be set in the transform. 
c must be in the -# mask: it should be replaced entirely. The transform will set c.d to the -# timestamp, but the update will delete the rest of c. - -description: "update: multiple ArrayUnion fields" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3], \"c\": {\"d\": [\"ArrayUnion\", 4, 5, 6]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "c" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "c.d" - append_missing_elements: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayunion-nested.textproto deleted file mode 100644 index 217e2e2ca775..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-nested.textproto +++ /dev/null @@ -1,52 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ArrayUnion value can occur at any depth. In this case, the transform applies -# to the field path "b.c". Since "c" is removed from the update, "b" becomes -# empty, so it is also removed from the update. - -description: "update: nested ArrayUnion field" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": [\"ArrayUnion\", 1, 2, 3]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayunion-noarray-nested.textproto deleted file mode 100644 index 0326781830ec..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ArrayUnion. Firestore transforms don't support array indexing. 
- -description: "update: ArrayUnion cannot be anywhere inside an array value" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": [\"ArrayUnion\", 1, 2, 3]}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayunion-noarray.textproto deleted file mode 100644 index c199f9f73c91..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayUnion must be the value of a field. Firestore transforms don't support -# array indexing. - -description: "update: ArrayUnion cannot be in an array value" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, [\"ArrayRemove\", 1, 2, 3]]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-with-st.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayunion-with-st.textproto deleted file mode 100644 index ee022f8492bc..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayunion-with-st.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. It may not appear in -# an ArrayUnion. - -description: "update: The ServerTimestamp sentinel cannot be in an ArrayUnion" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [\"ArrayUnion\", 1, \"ServerTimestamp\", 3]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-arrayunion.textproto b/firestore/tests/unit/v1beta1/testdata/update-arrayunion.textproto deleted file mode 100644 index 81b240b891bb..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-arrayunion.textproto +++ /dev/null @@ -1,50 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with ArrayUnion is removed from the data in the update operation. Instead -# it appears in a separate Transform operation. 
- -description: "update: ArrayUnion with data" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": [\"ArrayUnion\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-badchar.textproto b/firestore/tests/unit/v1beta1/testdata/update-badchar.textproto deleted file mode 100644 index 656ff53b686a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-badchar.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The keys of the data given to Update are interpreted, unlike those of Create and -# Set. They cannot contain special characters. - -description: "update: invalid character" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a~b\": 1}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-basic.textproto b/firestore/tests/unit/v1beta1/testdata/update-basic.textproto deleted file mode 100644 index 9da316f58ebe..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-basic.textproto +++ /dev/null @@ -1,30 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A simple call, resulting in a single update operation. - -description: "update: basic" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-complex.textproto b/firestore/tests/unit/v1beta1/testdata/update-complex.textproto deleted file mode 100644 index 1a6d9eff64b9..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-complex.textproto +++ /dev/null @@ -1,65 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A call to a write method with complicated input data. 
- -description: "update: complex" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - array_value: < - values: < - integer_value: 1 - > - values: < - double_value: 2.5 - > - > - > - > - fields: < - key: "b" - value: < - map_value: < - fields: < - key: "c" - value: < - array_value: < - values: < - string_value: "three" - > - values: < - map_value: < - fields: < - key: "d" - value: < - boolean_value: true - > - > - > - > - > - > - > - > - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-del-alone.textproto b/firestore/tests/unit/v1beta1/testdata/update-del-alone.textproto deleted file mode 100644 index 8f558233f037..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-del-alone.textproto +++ /dev/null @@ -1,25 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the input data consists solely of Deletes, then the update operation has no -# map, just an update mask. - -description: "update: Delete alone" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": \"Delete\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-del-dot.textproto b/firestore/tests/unit/v1beta1/testdata/update-del-dot.textproto deleted file mode 100644 index c0ebdf61f787..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-del-dot.textproto +++ /dev/null @@ -1,46 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# After expanding top-level dotted fields, fields with Delete values are pruned -# from the output data, but appear in the update mask. - -description: "update: Delete with a dotted field" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b.c\": \"Delete\", \"b.d\": 2}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - fields: < - key: "b" - value: < - map_value: < - fields: < - key: "d" - value: < - integer_value: 2 - > - > - > - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b.c" - field_paths: "b.d" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-del-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-del-nested.textproto deleted file mode 100644 index ed102697e682..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-del-nested.textproto +++ /dev/null @@ -1,11 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a top-level key. - -description: "update: Delete cannot be nested" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": {\"b\": \"Delete\"}}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-del-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-del-noarray-nested.textproto deleted file mode 100644 index a2eec49661c0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-del-noarray-nested.textproto +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a field. Deletes are implemented by -# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not -# support array indexing. - -description: "update: Delete cannot be anywhere inside an array value" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-del-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/update-del-noarray.textproto deleted file mode 100644 index a7eea87ef49f..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-del-noarray.textproto +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a field. Deletes are implemented by -# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not -# support array indexing. - -description: "update: Delete cannot be in an array value" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, \"Delete\"]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-del.textproto b/firestore/tests/unit/v1beta1/testdata/update-del.textproto deleted file mode 100644 index ec443e6c7035..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-del.textproto +++ /dev/null @@ -1,32 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If a field's value is the Delete sentinel, then it doesn't appear in the update -# data, but does in the mask. 
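Sketched with the Python client (setup illustrative), the Delete-in-update case described above is:

    from google.cloud import firestore

    doc = firestore.Client().collection("C").document("d")
    # DELETE_FIELD removes "b": it appears in the update mask but not in the
    # data sent with the update write.
    doc.update({"a": 1, "b": firestore.DELETE_FIELD})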
- -description: "update: Delete" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"Delete\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-exists-precond.textproto b/firestore/tests/unit/v1beta1/testdata/update-exists-precond.textproto deleted file mode 100644 index 3c6fef4e2263..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-exists-precond.textproto +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Update method does not support an explicit exists precondition. - -description: "update: Exists precondition is invalid" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - precondition: < - exists: true - > - json_data: "{\"a\": 1}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-fp-empty-component.textproto b/firestore/tests/unit/v1beta1/testdata/update-fp-empty-component.textproto deleted file mode 100644 index c3bceff3e4b8..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-fp-empty-component.textproto +++ /dev/null @@ -1,11 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Empty fields are not allowed. - -description: "update: empty field path component" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a..b\": 1}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-nested-transform-and-nested-value.textproto b/firestore/tests/unit/v1beta1/testdata/update-nested-transform-and-nested-value.textproto deleted file mode 100644 index d2cee270d531..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-nested-transform-and-nested-value.textproto +++ /dev/null @@ -1,58 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# For updates, top-level paths in json-like map inputs are split on the dot. That -# is, an input {"a.b.c": 7} results in an update to field c of object b of object -# a with value 7. In order to specify this behavior, the update must use a -# fieldmask "a.b.c". However, fieldmasks are only used for concrete values - -# transforms are separately encoded in a DocumentTransform_FieldTransform array. - -# This test exercises a bug found in python -# (https://github.com/googleapis/google-cloud-python/issues/7215) in which nested -# transforms ({"a.c": "ServerTimestamp"}) next to nested values ({"a.b": 7}) -# incorrectly caused the fieldmask "a" to be set, which has the effect of wiping -# out all data in "a" other than what was specified in the json-like input. - -# Instead, as this test specifies, transforms should not affect the fieldmask. - -description: "update: Nested transforms should not affect the field mask, even\nwhen there are other values that do. 
Transforms should only affect the\nDocumentTransform_FieldTransform list." -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a.b\": 7, \"a.c\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - map_value: < - fields: < - key: "b" - value: < - integer_value: 7 - > - > - > - > - > - > - update_mask: < - field_paths: "a.b" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-no-paths.textproto b/firestore/tests/unit/v1beta1/testdata/update-no-paths.textproto deleted file mode 100644 index b524b7483f79..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-no-paths.textproto +++ /dev/null @@ -1,11 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# It is a client-side error to call Update with empty data. - -description: "update: no paths" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-all-transforms.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-all-transforms.textproto deleted file mode 100644 index 8cfad4732034..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-all-transforms.textproto +++ /dev/null @@ -1,82 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can be created with any amount of transforms. 
- -description: "update-paths: all transforms in a single call" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - field_paths: < - field: "c" - > - field_paths: < - field: "d" - > - json_values: "1" - json_values: "\"ServerTimestamp\"" - json_values: "[\"ArrayUnion\", 1, 2, 3]" - json_values: "[\"ArrayRemove\", 4, 5, 6]" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - field_transforms: < - field_path: "c" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "d" - remove_all_from_array: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-alone.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-alone.textproto deleted file mode 100644 index 68f0e147b2de..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-alone.textproto +++ /dev/null @@ -1,39 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ArrayRemove, then no update operation should -# be produced. - -description: "update-paths: ArrayRemove alone" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[\"ArrayRemove\", 1, 2, 3]" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-multi.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-multi.textproto deleted file mode 100644 index b60c3f36a6c0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-multi.textproto +++ /dev/null @@ -1,76 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ArrayRemove field. Since all the ArrayRemove -# fields are removed, the only field in the update is "a". 
- -description: "update-paths: multiple ArrayRemove fields" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - field_paths: < - field: "c" - > - json_values: "1" - json_values: "[\"ArrayRemove\", 1, 2, 3]" - json_values: "{\"d\": [\"ArrayRemove\", 4, 5, 6]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "c" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "c.d" - remove_all_from_array: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-nested.textproto deleted file mode 100644 index 381be19d553f..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-nested.textproto +++ /dev/null @@ -1,59 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ArrayRemove value can occur at any depth. In this case, the transform applies -# to the field path "b.c". Since "c" is removed from the update, "b" becomes -# empty, so it is also removed from the update. - -description: "update-paths: nested ArrayRemove field" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - json_values: "1" - json_values: "{\"c\": [\"ArrayRemove\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-noarray-nested.textproto deleted file mode 100644 index 35f6c67b2e56..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-noarray-nested.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ArrayRemove. Firestore transforms don't support array indexing. 
- -description: "update-paths: ArrayRemove cannot be anywhere inside an array value" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[1, {\"b\": [\"ArrayRemove\", 1, 2, 3]}]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-noarray.textproto deleted file mode 100644 index 45cab48dd9e1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-noarray.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayRemove must be the value of a field. Firestore transforms don't support -# array indexing. - -description: "update-paths: ArrayRemove cannot be in an array value" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[1, 2, [\"ArrayRemove\", 1, 2, 3]]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-with-st.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-with-st.textproto deleted file mode 100644 index 67b92a3ef3b9..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove-with-st.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. It may not appear in -# an ArrayUnion. - -description: "update-paths: The ServerTimestamp sentinel cannot be in an ArrayUnion" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[\"ArrayRemove\", 1, \"ServerTimestamp\", 3]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove.textproto deleted file mode 100644 index d3866676ede0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayremove.textproto +++ /dev/null @@ -1,57 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with ArrayRemove is removed from the data in the update operation. Instead -# it appears in a separate Transform operation. 
- -description: "update-paths: ArrayRemove with data" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - json_values: "1" - json_values: "[\"ArrayRemove\", 1, 2, 3]" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - remove_all_from_array: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-alone.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-alone.textproto deleted file mode 100644 index 48100e0abceb..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-alone.textproto +++ /dev/null @@ -1,39 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ArrayUnion, then no update operation should -# be produced. - -description: "update-paths: ArrayUnion alone" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[\"ArrayUnion\", 1, 2, 3]" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-multi.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-multi.textproto deleted file mode 100644 index 03772e5ddd1a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-multi.textproto +++ /dev/null @@ -1,76 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ArrayUnion field. Since all the ArrayUnion -# fields are removed, the only field in the update is "a". 
- -description: "update-paths: multiple ArrayUnion fields" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - field_paths: < - field: "c" - > - json_values: "1" - json_values: "[\"ArrayUnion\", 1, 2, 3]" - json_values: "{\"d\": [\"ArrayUnion\", 4, 5, 6]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "c" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - field_transforms: < - field_path: "c.d" - append_missing_elements: < - values: < - integer_value: 4 - > - values: < - integer_value: 5 - > - values: < - integer_value: 6 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-nested.textproto deleted file mode 100644 index 1420e4e2806b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-nested.textproto +++ /dev/null @@ -1,59 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# An ArrayUnion value can occur at any depth. In this case, the transform applies -# to the field path "b.c". Since "c" is removed from the update, "b" becomes -# empty, so it is also removed from the update. - -description: "update-paths: nested ArrayUnion field" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - json_values: "1" - json_values: "{\"c\": [\"ArrayUnion\", 1, 2, 3]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-noarray-nested.textproto deleted file mode 100644 index ab75bf38a3ae..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-noarray-nested.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ArrayUnion. Firestore transforms don't support array indexing. 
- -description: "update-paths: ArrayUnion cannot be anywhere inside an array value" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[1, {\"b\": [\"ArrayUnion\", 1, 2, 3]}]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-noarray.textproto deleted file mode 100644 index fac72644fc38..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-noarray.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# ArrayUnion must be the value of a field. Firestore transforms don't support -# array indexing. - -description: "update-paths: ArrayUnion cannot be in an array value" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[1, 2, [\"ArrayRemove\", 1, 2, 3]]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-with-st.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-with-st.textproto deleted file mode 100644 index d194c09bd775..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion-with-st.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. It may not appear in -# an ArrayUnion. - -description: "update-paths: The ServerTimestamp sentinel cannot be in an ArrayUnion" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[\"ArrayUnion\", 1, \"ServerTimestamp\", 3]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion.textproto deleted file mode 100644 index fc56c1e29471..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-arrayunion.textproto +++ /dev/null @@ -1,57 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with ArrayUnion is removed from the data in the update operation. Instead -# it appears in a separate Transform operation. 
- -description: "update-paths: ArrayUnion with data" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - json_values: "1" - json_values: "[\"ArrayUnion\", 1, 2, 3]" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - append_missing_elements: < - values: < - integer_value: 1 - > - values: < - integer_value: 2 - > - values: < - integer_value: 3 - > - > - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-basic.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-basic.textproto deleted file mode 100644 index 515f29d6af02..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-basic.textproto +++ /dev/null @@ -1,33 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A simple call, resulting in a single update operation. - -description: "update-paths: basic" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "1" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-complex.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-complex.textproto deleted file mode 100644 index 38a832239f5c..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-complex.textproto +++ /dev/null @@ -1,72 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A call to a write method with complicated input data. 
- -description: "update-paths: complex" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - json_values: "[1, 2.5]" - json_values: "{\"c\": [\"three\", {\"d\": true}]}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - array_value: < - values: < - integer_value: 1 - > - values: < - double_value: 2.5 - > - > - > - > - fields: < - key: "b" - value: < - map_value: < - fields: < - key: "c" - value: < - array_value: < - values: < - string_value: "three" - > - values: < - map_value: < - fields: < - key: "d" - value: < - boolean_value: true - > - > - > - > - > - > - > - > - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-del-alone.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-del-alone.textproto deleted file mode 100644 index 5dbb787de94b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-del-alone.textproto +++ /dev/null @@ -1,28 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the input data consists solely of Deletes, then the update operation has no -# map, just an update mask. - -description: "update-paths: Delete alone" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "\"Delete\"" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-del-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-del-nested.textproto deleted file mode 100644 index bdf65fb0ad91..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-del-nested.textproto +++ /dev/null @@ -1,14 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a top-level key. - -description: "update-paths: Delete cannot be nested" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "{\"b\": \"Delete\"}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-del-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-del-noarray-nested.textproto deleted file mode 100644 index d3da15dda80e..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-del-noarray-nested.textproto +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a field. Deletes are implemented by -# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not -# support array indexing. 
- -description: "update-paths: Delete cannot be anywhere inside an array value" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[1, {\"b\": \"Delete\"}]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-del-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-del-noarray.textproto deleted file mode 100644 index 9ebdd0945198..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-del-noarray.textproto +++ /dev/null @@ -1,16 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Delete sentinel must be the value of a field. Deletes are implemented by -# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not -# support array indexing. - -description: "update-paths: Delete cannot be in an array value" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[1, 2, \"Delete\"]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-del.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-del.textproto deleted file mode 100644 index 5197a78488f0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-del.textproto +++ /dev/null @@ -1,39 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If a field's value is the Delete sentinel, then it doesn't appear in the update -# data, but does in the mask. - -description: "update-paths: Delete" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - json_values: "1" - json_values: "\"Delete\"" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-exists-precond.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-exists-precond.textproto deleted file mode 100644 index 084e07726ee0..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-exists-precond.textproto +++ /dev/null @@ -1,17 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Update method does not support an explicit exists precondition. - -description: "update-paths: Exists precondition is invalid" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - precondition: < - exists: true - > - field_paths: < - field: "a" - > - json_values: "1" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-del.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-fp-del.textproto deleted file mode 100644 index 5c92aeb8ca8b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-del.textproto +++ /dev/null @@ -1,47 +0,0 @@ -# DO NOT MODIFY. 
This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If one nested field is deleted, and another isn't, preserve the second. - -description: "update-paths: field paths with delete" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "foo" - field: "bar" - > - field_paths: < - field: "foo" - field: "delete" - > - json_values: "1" - json_values: "\"Delete\"" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "foo" - value: < - map_value: < - fields: < - key: "bar" - value: < - integer_value: 1 - > - > - > - > - > - > - update_mask: < - field_paths: "foo.bar" - field_paths: "foo.delete" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-dup-transforms.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-fp-dup-transforms.textproto deleted file mode 100644 index a84725a8d4d1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-dup-transforms.textproto +++ /dev/null @@ -1,23 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The same field cannot occur more than once, even if all the operations are -# transforms. - -description: "update-paths: duplicate field path with only transforms" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - field_paths: < - field: "a" - > - json_values: "[\"ArrayUnion\", 1, 2, 3]" - json_values: "\"ServerTimestamp\"" - json_values: "[\"ArrayUnion\", 4, 5, 6]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-dup.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-fp-dup.textproto deleted file mode 100644 index fedbd3aab99d..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-dup.textproto +++ /dev/null @@ -1,22 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The same field cannot occur more than once. - -description: "update-paths: duplicate field path" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - field_paths: < - field: "a" - > - json_values: "1" - json_values: "2" - json_values: "3" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-empty-component.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-fp-empty-component.textproto deleted file mode 100644 index 7a5df25b7ed2..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-empty-component.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Empty fields are not allowed. 
- -description: "update-paths: empty field path component" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "*" - field: "" - > - json_values: "1" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-empty.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-fp-empty.textproto deleted file mode 100644 index 311e309326d1..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-empty.textproto +++ /dev/null @@ -1,13 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A FieldPath of length zero is invalid. - -description: "update-paths: empty field path" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - > - json_values: "1" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-multi.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-fp-multi.textproto deleted file mode 100644 index 9ba41e39812c..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-multi.textproto +++ /dev/null @@ -1,42 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The UpdatePaths or equivalent method takes a list of FieldPaths. Each FieldPath -# is a sequence of uninterpreted path components. - -description: "update-paths: multiple-element field path" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - field: "b" - > - json_values: "1" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - map_value: < - fields: < - key: "b" - value: < - integer_value: 1 - > - > - > - > - > - > - update_mask: < - field_paths: "a.b" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-nosplit.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-fp-nosplit.textproto deleted file mode 100644 index 516495266707..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-fp-nosplit.textproto +++ /dev/null @@ -1,48 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# FieldPath components are not split on dots. 
- -description: "update-paths: FieldPath elements are not split on dots" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a.b" - field: "f.g" - > - json_values: "{\"n.o\": 7}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a.b" - value: < - map_value: < - fields: < - key: "f.g" - value: < - map_value: < - fields: < - key: "n.o" - value: < - integer_value: 7 - > - > - > - > - > - > - > - > - > - update_mask: < - field_paths: "`a.b`.`f.g`" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-no-paths.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-no-paths.textproto deleted file mode 100644 index d9939dc94701..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-no-paths.textproto +++ /dev/null @@ -1,10 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# It is a client-side error to call Update with empty data. - -description: "update-paths: no paths" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-1.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-1.textproto deleted file mode 100644 index 1710b91097e3..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-1.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# In the input data, one field cannot be a prefix of another. - -description: "update-paths: prefix #1" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - field: "b" - > - field_paths: < - field: "a" - > - json_values: "1" - json_values: "2" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-2.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-2.textproto deleted file mode 100644 index be78ab58a63b..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-2.textproto +++ /dev/null @@ -1,19 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# In the input data, one field cannot be a prefix of another. - -description: "update-paths: prefix #2" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "a" - field: "b" - > - json_values: "1" - json_values: "2" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-3.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-3.textproto deleted file mode 100644 index b8a84c9d1f80..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-prefix-3.textproto +++ /dev/null @@ -1,20 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. 
- -# In the input data, one field cannot be a prefix of another, even if the values -# could in principle be combined. - -description: "update-paths: prefix #3" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "a" - field: "d" - > - json_values: "{\"b\": 1}" - json_values: "2" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-special-chars.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-special-chars.textproto deleted file mode 100644 index 51cb33b31268..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-special-chars.textproto +++ /dev/null @@ -1,53 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# FieldPaths can contain special characters. - -description: "update-paths: special characters" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "*" - field: "~" - > - field_paths: < - field: "*" - field: "`" - > - json_values: "1" - json_values: "2" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "*" - value: < - map_value: < - fields: < - key: "`" - value: < - integer_value: 2 - > - > - fields: < - key: "~" - value: < - integer_value: 1 - > - > - > - > - > - > - update_mask: < - field_paths: "`*`.`\\``" - field_paths: "`*`.`~`" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-st-alone.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-st-alone.textproto deleted file mode 100644 index abc44f55b463..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-st-alone.textproto +++ /dev/null @@ -1,29 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ServerTimestamps, then no update operation -# should be produced. - -description: "update-paths: ServerTimestamp alone" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "\"ServerTimestamp\"" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - set_to_server_value: REQUEST_TIME - > - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-st-multi.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-st-multi.textproto deleted file mode 100644 index b0b7df17d836..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-st-multi.textproto +++ /dev/null @@ -1,56 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ServerTimestamp field. Since all the -# ServerTimestamp fields are removed, the only field in the update is "a". 
- -description: "update-paths: multiple ServerTimestamp fields" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - field_paths: < - field: "c" - > - json_values: "1" - json_values: "\"ServerTimestamp\"" - json_values: "{\"d\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "c" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - field_transforms: < - field_path: "c.d" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-st-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-st-nested.textproto deleted file mode 100644 index 3077368318e8..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-st-nested.textproto +++ /dev/null @@ -1,49 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A ServerTimestamp value can occur at any depth. In this case, the transform -# applies to the field path "b.c". Since "c" is removed from the update, "b" -# becomes empty, so it is also removed from the update. - -description: "update-paths: nested ServerTimestamp field" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - json_values: "1" - json_values: "{\"c\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-st-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-st-noarray-nested.textproto deleted file mode 100644 index 2c2cb89b62f4..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-st-noarray-nested.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. 
- -description: "update-paths: ServerTimestamp cannot be anywhere inside an array value" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[1, {\"b\": \"ServerTimestamp\"}]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-st-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-st-noarray.textproto deleted file mode 100644 index a2baa66f5762..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-st-noarray.textproto +++ /dev/null @@ -1,15 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. Firestore transforms -# don't support array indexing. - -description: "update-paths: ServerTimestamp cannot be in an array value" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "[1, 2, \"ServerTimestamp\"]" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-st-with-empty-map.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-st-with-empty-map.textproto deleted file mode 100644 index a54a241565de..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-st-with-empty-map.textproto +++ /dev/null @@ -1,51 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# When a ServerTimestamp and a map both reside inside a map, the ServerTimestamp -# should be stripped out but the empty map should remain. - -description: "update-paths: ServerTimestamp beside an empty map" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - json_values: "{\"b\": {}, \"c\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - map_value: < - fields: < - key: "b" - value: < - map_value: < - > - > - > - > - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-st.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-st.textproto deleted file mode 100644 index 40634c165864..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-st.textproto +++ /dev/null @@ -1,49 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with the special ServerTimestamp sentinel is removed from the data in the -# update operation. Instead it appears in a separate Transform operation. Note -# that in these tests, the string "ServerTimestamp" should be replaced with the -# special ServerTimestamp value. 
- -description: "update-paths: ServerTimestamp with data" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - field_paths: < - field: "a" - > - field_paths: < - field: "b" - > - json_values: "1" - json_values: "\"ServerTimestamp\"" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-paths-uptime.textproto b/firestore/tests/unit/v1beta1/testdata/update-paths-uptime.textproto deleted file mode 100644 index 7a15874bea64..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-paths-uptime.textproto +++ /dev/null @@ -1,40 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Update call supports a last-update-time precondition. - -description: "update-paths: last-update-time precondition" -update_paths: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - precondition: < - update_time: < - seconds: 42 - > - > - field_paths: < - field: "a" - > - json_values: "1" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - update_time: < - seconds: 42 - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-prefix-1.textproto b/firestore/tests/unit/v1beta1/testdata/update-prefix-1.textproto deleted file mode 100644 index e5c895e73b49..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-prefix-1.textproto +++ /dev/null @@ -1,11 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# In the input data, one field cannot be a prefix of another. - -description: "update: prefix #1" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a.b\": 1, \"a\": 2}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-prefix-2.textproto b/firestore/tests/unit/v1beta1/testdata/update-prefix-2.textproto deleted file mode 100644 index 4870176186a7..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-prefix-2.textproto +++ /dev/null @@ -1,11 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# In the input data, one field cannot be a prefix of another. 
- -description: "update: prefix #2" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"a.b\": 2}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-prefix-3.textproto b/firestore/tests/unit/v1beta1/testdata/update-prefix-3.textproto deleted file mode 100644 index 0c03b0d6b845..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-prefix-3.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# In the input data, one field cannot be a prefix of another, even if the values -# could in principle be combined. - -description: "update: prefix #3" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": {\"b\": 1}, \"a.d\": 2}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-quoting.textproto b/firestore/tests/unit/v1beta1/testdata/update-quoting.textproto deleted file mode 100644 index 20e530a7609a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-quoting.textproto +++ /dev/null @@ -1,45 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# In a field path, any component beginning with a non-letter or underscore is -# quoted. - -description: "update: non-letter starting chars are quoted, except underscore" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"_0.1.+2\": 1}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "_0" - value: < - map_value: < - fields: < - key: "1" - value: < - map_value: < - fields: < - key: "+2" - value: < - integer_value: 1 - > - > - > - > - > - > - > - > - > - update_mask: < - field_paths: "_0.`1`.`+2`" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-split-top-level.textproto b/firestore/tests/unit/v1beta1/testdata/update-split-top-level.textproto deleted file mode 100644 index d1b0ca0da163..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-split-top-level.textproto +++ /dev/null @@ -1,45 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Update method splits only top-level keys at dots. Keys at other levels are -# taken literally. 
- -description: "update: Split on dots for top-level keys only" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"h.g\": {\"j.k\": 6}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "h" - value: < - map_value: < - fields: < - key: "g" - value: < - map_value: < - fields: < - key: "j.k" - value: < - integer_value: 6 - > - > - > - > - > - > - > - > - > - update_mask: < - field_paths: "h.g" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-split.textproto b/firestore/tests/unit/v1beta1/testdata/update-split.textproto deleted file mode 100644 index b96fd6a4f70a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-split.textproto +++ /dev/null @@ -1,44 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Update method splits top-level keys at dots. - -description: "update: split on dots" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a.b.c\": 1}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - map_value: < - fields: < - key: "b" - value: < - map_value: < - fields: < - key: "c" - value: < - integer_value: 1 - > - > - > - > - > - > - > - > - > - update_mask: < - field_paths: "a.b.c" - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-st-alone.textproto b/firestore/tests/unit/v1beta1/testdata/update-st-alone.textproto deleted file mode 100644 index 0d5ab6e9fbaf..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-st-alone.textproto +++ /dev/null @@ -1,26 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# If the only values in the input are ServerTimestamps, then no update operation -# should be produced. - -description: "update: ServerTimestamp alone" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a" - set_to_server_value: REQUEST_TIME - > - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-st-dot.textproto b/firestore/tests/unit/v1beta1/testdata/update-st-dot.textproto deleted file mode 100644 index 19d4d18432e7..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-st-dot.textproto +++ /dev/null @@ -1,27 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# Like other uses of ServerTimestamp, the data is pruned and the field does not -# appear in the update mask, because it is in the transform. In this case An -# update operation is produced just to hold the precondition. 
- -description: "update: ServerTimestamp with dotted field" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a.b.c\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a.b.c" - set_to_server_value: REQUEST_TIME - > - > - current_document: < - exists: true - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-st-multi.textproto b/firestore/tests/unit/v1beta1/testdata/update-st-multi.textproto deleted file mode 100644 index 0434cb59ab5a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-st-multi.textproto +++ /dev/null @@ -1,49 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A document can have more than one ServerTimestamp field. Since all the -# ServerTimestamp fields are removed, the only field in the update is "a". - -# b is not in the mask because it will be set in the transform. c must be in the -# mask: it should be replaced entirely. The transform will set c.d to the -# timestamp, but the update will delete the rest of c. - -description: "update: multiple ServerTimestamp fields" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "c" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - field_transforms: < - field_path: "c.d" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-st-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-st-nested.textproto deleted file mode 100644 index f79d9c6a072a..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-st-nested.textproto +++ /dev/null @@ -1,42 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A ServerTimestamp value can occur at any depth. In this case, the transform -# applies to the field path "b.c". Since "c" is removed from the update, "b" -# becomes empty, so it is also removed from the update. 
- -description: "update: nested ServerTimestamp field" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - field_paths: "b" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-st-noarray-nested.textproto b/firestore/tests/unit/v1beta1/testdata/update-st-noarray-nested.textproto deleted file mode 100644 index 2939dd646436..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-st-noarray-nested.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# There cannot be an array value anywhere on the path from the document root to -# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. - -description: "update: ServerTimestamp cannot be anywhere inside an array value" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-st-noarray.textproto b/firestore/tests/unit/v1beta1/testdata/update-st-noarray.textproto deleted file mode 100644 index f3879cdf2260..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-st-noarray.textproto +++ /dev/null @@ -1,12 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The ServerTimestamp sentinel must be the value of a field. Firestore transforms -# don't support array indexing. - -description: "update: ServerTimestamp cannot be in an array value" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" - is_error: true -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-st-with-empty-map.textproto b/firestore/tests/unit/v1beta1/testdata/update-st-with-empty-map.textproto deleted file mode 100644 index 1901de2a15ef..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-st-with-empty-map.textproto +++ /dev/null @@ -1,48 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# When a ServerTimestamp and a map both reside inside a map, the ServerTimestamp -# should be stripped out but the empty map should remain. 
- -description: "update: ServerTimestamp beside an empty map" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": {\"b\": {}, \"c\": \"ServerTimestamp\"}}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - map_value: < - fields: < - key: "b" - value: < - map_value: < - > - > - > - > - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "a.c" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-st.textproto b/firestore/tests/unit/v1beta1/testdata/update-st.textproto deleted file mode 100644 index 12045a9220dc..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-st.textproto +++ /dev/null @@ -1,42 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# A key with the special ServerTimestamp sentinel is removed from the data in the -# update operation. Instead it appears in a separate Transform operation. Note -# that in these tests, the string "ServerTimestamp" should be replaced with the -# special ServerTimestamp value. - -description: "update: ServerTimestamp with data" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - exists: true - > - > - writes: < - transform: < - document: "projects/projectID/databases/(default)/documents/C/d" - field_transforms: < - field_path: "b" - set_to_server_value: REQUEST_TIME - > - > - > - > -> diff --git a/firestore/tests/unit/v1beta1/testdata/update-uptime.textproto b/firestore/tests/unit/v1beta1/testdata/update-uptime.textproto deleted file mode 100644 index 66119ac61c13..000000000000 --- a/firestore/tests/unit/v1beta1/testdata/update-uptime.textproto +++ /dev/null @@ -1,37 +0,0 @@ -# DO NOT MODIFY. This file was generated by -# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. - -# The Update call supports a last-update-time precondition. - -description: "update: last-update-time precondition" -update: < - doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" - precondition: < - update_time: < - seconds: 42 - > - > - json_data: "{\"a\": 1}" - request: < - database: "projects/projectID/databases/(default)" - writes: < - update: < - name: "projects/projectID/databases/(default)/documents/C/d" - fields: < - key: "a" - value: < - integer_value: 1 - > - > - > - update_mask: < - field_paths: "a" - > - current_document: < - update_time: < - seconds: 42 - > - > - > - > -> diff --git a/spanner/.coveragerc b/spanner/.coveragerc deleted file mode 100644 index b178b094aa1d..000000000000 --- a/spanner/.coveragerc +++ /dev/null @@ -1,19 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! 
-[run] -branch = True - -[report] -fail_under = 100 -show_missing = True -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore abstract methods - raise NotImplementedError -omit = - */gapic/*.py - */proto/*.py - */core/*.py - */site-packages/*.py \ No newline at end of file diff --git a/spanner/.flake8 b/spanner/.flake8 deleted file mode 100644 index 0268ecc9c55c..000000000000 --- a/spanner/.flake8 +++ /dev/null @@ -1,14 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! -[flake8] -ignore = E203, E266, E501, W503 -exclude = - # Exclude generated code. - **/proto/** - **/gapic/** - *_pb2.py - - # Standard linting exemptions. - __pycache__, - .git, - *.pyc, - conf.py diff --git a/spanner/.repo-metadata.json b/spanner/.repo-metadata.json deleted file mode 100644 index 05efd37f1dbd..000000000000 --- a/spanner/.repo-metadata.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name": "spanner", - "name_pretty": "Cloud Spanner", - "product_documentation": "https://cloud.google.com/spanner/docs/", - "client_documentation": "https://googleapis.dev/python/spanner/latest", - "issue_tracker": "https://issuetracker.google.com/issues?q=componentid:190851%2B%20status:open", - "release_level": "ga", - "language": "python", - "repo": "googleapis/google-cloud-python", - "distribution_name": "google-cloud-spanner", - "api_id": "spanner.googleapis.com", - "requires_billing": true -} \ No newline at end of file diff --git a/spanner/CHANGELOG.md b/spanner/CHANGELOG.md deleted file mode 100644 index 7262e584878a..000000000000 --- a/spanner/CHANGELOG.md +++ /dev/null @@ -1,348 +0,0 @@ -# Changelog - -[PyPI History][1] - -[1]: https://pypi.org/project/google-cloud-spanner/#history - -## 1.13.0 - -11-11-2019 15:59 PST - - -### Implementation Changes -- Fix TransactionPingingPool to stop throwing ''NoneType' object is not callable' error. ([#9609](https://github.com/googleapis/google-cloud-python/pull/9609)) -- Return sessions from pool in LIFO order. ([#9454](https://github.com/googleapis/google-cloud-python/pull/9454)) - -### Documentation -- Add Python 2 sunset banner to documentation. ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036)) -- Update description of the `timeout_secs` parameter. ([#9381](https://github.com/googleapis/google-cloud-python/pull/9381)) - -### Internal / Testing Changes -- Harden `test_transaction_batch_update*` systests against partial success + abort. ([#9579](https://github.com/googleapis/google-cloud-python/pull/9579)) - -## 1.12.0 - -10-23-2019 19:09 PDT - - -### Implementation Changes -- Add `batch_create_session` calls to session pools. ([#9488](https://github.com/googleapis/google-cloud-python/pull/9488)) - -### New Features -- Add `client_options` to client constructor. ([#9151](https://github.com/googleapis/google-cloud-python/pull/9151)) - -### Internal / Testing Changes -- Harden 'test_reload_instance' systest against eventual consistency failures. ([#9394](https://github.com/googleapis/google-cloud-python/pull/9394)) -- Harden 'test_transaction_batch_update_w_syntax_error' systest. ([#9395](https://github.com/googleapis/google-cloud-python/pull/9395)) -- Propagate errors from 'Transaction.batch_update' in systest. ([#9393](https://github.com/googleapis/google-cloud-python/pull/9393)) - -## 1.11.0 - -10-15-2019 06:55 PDT - - -### Implementation Changes -- Adjust gRPC timeouts (via synth).
([#9330](https://github.com/googleapis/google-cloud-python/pull/9330)) -- Make `session_count` optional for `SpannerClient.batch_create_sessions` (via synth). ([#9280](https://github.com/googleapis/google-cloud-python/pull/9280)) -- Remove send / receive message size limit, update docstrings (via synth). ([#8968](https://github.com/googleapis/google-cloud-python/pull/8968)) - -### New Features -- Add `batch_create_sessions` method to generated client (via synth). ([#9087](https://github.com/googleapis/google-cloud-python/pull/9087)) - -### Dependencies -- Pin 'google-cloud-core >= 1.0.3, < 2.0.0dev'. ([#9445](https://github.com/googleapis/google-cloud-python/pull/9445)) - -### Documentation -- Remove references to old authentication credentials in docs. ([#9456](https://github.com/googleapis/google-cloud-python/pull/9456)) -- Fix intersphinx reference to requests. ([#9294](https://github.com/googleapis/google-cloud-python/pull/9294)) -- Fix `run_in_transaction` return value docs. ([#9264](https://github.com/googleapis/google-cloud-python/pull/9264)) -- Remove CI for gh-pages, use googleapis.dev for `api_core` refs. ([#9085](https://github.com/googleapis/google-cloud-python/pull/9085)) -- Remove compatibility badges from READMEs. ([#9035](https://github.com/googleapis/google-cloud-python/pull/9035)) -- Add DML insert and update examples to README. ([#8698](https://github.com/googleapis/google-cloud-python/pull/8698)) -- Update intersphinx mapping for requests. ([#8805](https://github.com/googleapis/google-cloud-python/pull/8805)) - -## 1.10.0 - -07-24-2019 17:32 PDT - - -### Implementation Changes -- Add backoff for `run_in_transaction` when backend does not provide 'RetryInfo' in response. ([#8461](https://github.com/googleapis/google-cloud-python/pull/8461)) -- Adjust gRPC timeouts (via synth). ([#8445](https://github.com/googleapis/google-cloud-python/pull/8445)) -- Allow kwargs to be passed to create_channel (via synth). ([#8403](https://github.com/googleapis/google-cloud-python/pull/8403)) - -### New Features -- Add 'options_' argument to clients' 'get_iam_policy'; pin black version (via synth). ([#8659](https://github.com/googleapis/google-cloud-python/pull/8659)) -- Add 'client_options' support, update list method docstrings (via synth). ([#8522](https://github.com/googleapis/google-cloud-python/pull/8522)) - -### Dependencies -- Bump minimum version for google-api-core to 1.14.0. ([#8709](https://github.com/googleapis/google-cloud-python/pull/8709)) -- Update pin for 'grpc-google-iam-v1' to 0.12.3+. ([#8647](https://github.com/googleapis/google-cloud-python/pull/8647)) - -### Documentation -- Link to googleapis.dev documentation in READMEs. ([#8705](https://github.com/googleapis/google-cloud-python/pull/8705)) -- Add compatibility check badges to READMEs. ([#8288](https://github.com/googleapis/google-cloud-python/pull/8288)) - -### Internal / Testing Changes -- Fixes [#8545](https://github.com/googleapis/google-cloud-python/pull/8545) by removing typing information for kwargs to not conflict with type checkers ([#8546](https://github.com/googleapis/google-cloud-python/pull/8546)) -- Add docs job to publish to googleapis.dev. ([#8464](https://github.com/googleapis/google-cloud-python/pull/8464)) -- Declare encoding as utf-8 in pb2 files (via synth). ([#8363](https://github.com/googleapis/google-cloud-python/pull/8363)) -- Add disclaimer to auto-generated template files (via synth).
([#8327](https://github.com/googleapis/google-cloud-python/pull/8327)) -- Suppress checking 'cov-fail-under' in nox default session (via synth). ([#8251](https://github.com/googleapis/google-cloud-python/pull/8251)) -- Blacken noxfile.py, setup.py (via synth). ([#8131](https://github.com/googleapis/google-cloud-python/pull/8131)) -- Harden synth replacement against template adding whitespace. ([#8103](https://github.com/googleapis/google-cloud-python/pull/8103)) - -## 1.9.0 - -05-16-2019 12:54 PDT - - -### Implementation Changes -- Add routing header to method metadata (via synth). ([#7750](https://github.com/googleapis/google-cloud-python/pull/7750)) -- Remove classifier for Python 3.4 for end-of-life. ([#7535](https://github.com/googleapis/google-cloud-python/pull/7535)) - -### New Features -- Add `client_info` support to client. ([#7878](https://github.com/googleapis/google-cloud-python/pull/7878)) - -### Dependencies -- Pin `google-cloud-core >= 1.0.0, < 2.0dev`. ([#7993](https://github.com/googleapis/google-cloud-python/pull/7993)) - -### Documentation -- Expand API reference for snapshot / transaction. ([#7618](https://github.com/googleapis/google-cloud-python/pull/7618)) - -### Internal / Testing Changes -- Add nox session `docs`, remove retries for DEADLINE_EXCEEDED (via synth). ([#7781](https://github.com/googleapis/google-cloud-python/pull/7781)) -- Added matching END tags to Spanner Tests ([#7529](https://github.com/googleapis/google-cloud-python/pull/7529)) - -## 1.8.0 - -03-05-2019 12:57 PST - - -### Implementation Changes -- Protoc-generated serialization update. ([#7095](https://github.com/googleapis/google-cloud-python/pull/7095)) -- Fix typo in exported param type name. ([#7295](https://github.com/googleapis/google-cloud-python/pull/7295)) - -### New Features -- Add Batch DML support. ([#7485](https://github.com/googleapis/google-cloud-python/pull/7485)) - -### Documentation -- Copy lintified proto files, update docstrings (via synth). ([#7453](https://github.com/googleapis/google-cloud-python/pull/7453)) -- Fix Batch object creation instructions. ([#7341](https://github.com/googleapis/google-cloud-python/pull/7341)) -- Updated client library documentation URLs. ([#7307](https://github.com/googleapis/google-cloud-python/pull/7307)) -- Fix README to install spanner instead of datastore. ([#7301](https://github.com/googleapis/google-cloud-python/pull/7301)) - -### Internal / Testing Changes -- Add clarifying comment to blacken nox target. ([#7403](https://github.com/googleapis/google-cloud-python/pull/7403)) -- Ensure that GRPC config file is included in MANIFEST.in after templating. ([#7046](https://github.com/googleapis/google-cloud-python/pull/7046)) -- Add protos as an artifact to library. ([#7205](https://github.com/googleapis/google-cloud-python/pull/7205)) -- Update copyright headers. 
- -## 1.7.1 - -12-14-2018 15:18 PST - - -### Documentation -- Announce Python 2 deprecation ([#6910](https://github.com/googleapis/google-cloud-python/pull/6910)) -- Normalize documentation for 'page_size' / 'max_results' / 'page_token' ([#6842](https://github.com/googleapis/google-cloud-python/pull/6842)) - -### Internal / Testing Changes -- Include grpc config in manifest ([#6928](https://github.com/googleapis/google-cloud-python/pull/6928)) - -## 1.7.0 - -12-10-2018 13:10 PST - - -### Implementation Changes -- Add PingingPool and TransactionPingingPool to toplevel module ([#6886](https://github.com/googleapis/google-cloud-python/pull/6886)) -- Add `operation_id` parameter to `Database.update_ddl`. ([#6825](https://github.com/googleapis/google-cloud-python/pull/6825)) -- Pick up changes to GAPIC method configuration ([#6615](https://github.com/googleapis/google-cloud-python/pull/6615)) -- Add timeout + retry settings to Sessions/Snapshots ([#6536](https://github.com/googleapis/google-cloud-python/pull/6536)) -- Pick up fixes to GAPIC generator. ([#6576](https://github.com/googleapis/google-cloud-python/pull/6576)) - -### Dependencies -- Update dependency to google-cloud-core ([#6835](https://github.com/googleapis/google-cloud-python/pull/6835)) - -### Internal / Testing Changes -- Add baseline for synth.metadata -- Blacken. ([#6846](https://github.com/googleapis/google-cloud-python/pull/6846)) -- Update noxfile. -- Blacken all gen'd libs ([#6792](https://github.com/googleapis/google-cloud-python/pull/6792)) -- Omit local deps ([#6701](https://github.com/googleapis/google-cloud-python/pull/6701)) -- Run black at end of synth.py ([#6698](https://github.com/googleapis/google-cloud-python/pull/6698)) -- Run Black on Generated libraries ([#6666](https://github.com/googleapis/google-cloud-python/pull/6666)) -- Add templates for flake8, coveragerc, noxfile, and black. ([#6642](https://github.com/googleapis/google-cloud-python/pull/6642)) -- Add tags to DML system tests ([#6580](https://github.com/googleapis/google-cloud-python/pull/6580)) - -## 1.6.1 - -11-09-2018 14:49 PST - -### Implementation Changes -- Fix client_info bug, update docstrings. ([#6420](https://github.com/googleapis/google-cloud-python/pull/6420)) - -### Documentation -- Update README service links in quickstart guides. ([#6322](https://github.com/googleapis/google-cloud-python/pull/6322)) -- Normalize use of support level badges ([#6159](https://github.com/googleapis/google-cloud-python/pull/6159)) -- Fix typo in spanner usage documentation ([#6209](https://github.com/googleapis/google-cloud-python/pull/6209)) - -### Internal / Testing Changes -- Rationalize 'all_types' round-trip systest ([#6379](https://github.com/googleapis/google-cloud-python/pull/6379)) -- Bump minimum 'api_core' version for all GAPIC libs to 1.4.1. ([#6391](https://github.com/googleapis/google-cloud-python/pull/6391)) -- Add systest for returning empty array struct ([#4449](https://github.com/googleapis/google-cloud-python/pull/4449)) -- Add systests not needing tables ([#6308](https://github.com/googleapis/google-cloud-python/pull/6308)) -- Use new Nox ([#6175](https://github.com/googleapis/google-cloud-python/pull/6175)) - -## 1.6.0 - -10-08-2018 08:25 PDT - -### New Features -- Add support for DML/PDML. ([#6151](https://github.com/googleapis/google-cloud-python/pull/6151)) - -### Implementation Changes -- Add 'synth.py' and regen GAPIC code. 
([#6040](https://github.com/googleapis/google-cloud-python/pull/6040)) - -### Documentation -- Remove invalid examples of `database.transaction()`. ([#6032](https://github.com/googleapis/google-cloud-python/pull/6032)) -- Redirect renamed `usage.html`/`client.html` -> `index.html`. ([#5996](https://github.com/googleapis/google-cloud-python/pull/5996)) -- Fix leakage of sections into sidebar menu. ([#5986](https://github.com/googleapis/google-cloud-python/pull/5986)) -- Prepare documentation for repo split. ([#5938](https://github.com/googleapis/google-cloud-python/pull/5938)) - -### Internal / Testing Changes -- Remove extra `grpc_gcp` system tests. ([#6049](https://github.com/googleapis/google-cloud-python/pull/6049)) - -## 1.5.0 - -### New Features - -- Add support for session / pool labels ([#5734](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5734)) -- Add support for gRPC connection management ([#5553](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5553)) - -### Dependencies - -- Add `grpcio-gcp` dependency for Cloud Spanner ([#5904](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5904)) - -### Internal / Testing Changes - -- Don't hardcode endpoint URL in grpc_gcp unit tests. ([#5893](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5893)) -- Run `grpc_gcp` unit tests only with Python 2.7 / 3.6. ([#5871](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5871)) -- Nox: use inplace installs ([#5865](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5865)) -- Benchmarks: print() is a function in Python 3 ([#5862](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5862)) -- Retry `test_transaction_read_and_insert_then_rollback` when aborted. ([#5737](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5737)) -- Skip the flaky `test_update_database_ddl` systest. ([#5704](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5704)) - -## 1.4.0 - -### Implementation Changes -- Ensure that initial resume token is bytes, not text. (#5450) -- Prevent process_read_batch from mutating params (#5416) -- Avoid overwriting '__module__' of messages from shared modules. (#5364) - -### New Features -- Add support for Python 3.7 (#5288) -- Add support for Spanner struct params. (#5463) - -### Internal / Testing Changes -- Modify system tests to use prerelease versions of grpcio (#5304) - -## 1.3.0 - -### Interface additions - -- Added `spanner_v1.COMMIT_TIMESTAMP`. (#5102) - -## 1.2.0 - -### New features - -- Added batch query support (#4938) - -### Implementation changes - -- Removed custom timestamp class in favor of the one in google-api-core. (#4980) - -### Dependencies - -- Update minimum version for google-api-core to 1.1.0 (#5030) - -### Documentation - -- Update package metadata release status to 'Stable' (#5031) - -## 1.1.0 - -### Dependencies - -- The minimum version for `google-api-core` has been updated to version 1.0.0. This may cause some incompatibility with older google-cloud libraries, you will need to update those libraries if you have a dependency conflict. (#4944, #4946) - -### Testing and internal changes - -- Fix load_keys() in YCSB-like benchmark for cloud spanner. 
(#4919) -- Install local dependencies when running lint (#4936) -- Re-enable lint for tests, remove usage of pylint (#4921) -- Normalize all setup.py files (#4909) -- Fix system test util to populate streaming (#4888) -- Retry conflict errors in system test (#4850) - -## 1.0.0 - -### Breaking Changes - -- `to_pb` has now been made private (`_to_pb`) in `KeySet` - and `KeyRange` (#4740) - -### Documentation Changes -- Database update_ddl missing param in documentation (#4749) - -## 0.30.0 - -### Breaking Changes - -- The underlying autogenerated client library was re-generated to pick up new - features and resolve bugs, this may change the exceptions raised from various - methods. (#4695) -- Made `StreamedResultSet`'s `row`, `consume_all`, and `consume_next` members - private (#4492) - -### Implementation Changes - -- `Keyset` can now infer defaults to `start_closed` or `end_closed` when only one argument is specified. (#4735) - -### Documentation - -- Brought Spanner README more in line with others. (#4306, #4317) - -### Testing - -- Added several new system tests and fixed minor issues with existing tests. ( - #4631, #4569, #4573, #4572, #4416, #4411, #4407, #4386, #4419, #4489, - #4678, #4620, #4418, #4403, #4397, #4383, #4371, #4372, #4374, #4370, #4285, - #4321) -- Excluded generated code from linting. (#4375) -- Added a `nox -s default` session for all packages. (#4324) - -## 0.29.0 - -### Implementation Changes - -- **Bugfix**: Clear `session._transaction` before calling - `_delay_until_retry` (#4185) -- **Bugfix**: Be permissive about merging an empty list. (#4170, - fixes #4164) - -### Documentation - -- Added link to "Python Development Environment Setup Guide" in - project README (#4187, h/t to @michaelawyu) - -### Dependencies - -- Upgrading to `google-cloud-core >= 0.28.0` and adding dependency - on `google-api-core` (#4221, #4280) -- Deferring to `google-api-core` for `grpcio` and - `googleapis-common-protos`dependencies (#4096, #4098) - -PyPI: https://pypi.org/project/google-cloud-spanner/0.29.0/ diff --git a/spanner/LICENSE b/spanner/LICENSE deleted file mode 100644 index d64569567334..000000000000 --- a/spanner/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/spanner/MANIFEST.in b/spanner/MANIFEST.in deleted file mode 100644 index d2edac373469..000000000000 --- a/spanner/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include README.rst LICENSE -include google/cloud/spanner_v1/gapic/transports/spanner.grpc.config -recursive-include google *.json *.proto -recursive-include tests * -global-exclude *.py[co] -global-exclude __pycache__ diff --git a/spanner/README.rst b/spanner/README.rst deleted file mode 100644 index d18dbcfbc628..000000000000 --- a/spanner/README.rst +++ /dev/null @@ -1,242 +0,0 @@ -Python Client for Cloud Spanner -=============================== - -|GA| |pypi| |versions| - -`Cloud Spanner`_ is the world's first fully managed relational database service -to offer both strong consistency and horizontal scalability for -mission-critical online transaction processing (OLTP) applications. 
With Cloud -Spanner you enjoy all the traditional benefits of a relational database; but -unlike any other relational database service, Cloud Spanner scales horizontally -to hundreds or thousands of servers to handle the biggest transactional -workloads. - - -- `Client Library Documentation`_ -- `Product Documentation`_ - -.. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg - :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#general-availability -.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-spanner.svg - :target: https://pypi.org/project/google-cloud-spanner/ -.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-spanner.svg - :target: https://pypi.org/project/google-cloud-spanner/ -.. _Cloud Spanner: https://cloud.google.com/spanner/ -.. _Client Library Documentation: https://googleapis.dev/python/spanner/latest -.. _Product Documentation: https://cloud.google.com/spanner/docs - -Quick Start ------------- - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. `Enable the Google Cloud Spanner API.`_ -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Enable the Google Cloud Spanner API.: https://cloud.google.com/spanner -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Supported Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 - -Deprecated Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv <your-env> - source <your-env>/bin/activate - <your-env>/bin/pip install google-cloud-spanner - - -Windows -^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install google-cloud-spanner - - -Example Usage -------------- - - -Executing Arbitrary SQL in a Transaction -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Generally, to work with Cloud Spanner, you will want a transaction. The -preferred mechanism for this is to create a single function, which executes -as a callback to ``database.run_in_transaction``: - -.. code:: python - - # First, define the function that represents a single "unit of work" - # that should be run within the transaction. - def update_anniversary(transaction, person_id, unix_timestamp): - # The query itself is just a string. - # - # The use of @parameters is recommended rather than doing your - # own string interpolation; this provides protections against - # SQL injection attacks. - query = """SELECT anniversary FROM people - WHERE id = @person_id""" - - # When executing the SQL statement, the query and parameters are sent - # as separate arguments.
When using parameters, you must specify - # both the parameters themselves and their types. - row = transaction.execute_sql( - query=query, - params={'person_id': person_id}, - param_types={ - 'person_id': types.INT64_PARAM_TYPE, - }, - ).one() - - # Now perform an update on the data. - old_anniversary = row[0] - new_anniversary = _compute_anniversary(old_anniversary, years) - transaction.update( - 'people', - ['person_id', 'anniversary'], - [person_id, new_anniversary], - ) - - # Actually run the `update_anniversary` function in a transaction. - database.run_in_transaction(update_anniversary, - person_id=42, - unix_timestamp=1335020400, - ) - - -Select records using a Transaction -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once you have a transaction object (such as the first argument sent to -``run_in_transaction``), reading data is easy: - -.. code:: python - - # Define a SELECT query. - query = """SELECT e.first_name, e.last_name, p.telephone - FROM employees as e, phones as p - WHERE p.employee_id == e.employee_id""" - - # Execute the query and return results. - result = transaction.execute_sql(query) - for row in result.rows: - print(row) - - -Insert records using Data Manipulation Language (DML) with a Transaction -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Use the ``execute_update()`` method to execute a DML statement: - -.. code:: python - - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) - - def insert_singers(transaction): - row_ct = transaction.execute_update( - "INSERT Singers (SingerId, FirstName, LastName) " - " VALUES (10, 'Virginia', 'Watson')" - ) - - print("{} record(s) inserted.".format(row_ct)) - - database.run_in_transaction(insert_singers) - - -Insert records using Mutations with a Transaction -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To add one or more records to a table, use ``insert``: - -.. code:: python - - transaction.insert( - 'citizens', - columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['phred@exammple.com', 'Phred', 'Phlyntstone', 32], - ['bharney@example.com', 'Bharney', 'Rhubble', 31], - ], - ) - - -Update records using Data Manipulation Language (DML) with a Transaction -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: python - - spanner_client = spanner.Client() - instance = spanner_client.instance(instance_id) - database = instance.database(database_id) - - def update_albums(transaction): - row_ct = transaction.execute_update( - "UPDATE Albums " - "SET MarketingBudget = MarketingBudget * 2 " - "WHERE SingerId = 1 and AlbumId = 1" - ) - - print("{} record(s) updated.".format(row_ct)) - - database.run_in_transaction(update_albums) - - -Update records using Mutations with a Transaction -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``Transaction.update`` updates one or more existing records in a table. Fails -if any of the records does not already exist. - -.. code:: python - - transaction.update( - 'citizens', - columns=['email', 'age'], - values=[ - ['phred@exammple.com', 33], - ['bharney@example.com', 32], - ], - ) - - -Next Steps -~~~~~~~~~~ - -- See the `Client Library Documentation`_ to learn how to connect to Cloud - Spanner using this Client Library. -- Read the `Product documentation`_ to learn - more about the product and see How-to Guides. 
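The deleted README above covers reads and writes inside ``database.run_in_transaction``. For reads that do not need a read-write transaction, the client also exposes read-only snapshots; a minimal sketch (not part of the README, assuming ``instance_id`` and ``database_id`` are defined and a ``Singers`` table exists) might look like:

.. code-block:: python

    from google.cloud import spanner

    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    # A snapshot provides a consistent, read-only view and does not take locks.
    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT SingerId, FirstName, LastName FROM Singers"
        )
        for row in results:
            print(row)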
diff --git a/spanner/benchmark/bin/ycsb b/spanner/benchmark/bin/ycsb deleted file mode 100644 index bb98c8c174ae..000000000000 --- a/spanner/benchmark/bin/ycsb +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -# A YCSB-like executable that can be integrated into PerfKitBenchmarker. - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -python ${DIR}/../ycsb.py "${@:1}" diff --git a/spanner/benchmark/ycsb.py b/spanner/benchmark/ycsb.py deleted file mode 100644 index bad4e0fe9271..000000000000 --- a/spanner/benchmark/ycsb.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and - -"""The YCSB client in Python. - -Usage: - - # Set up instance and load data into database. - - # Set up environment variables. You should use your own credentials and gcloud - # project. - $ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json - $ export GCLOUD_PROJECT=gcloud-project-name - - # Run the benchmark. - $ python spanner/benchmark/ycsb.py run cloud_spanner -P pkb/workloada \ - -p table=usertable -p cloudspanner.instance=ycsb-542756a4 \ - -p recordcount=5000 -p operationcount=100 -p cloudspanner.database=ycsb \ - -p num_worker=1 - - # To make a package so it can work with PerfKitBenchmarker. 
- $ cd spanner; tar -cvzf ycsb-python.0.0.5.tar.gz benchmark/* - -""" - -from google.cloud import spanner - -import argparse -import numpy -import random -import string -import threading -import timeit - - -OPERATIONS = ['readproportion', 'updateproportion', 'scanproportion', - 'insertproportion'] -NUM_FIELD = 10 - - -def parse_options(): - """Parses options.""" - parser = argparse.ArgumentParser() - parser.add_argument('command', help='The YCSB command.') - parser.add_argument('benchmark', help='The YCSB benchmark.') - parser.add_argument('-P', '--workload', action='store', dest='workload', - default='', help='The path to a YCSB workload file.') - parser.add_argument('-p', '--parameter', action='append', dest='parameters', - default=[], help='The key=value pair of parameter.') - parser.add_argument('-b', '--num_bucket', action='store', type=int, - dest='num_bucket', default=1000, - help='The number of buckets in output.') - - args = parser.parse_args() - - parameters = {} - parameters['command'] = args.command - parameters['num_bucket'] = args.num_bucket - - for parameter in args.parameters: - parts = parameter.strip().split('=') - parameters[parts[0]] = parts[1] - - with open(args.workload, 'r') as f: - for line in f.readlines(): - parts = line.split('=') - key = parts[0].strip() - if key in OPERATIONS: - parameters[key] = parts[1].strip() - - return parameters - - -def open_database(parameters): - """Opens a database specified by the parameters from parse_options().""" - spanner_client = spanner.Client() - instance_id = parameters['cloudspanner.instance'] - instance = spanner_client.instance(instance_id) - database_id = parameters['cloudspanner.database'] - pool = spanner.BurstyPool(int(parameters['num_worker'])) - database = instance.database(database_id, pool=pool) - - return database - - -def load_keys(database, parameters): - """Loads keys from database.""" - keys = [] - with database.snapshot() as snapshot: - results = snapshot.execute_sql( - 'SELECT u.id FROM %s u' % parameters['table']) - - for row in results: - keys.append(row[0]) - - return keys - - -def read(database, table, key): - """Does a single read operation.""" - with database.snapshot() as snapshot: - result = snapshot.execute_sql('SELECT u.* FROM %s u WHERE u.id="%s"' % - (table, key)) - for row in result: - key = row[0] - for i in range(NUM_FIELD): - field = row[i + 1] - - -def update(database, table, key): - """Does a single update operation.""" - field = random.randrange(10) - value = ''.join(random.choice(string.printable) for i in range(100)) - with database.batch() as batch: - batch.update(table=table, columns=('id', 'field%d' % field), - values=[(key, value)]) - - -def do_operation(database, keys, table, operation, latencies_ms): - """Does a single operation and records latency.""" - key = random.choice(keys) - start = timeit.default_timer() - if operation == 'read': - read(database, table, key) - elif operation == 'update': - update(database, table, key) - else: - raise ValueError('Unknown operation: %s' % operation) - end = timeit.default_timer() - latencies_ms[operation].append((end - start) * 1000) - - -def aggregate_metrics(latencies_ms, duration_ms, num_bucket): - """Aggregates metrics.""" - overall_op_count = 0 - op_counts = {operation : len(latency) for operation, - latency in latencies_ms.iteritems()} - overall_op_count = sum([op_count for op_count in op_counts.itervalues()]) - - print('[OVERALL], RunTime(ms), %f' % duration_ms) - print('[OVERALL], Throughput(ops/sec), %f' % (float(overall_op_count) / 
- duration_ms * 1000.0)) - - for operation in op_counts.keys(): - operation_upper = operation.upper() - print('[%s], Operations, %d' % (operation_upper, op_counts[operation])) - print('[%s], AverageLatency(us), %f' % ( - operation_upper, numpy.average(latencies_ms[operation]) * 1000.0)) - print('[%s], LatencyVariance(us), %f' % ( - operation_upper, numpy.var(latencies_ms[operation]) * 1000.0)) - print('[%s], MinLatency(us), %f' % ( - operation_upper, min(latencies_ms[operation]) * 1000.0)) - print('[%s], MaxLatency(us), %f' % ( - operation_upper, max(latencies_ms[operation]) * 1000.0)) - print('[%s], 95thPercentileLatency(us), %f' % ( - operation_upper, - numpy.percentile(latencies_ms[operation], 95.0) * 1000.0)) - print('[%s], 99thPercentileLatency(us), %f' % ( - operation_upper, - numpy.percentile(latencies_ms[operation], 99.0) * 1000.0)) - print('[%s], 99.9thPercentileLatency(us), %f' % ( - operation_upper, - numpy.percentile(latencies_ms[operation], 99.9) * 1000.0)) - print('[%s], Return=OK, %d' % (operation_upper, op_counts[operation])) - latency_array = numpy.array(latencies_ms[operation]) - for j in range(num_bucket): - print('[%s], %d, %d' % ( - operation_upper, j, - ((j <= latency_array) & (latency_array < (j + 1))).sum())) - print('[%s], >%d, %d' % (operation_upper, num_bucket, - (num_bucket <= latency_array).sum())) - - -class WorkloadThread(threading.Thread): - """A single thread running workload.""" - - def __init__(self, database, keys, parameters, total_weight, weights, - operations): - threading.Thread.__init__(self) - self._database = database - self._keys = keys - self._parameters = parameters - self._total_weight = total_weight - self._weights = weights - self._operations = operations - self._latencies_ms = {} - for operation in self._operations: - self._latencies_ms[operation] = [] - - def run(self): - """Run a single thread of the workload.""" - i = 0 - operation_count = int(self._parameters['operationcount']) - while i < operation_count: - i += 1 - weight = random.uniform(0, self._total_weight) - for j in range(len(self._weights)): - if weight <= self._weights[j]: - do_operation(self._database, self._keys, - self._parameters['table'], - self._operations[j], self._latencies_ms) - break - - def latencies_ms(self): - """Returns the latencies.""" - return self._latencies_ms - - -def run_workload(database, keys, parameters): - """Runs workload against the database.""" - total_weight = 0.0 - weights = [] - operations = [] - latencies_ms = {} - for operation in OPERATIONS: - weight = float(parameters[operation]) - if weight <= 0.0: - continue - total_weight += weight - op_code = operation.split('proportion')[0] - operations.append(op_code) - weights.append(total_weight) - latencies_ms[op_code] = [] - - threads = [] - start = timeit.default_timer() - for i in range(int(parameters['num_worker'])): - thread = WorkloadThread(database, keys, parameters, total_weight, - weights, operations) - thread.start() - threads.append(thread) - - for thread in threads: - thread.join() - end = timeit.default_timer() - - for thread in threads: - thread_latencies_ms = thread.latencies_ms() - for key in latencies_ms.keys(): - latencies_ms[key].extend(thread_latencies_ms[key]) - - aggregate_metrics(latencies_ms, (end - start) * 1000.0, - parameters['num_bucket']) - - -if __name__ == '__main__': - parameters = parse_options() - if parameters['command'] == 'run': - if 'cloudspanner.channels' in parameters: - assert parameters['cloudspanner.channels'] == 1, ( - 'Python doesn\'t support channels > 
1.') - database = open_database(parameters) - keys = load_keys(database, parameters) - run_workload(database, keys, parameters) - else: - raise ValueError('Unknown command %s.' % parameters['command']) diff --git a/spanner/docs/README.rst b/spanner/docs/README.rst deleted file mode 120000 index 89a0106941ff..000000000000 --- a/spanner/docs/README.rst +++ /dev/null @@ -1 +0,0 @@ -../README.rst \ No newline at end of file diff --git a/spanner/docs/_static/custom.css b/spanner/docs/_static/custom.css deleted file mode 100644 index 0abaf229fce3..000000000000 --- a/spanner/docs/_static/custom.css +++ /dev/null @@ -1,4 +0,0 @@ -div#python2-eol { - border-color: red; - border-width: medium; -} \ No newline at end of file diff --git a/spanner/docs/_templates/layout.html b/spanner/docs/_templates/layout.html deleted file mode 100644 index 228529efe2d2..000000000000 --- a/spanner/docs/_templates/layout.html +++ /dev/null @@ -1,50 +0,0 @@ - -{% extends "!layout.html" %} -{%- block content %} -{%- if theme_fixed_sidebar|lower == 'true' %} -
- {{ sidebar() }} - {%- block document %} -
- {%- if render_sidebar %} -
- {%- endif %} - - {%- block relbar_top %} - {%- if theme_show_relbar_top|tobool %} - - {%- endif %} - {% endblock %} - -
-
- On January 1, 2020 this library will no longer support Python 2 on the latest released version. - Previously released library versions will continue to be available. For more information please - visit Python 2 support on Google Cloud. -
- {% block body %} {% endblock %} -
- - {%- block relbar_bottom %} - {%- if theme_show_relbar_bottom|tobool %} - - {%- endif %} - {% endblock %} - - {%- if render_sidebar %} -
- {%- endif %} -
- {%- endblock %} -
-
-{%- else %} -{{ super() }} -{%- endif %} -{%- endblock %} diff --git a/spanner/docs/advanced-session-pool-topics.rst b/spanner/docs/advanced-session-pool-topics.rst deleted file mode 100644 index 18fd7db64c1b..000000000000 --- a/spanner/docs/advanced-session-pool-topics.rst +++ /dev/null @@ -1,96 +0,0 @@ -Advanced Session Pool Topics -============================ - -Custom Session Pool Implementations ------------------------------------ - -You can supply your own pool implementation, which must satisfy the -contract laid out in -:class:`~google.cloud.spanner.pool.AbstractSessionPool`: - -.. code-block:: python - - from google.cloud.spanner import AbstractSessionPool - - class MyCustomPool(AbstractSessionPool): - - def __init__(self, custom_param): - super(MyCustomPool, self).__init__() - self.custom_param = custom_param - - def bind(self, database): - ... - - def get(self, read_only=False): - ... - - def put(self, session, discard_if_full=True): - ... - - pool = MyCustomPool(custom_param=42) - database = instance.database(DATABASE_NAME, pool=pool) - -Lowering latency for read / query operations --------------------------------------------- - -Some applications may need to minimize latency for read operations, including -particularly the overhead of making an API request to create or refresh a -session. :class:`~google.cloud.spanner.pool.PingingPool` is designed for such -applications, which need to configure a background thread to do the work of -keeping the sessions fresh. - -Create an instance of :class:`~google.cloud.spanner.pool.PingingPool`: - -.. code-block:: python - - from google.cloud.spanner import Client, PingingPool - - client = Client() - instance = client.instance(INSTANCE_NAME) - pool = PingingPool(size=10, default_timeout=5, ping_interval=300) - database = instance.database(DATABASE_NAME, pool=pool) - -Set up a background thread to ping the pool's session, keeping them -from becoming stale: - -.. code-block:: python - - import threading - - background = threading.Thread(target=pool.ping, name='ping-pool') - background.daemon = True - background.start() - -Lowering latency for mixed read-write operations ------------------------------------------------- - -Some applications may need to minimize latency for read write operations, -including particularly the overhead of making an API request to create or -refresh a session or to begin a session's transaction. -:class:`~google.cloud.spanner.pool.TransactionPingingPool` is designed for -such applications, which need to configure a background thread to do the work -of keeping the sessions fresh and starting their transactions after use. - -Create an instance of -:class:`~google.cloud.spanner.pool.TransactionPingingPool`: - -.. code-block:: python - - from google.cloud.spanner import Client, TransactionPingingPool - - client = Client() - instance = client.instance(INSTANCE_NAME) - pool = TransactionPingingPool(size=10, default_timeout=5, ping_interval=300) - database = instance.database(DATABASE_NAME, pool=pool) - -Set up a background thread to ping the pool's session, keeping them -from becoming stale, and ensuring that each session has a new transaction -started before it is used: - -.. 
code-block:: python - - import threading - - background = threading.Thread(target=pool.ping, name='ping-pool') - background.daemon = True - background.start() diff --git a/spanner/docs/api-reference.rst b/spanner/docs/api-reference.rst deleted file mode 100644 index c767b23afac0..000000000000 --- a/spanner/docs/api-reference.rst +++ /dev/null @@ -1,33 +0,0 @@ -API Reference -============= - -The following classes and methods constitute the Spanner client. -Most likely, you will be interacting almost exclusively with these: - -.. toctree:: - :maxdepth: 1 - - client-api - instance-api - database-api - session-api - keyset-api - snapshot-api - batch-api - transaction-api - streamed-api - - -The classes and methods above depend on the following, lower-level -classes and methods. Documentation for these is provided for completion, -and some advanced use cases may wish to interact with these directly: - -.. toctree:: - :maxdepth: 1 - - gapic/v1/api - gapic/v1/types - gapic/v1/admin_database_api - gapic/v1/admin_database_types - gapic/v1/admin_instance_api - gapic/v1/admin_instance_types diff --git a/spanner/docs/batch-api.rst b/spanner/docs/batch-api.rst deleted file mode 100644 index ecd51b01cdd9..000000000000 --- a/spanner/docs/batch-api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Batch API -========= - -.. automodule:: google.cloud.spanner_v1.batch - :members: - :show-inheritance: diff --git a/spanner/docs/batch-usage.rst b/spanner/docs/batch-usage.rst deleted file mode 100644 index 419ca106e6cc..000000000000 --- a/spanner/docs/batch-usage.rst +++ /dev/null @@ -1,203 +0,0 @@ -Batching Modifications -###################### - -A :class:`~google.cloud.spanner.batch.Batch` represents a set of data -modification operations to be performed on tables in a database. Use of a -``Batch`` does not require creating an explicit -:class:`~google.cloud.spanner.snapshot.Snapshot` or -:class:`~google.cloud.spanner.transaction.Transaction`. Until -:meth:`~google.cloud.spanner.batch.Batch.commit` is called on a ``Batch``, -no changes are propagated to the back-end. - - -Starting a Batch ----------------- - -Construct a :class:`~google.cloud.spanner.batch.Batch` object from a :class:`~google.cloud.spanner.database.Database` object: - -.. code:: python - - from google.cloud import spanner - - client = spanner.Client() - instance = client.instance(INSTANCE_NAME) - database = instance.database(DATABASE_NAME) - - batch = database.batch() - - -Inserting records using a Batch -------------------------------- - -:meth:`Batch.insert` adds one or more new records to a table. Fails if -any of the records already exists. - -.. code:: python - - batch.insert( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['phred@exammple.com', 'Phred', 'Phlyntstone', 32], - ['bharney@example.com', 'Bharney', 'Rhubble', 31], - ]) - -.. note:: - - Ensure that data being sent for ``STRING`` columns uses a text string - (``str`` in Python 3; ``unicode`` in Python 2). - - Additionally, if you are writing data intended for a ``BYTES`` column, you - must base64 encode it. - - -Update records using a Batch -------------------------------- - -:meth:`Batch.update` updates one or more existing records in a table. Fails -if any of the records does not already exist. - -.. code:: python - - batch.update( - 'citizens', columns=['email', 'age'], - values=[ - ['phred@exammple.com', 33], - ['bharney@example.com', 32], - ]) - -.. 
note:: - - Ensure that data being sent for ``STRING`` columns uses a text string - (``str`` in Python 3; ``unicode`` in Python 2). - - Additionally, if you are writing data intended for a ``BYTES`` column, you - must base64 encode it. - - -Insert or update records using a Batch --------------------------------------- - -:meth:`Batch.insert_or_update` inserts *or* updates one or more records in a -table. Existing rows have values for the supplied columns overwritten; other -column values are preserved. - -.. code:: python - - batch.insert_or_update( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['phred@exammple.com', 'Phred', 'Phlyntstone', 31], - ['wylma@example.com', 'Wylma', 'Phlyntstone', 29], - ]) - -.. note:: - - Ensure that data being sent for ``STRING`` columns uses a text string - (``str`` in Python 3; ``unicode`` in Python 2). - - Additionally, if you are writing data intended for a ``BYTES`` column, you - must base64 encode it. - - -Replace records using a Batch ------------------------------ - -:meth:`Batch.replace` inserts *or* updates one or more records in a -table. Existing rows have values for the supplied columns overwritten; other -column values are set to null. - -.. code:: python - - batch.replace( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['bharney@example.com', 'Bharney', 'Rhubble', 30], - ['bhettye@example.com', 'Bhettye', 'Rhubble', 30], - ]) - -.. note:: - - Ensure that data being sent for ``STRING`` columns uses a text string - (``str`` in Python 3; ``unicode`` in Python 2). - - Additionally, if you are writing data intended for a ``BYTES`` column, you - must base64 encode it. - - -Delete records using a Batch ----------------------------- - -:meth:`Batch.delete` removes one or more records from a table. Non-existent -rows do not cause errors. - -.. code:: python - - from google.cloud.spanner import KeySet - - to_delete = KeySet(keys=[ - ('bharney@example.com',) - ('nonesuch@example.com',) - ]) - - batch.delete('citizens', to_delete) - - -Commit changes for a Batch --------------------------- - -After describing the modifications to be made to table data via the -:meth:`Batch.insert`, :meth:`Batch.update`, :meth:`Batch.insert_or_update`, -:meth:`Batch.replace`, and :meth:`Batch.delete` methods above, send them to -the back-end by calling :meth:`Batch.commit`, which makes the ``Commit`` -API call. - -.. code:: python - - batch.commit() - - -Use a Batch as a Context Manager --------------------------------- - -Rather than calling :meth:`Batch.commit` manually, you can use the -:class:`Batch` instance as a context manager, and have it called automatically -if the ``with`` block exits without raising an exception. - -.. code:: python - - from google.cloud.spanner import KeySet - - client = spanner.Client() - instance = client.instance(INSTANCE_NAME) - database = instance.database(DATABASE_NAME) - - to_delete = KeySet(keys=[ - ('bharney@example.com',) - ('nonesuch@example.com',) - ]) - - with database.batch() as batch: - - batch.insert( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['phred@exammple.com', 'Phred', 'Phlyntstone', 32], - ['bharney@example.com', 'Bharney', 'Rhubble', 31], - ]) - - batch.update( - 'citizens', columns=['email', 'age'], - values=[ - ['phred@exammple.com', 33], - ['bharney@example.com', 32], - ]) - - ... - - batch.delete('citizens', to_delete) - - -Next Step ---------- - -Next, learn about :doc:`snapshot-usage`. 
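One point repeated in the notes above but never shown is the encoding step for ``BYTES`` columns. As a minimal sketch only, assuming a hypothetical ``avatars`` table and a ``batch`` obtained from ``database.batch()`` as in the earlier examples, it might look like:

.. code:: python

    import base64

    # Read some raw bytes destined for a hypothetical BYTES column.
    image_bytes = open('phred.png', 'rb').read()

    batch.insert(
        'avatars', columns=['email', 'image'],
        values=[
            # Per the note above, base64-encode values bound for BYTES columns.
            ['phred@example.com', base64.b64encode(image_bytes)],
        ])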
diff --git a/spanner/docs/changelog.md b/spanner/docs/changelog.md deleted file mode 120000 index 04c99a55caae..000000000000 --- a/spanner/docs/changelog.md +++ /dev/null @@ -1 +0,0 @@ -../CHANGELOG.md \ No newline at end of file diff --git a/spanner/docs/client-api.rst b/spanner/docs/client-api.rst deleted file mode 100644 index 3cc5a89b2137..000000000000 --- a/spanner/docs/client-api.rst +++ /dev/null @@ -1,7 +0,0 @@ -Spanner Client -============== - -.. automodule:: google.cloud.spanner_v1.client - :members: - :show-inheritance: - diff --git a/spanner/docs/client-usage.rst b/spanner/docs/client-usage.rst deleted file mode 100644 index 801c9cb135da..000000000000 --- a/spanner/docs/client-usage.rst +++ /dev/null @@ -1,87 +0,0 @@ -Spanner Client -============== - -.. _spanner-client: - - -Instantiating a Client ----------------------- - -To use the API, the :class:`~google.cloud.spanner_v1.client.Client` -class defines a high-level interface which handles authorization -and creating other objects: - -.. code:: python - - from google.cloud import spanner - client = spanner.Client() - -Long-lived Defaults -------------------- - -When creating a :class:`~google.cloud.spanner_v1.client.Client`, the -``user_agent`` and ``timeout_seconds`` arguments have sensible -defaults -(:data:`~google.cloud.spanner_v1.client.DEFAULT_USER_AGENT` and -:data:`~google.cloud.spanner_v1.client.DEFAULT_TIMEOUT_SECONDS`). -However, you may over-ride them and these will be used throughout all API -requests made with the ``client`` you create. - -Configuration -------------- - -- For an overview of authentication in ``google.cloud-python``, - see `Authentication - `_. - -- In addition to any authentication configuration, you can also set the - :envvar:`GCLOUD_PROJECT` environment variable for the Google Cloud Console - project you'd like to interact with. If your code is running in Google App - Engine or Google Compute Engine the project will be detected automatically. - (Setting this environment variable is not required, you may instead pass the - ``project`` explicitly when constructing a - :class:`~google.cloud.spanner_v1.client.Client`). - -- After configuring your environment, create a - :class:`~google.cloud.spanner_v1.client.Client` - - .. code:: - - >>> from google.cloud import spanner - >>> client = spanner.Client() - - or pass in ``credentials`` and ``project`` explicitly - - .. code:: - - >>> from google.cloud import spanner - >>> client = spanner.Client(project='my-project', credentials=creds) - -.. tip:: - - Be sure to use the **Project ID**, not the **Project Number**. - - -Warnings about Multiprocessing ------------------------------- - -.. warning:: - When using multiprocessing, the application may hang if a - :class:`Client ` instance is created - before :class:`multiprocessing.Pool` or :class:`multiprocessing.Process` - invokes :func:`os.fork`. The issue is under investigation, but may be only - happening on Macintosh and not Linux. See `GRPC/GRPC#12455 - `_ for - more information. - -Next Step ---------- - -After a :class:`~google.cloud.spanner_v1.client.Client`, the next -highest-level object is an :class:`~google.cloud.spanner_v1.instance.Instance`. -You'll need one before you can interact with databases. - -Next, learn about the :doc:`instance-usage`. - -.. _Instance Admin: https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1 -.. 
_Database Admin: https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1 diff --git a/spanner/docs/conf.py b/spanner/docs/conf.py deleted file mode 100644 index dd597836fb24..000000000000 --- a/spanner/docs/conf.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- -# -# google-cloud-spanner documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.doctest", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-spanner" -copyright = u"2017, Google" -author = u"Google APIs" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. 
cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
-# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-spanner-doc" - -# -- Options for warnings ------------------------------------------------------ - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - #'preamble': '', - # Latex figure (float) alignment - #'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-spanner.tex", - u"google-cloud-spanner Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-spanner", - u"google-cloud-spanner Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-spanner", - u"google-cloud-spanner Documentation", - author, - "google-cloud-spanner", - "GAPIC library for the {metadata.shortName} service", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("https://requests.kennethreitz.org/en/stable/", None), - "fastavro": ("https://fastavro.readthedocs.io/en/stable/", None), - "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), -} - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/spanner/docs/database-api.rst b/spanner/docs/database-api.rst deleted file mode 100644 index f1ce2a6d8e26..000000000000 --- a/spanner/docs/database-api.rst +++ /dev/null @@ -1,8 +0,0 @@ -Database API -============ - -.. automodule:: google.cloud.spanner_v1.database - :members: - :show-inheritance: - - diff --git a/spanner/docs/database-usage.rst b/spanner/docs/database-usage.rst deleted file mode 100644 index 8989501a7d6a..000000000000 --- a/spanner/docs/database-usage.rst +++ /dev/null @@ -1,251 +0,0 @@ -Database Admin -============== - -After creating a :class:`~google.cloud.spanner.instance.Instance`, you can -interact with individual databases for that instance. - - -List Databases --------------- - -To iterate over all existing databases for an instance, use its -:meth:`~google.cloud.spanner.instance.Instance.list_databases` method: - -.. code:: python - - for database in instance.list_databases(): - # `database` is a `Database` object. - -This method yields :class:`~.spanner_admin_database_v1.types.Database` -objects. - - -Database Factory ----------------- - -To create a :class:`~google.cloud.spanner.database.Database` object: - -.. code:: python - - database = instance.database(database_id, ddl_statements) - -- ``ddl_statements`` is a string containing DDL for the new database. - -You can also use :meth:`Instance.database` to create a local wrapper for -a database that has already been created: - -.. code:: python - - database = instance.database(existing_database_id) - - -Create a new Database ---------------------- - -After creating the database object, use its -:meth:`~google.cloud.spanner.database.Database.create` method to -trigger its creation on the server: - -.. 
code:: python - - operation = database.create() - -.. note:: - - Creating an instance triggers a "long-running operation" and - returns an :class:`~concurrent.futures.Future`-like object. Use - the :meth:`~concurrent.futures.Future.result` method to wait for - and inspect the result. - - -Update an existing Database ---------------------------- - -After creating the database object, you can apply additional DDL statements -via its :meth:`~google.cloud.spanner.database.Database.update_ddl` method: - -.. code:: python - - operation = database.update_ddl(ddl_statements, operation_id) - -- ``ddl_statements`` is a string containing DDL to be applied to - the database. - -- ``operation_id`` is a string ID for the long-running operation. - -.. note:: - - Update an instance triggers a "long-running operation" and - returns a :class:`google.cloud.spanner.database.Operation` - object. See :ref:`check-on-current-database-operation` for polling - to find out if the operation is completed. - - -Drop a Database ---------------- - -Drop a database using its -:meth:`~google.cloud.spanner.database.Database.drop` method: - -.. code:: python - - database.drop() - - -.. _check-on-current-database-operation: - -Check on Current Database Operation ------------------------------------ - -The :meth:`~google.cloud.spanner.database.Database.create` and -:meth:`~google.cloud.spanner.database.Database.update` methods of instance -object trigger long-running operations on the server, and return instances -conforming to the :class:`~.concurrent.futures.Future` class. - -.. code:: python - - >>> operation = instance.create() - >>> operation.result() - - -Non-Admin Database Usage -======================== - -Use a Snapshot to Read / Query the Database -------------------------------------------- - -A snapshot represents a read-only point-in-time view of the database. - -Calling :meth:`~google.cloud.spanner.database.Database.snapshot` with -no arguments creates a snapshot with strong concurrency: - -.. code:: python - - with database.snapshot() as snapshot: - do_something_with(snapshot) - -See :class:`~google.cloud.spanner.snapshot.Snapshot` for the other options -which can be passed. - -.. note:: - - :meth:`~google.cloud.spanner.database.Database.snapshot` returns an - object intended to be used as a Python context manager (i.e., as the - target of a ``with`` statement). Use the instance, and any result - sets returned by its ``read`` or ``execute_sql`` methods, only inside - the block created by the ``with`` statement. - -See :doc:`snapshot-usage` for more complete examples of snapshot usage. - -Use a Batch to Modify Rows in the Database ------------------------------------------- - -A batch represents a bundled set of insert/upsert/update/delete operations -on the rows of tables in the database. - -.. code:: python - - with database.batch() as batch: - batch.insert_or_update(table, columns, rows) - batch.delete(table, keyset_to_delete) - -.. note:: - - :meth:`~google.cloud.spanner.database.Database.batch` returns an - object intended to be used as a Python context manager (i.e., as the - target of a ``with`` statement). It applies any changes made inside - the block of its ``with`` statement when exiting the block, unless an - exception is raised within the block. Use the batch only inside - the block created by the ``with`` statement. - -See :doc:`batch-usage` for more complete examples of batch usage. 
- -Use a Transaction to Query / Modify Rows in the Database --------------------------------------------------------- - -A transaction represents the union of a "strong" snapshot and a batch: -it allows ``read`` and ``execute_sql`` operations, and accumulates -insert/upsert/update/delete operations. - -Because other applications may be performing concurrent updates which -would invalidate the reads / queries, the work done by a transaction needs -to be bundled as a retryable "unit of work" function, which takes the -transaction as a required argument: - -.. code:: python - - def unit_of_work(transaction): - result = transaction.execute_sql(QUERY) - - for emp_id, hours, pay in _compute_pay(result): - transaction.insert_or_update( - table='monthly_hours', - columns=['employee_id', 'month', 'hours', 'pay'], - values=[emp_id, month_start, hours, pay]) - - database.run_in_transaction(unit_of_work) - -.. note:: - - :meth:`~google.cloud.spanner.database.Database.run_in_transaction` - commits the transaction automatically if the "unit of work" function - returns without raising an exception. - -.. note:: - - :meth:`~google.cloud.spanner.database.Database.run_in_transaction` - retries the "unit of work" function if the read / query operatoins - or the commit are aborted due to concurrent updates - -See :doc:`transaction-usage` for more complete examples of transaction usage. - -Configuring a session pool for a database ------------------------------------------ - -Under the covers, the ``snapshot``, ``batch``, and ``run_in_transaction`` -methods use a pool of :class:`~google.cloud.spanner.session.Session` objects -to manage their communication with the back-end. You can configure -one of the pools manually to control the number of sessions, timeouts, etc., -and then passing it to the :class:`~google.cloud.spanner.database.Database` -constructor: - -.. code-block:: python - - from google.cloud import spanner - - # Instantiate the Spanner client, and get the appropriate instance. - client = spanner.Client() - instance = client.instance(INSTANCE_NAME) - - # Create a database with a pool of a fixed size. - pool = spanner.FixedSizePool(size=10, default_timeout=5) - database = instance.database(DATABASE_NAME, pool=pool) - -Note that creating a database with a pool may presume that its database -already exists, as it may need to pre-create sessions (rather than creating -them on demand, as the default implementation does). - -You can supply your own pool implementation, which must satisfy the -contract laid out in :class:`~google.cloud.spanner.pool.AbstractSessionPool`: - -.. code-block:: python - - from google.cloud.spanner import AbstractSessionPool - - class MyCustomPool(AbstractSessionPool): - - def __init__(self, database, custom_param): - super(MyCustomPool, self).__init__(database) - self.custom_param = custom_param - - def get(self, read_only=False): - ... - - def put(self, session, discard_if_full=True): - ... - - database = instance.database(DATABASE_NAME, pool=pool) - pool = MyCustomPool(database, custom_param=42) - -See :doc:`advanced-session-pool-topics` for more advanced coverage of -session pools. diff --git a/spanner/docs/gapic/v1/admin_database_api.rst b/spanner/docs/gapic/v1/admin_database_api.rst deleted file mode 100644 index c63f242e8557..000000000000 --- a/spanner/docs/gapic/v1/admin_database_api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Spanner Admin Database Client API -================================= - -.. 
automodule:: google.cloud.spanner_admin_database_v1 - :members: - :inherited-members: diff --git a/spanner/docs/gapic/v1/admin_database_types.rst b/spanner/docs/gapic/v1/admin_database_types.rst deleted file mode 100644 index de3d9585c715..000000000000 --- a/spanner/docs/gapic/v1/admin_database_types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Spanner Admin Database Client Types -=================================== - -.. automodule:: google.cloud.spanner_admin_database_v1.types - :members: diff --git a/spanner/docs/gapic/v1/admin_instance_api.rst b/spanner/docs/gapic/v1/admin_instance_api.rst deleted file mode 100644 index c8c320a6cf41..000000000000 --- a/spanner/docs/gapic/v1/admin_instance_api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Spanner Admin Instance Client API -================================= - -.. automodule:: google.cloud.spanner_admin_instance_v1 - :members: - :inherited-members: diff --git a/spanner/docs/gapic/v1/admin_instance_types.rst b/spanner/docs/gapic/v1/admin_instance_types.rst deleted file mode 100644 index 4cd06b3ca0d9..000000000000 --- a/spanner/docs/gapic/v1/admin_instance_types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Spanner Admin Instance Client Types -=================================== - -.. automodule:: google.cloud.spanner_admin_instance_v1.types - :members: diff --git a/spanner/docs/gapic/v1/api.rst b/spanner/docs/gapic/v1/api.rst deleted file mode 100644 index 79e4835f2222..000000000000 --- a/spanner/docs/gapic/v1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Spanner Client API -================== - -.. automodule:: google.cloud.spanner_v1 - :members: - :inherited-members: diff --git a/spanner/docs/gapic/v1/transactions.rst b/spanner/docs/gapic/v1/transactions.rst deleted file mode 100644 index d34af43b4ae9..000000000000 --- a/spanner/docs/gapic/v1/transactions.rst +++ /dev/null @@ -1,241 +0,0 @@ -.. - This page is pulled from the TransactionOption type, where this entire - kaboodle is auto-generated. Sphinx does not particularly appreciate - entire narrative documentation, complete with headers, in an arbitrary - class docstring, and complains about this, so I (lukesneeringer@) - manually copied it over here. - - This should probably be updated when the Spanner code is re-generated. - This will be easy to remember because the source that needs to be copied - will be dropped in transaction_pb2.py and Sphinx will complain loudly - about it. - - Internal Google ticket: b/65243734 - -:orphan: - -.. _spanner-txn: - -Transactions -============ - -Each session can have at most one active transaction at a time. After -the active transaction is completed, the session can immediately be -re-used for the next transaction. It is not necessary to create a new -session for each transaction. - -Transaction Modes -================= - -Cloud Spanner supports two transaction modes: - -1. Locking read-write. This type of transaction is the only way to write - data into Cloud Spanner. These transactions rely on pessimistic - locking and, if necessary, two-phase commit. Locking read-write - transactions may abort, requiring the application to retry. - -2. Snapshot read-only. This transaction type provides guaranteed - consistency across several reads, but does not allow writes. Snapshot - read-only transactions can be configured to read at timestamps in the - past. Snapshot read-only transactions do not need to be committed. - -For transactions that only read, snapshot read-only transactions provide -simpler semantics and are almost always faster. 
In particular, read-only -transactions do not take locks, so they do not conflict with read-write -transactions. As a consequence of not taking locks, they also do not -abort, so retry loops are not needed. - -Transactions may only read/write data in a single database. They may, -however, read/write data in different tables within that database. - -Locking Read-Write Transactions -------------------------------- - -Locking transactions may be used to atomically read-modify-write data -anywhere in a database. This type of transaction is externally -consistent. - -Clients should attempt to minimize the amount of time a transaction is -active. Faster transactions commit with higher probability and cause -less contention. Cloud Spanner attempts to keep read locks active as -long as the transaction continues to do reads, and the transaction has -not been terminated by [Commit][google.spanner.v1.Spanner.Commit] or -[Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of -inactivity at the client may cause Cloud Spanner to release a -transaction's locks and abort it. - -Reads performed within a transaction acquire locks on the data being -read. Writes can only be done at commit time, after all reads have been -completed. Conceptually, a read-write transaction consists of zero or -more reads or SQL queries followed by -[Commit][google.spanner.v1.Spanner.Commit]. At any time before -[Commit][google.spanner.v1.Spanner.Commit], the client can send a -[Rollback][google.spanner.v1.Spanner.Rollback] request to abort the -transaction. - -Semantics -~~~~~~~~~ - -Cloud Spanner can commit the transaction if all read locks it acquired -are still valid at commit time, and it is able to acquire write locks -for all writes. Cloud Spanner can abort the transaction for any reason. -If a commit attempt returns ``ABORTED``, Cloud Spanner guarantees that -the transaction has not modified any user data in Cloud Spanner. - -Unless the transaction commits, Cloud Spanner makes no guarantees about -how long the transaction's locks were held for. It is an error to use -Cloud Spanner locks for any sort of mutual exclusion other than between -Cloud Spanner transactions themselves. - -Retrying Aborted Transactions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When a transaction aborts, the application can choose to retry the whole -transaction again. To maximize the chances of successfully committing -the retry, the client should execute the retry in the same session as -the original attempt. The original session's lock priority increases -with each consecutive abort, meaning that each attempt has a slightly -better chance of success than the previous. - -Under some circumstances (e.g., many transactions attempting to modify -the same row(s)), a transaction can abort many times in a short period -before successfully committing. Thus, it is not a good idea to cap the -number of retries a transaction can attempt; instead, it is better to -limit the total amount of wall time spent retrying. - -Idle Transactions -~~~~~~~~~~~~~~~~~ - -A transaction is considered idle if it has no outstanding reads or SQL -queries and has not started a read or SQL query within the last 10 -seconds. Idle transactions can be aborted by Cloud Spanner so that they -don't hold on to locks indefinitely. In that case, the commit will fail -with error ``ABORTED``. - -If this behavior is undesirable, periodically executing a simple SQL -query in the transaction (e.g., ``SELECT 1``) prevents the transaction -from becoming idle. 
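In the Python client, the retry guidance above (bound total wall time rather than the number of attempts) can be sketched roughly as follows, assuming a ``database`` object as in the usage documentation, a made-up ``accounts`` table, and that ``run_in_transaction`` accepts a ``timeout_secs`` keyword:

.. code:: python

    from google.api_core import exceptions

    def debit_ten(transaction):
        # Reads acquire locks; the update is buffered and sent at commit time.
        rows = list(transaction.execute_sql(
            'SELECT balance FROM accounts WHERE id = 1'))
        transaction.update(
            'accounts', columns=['id', 'balance'],
            values=[[1, rows[0][0] - 10]])

    try:
        # Aborted commits are retried in the same session until roughly 60
        # seconds of wall time have elapsed (timeout_secs is an assumption).
        database.run_in_transaction(debit_ten, timeout_secs=60)
    except exceptions.GoogleAPICallError:
        # Errors other than retryable aborts surface to the caller.
        raise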
- -Snapshot Read-Only Transactions -------------------------------- - -Snapshot read-only transactions provides a simpler method than locking -read-write transactions for doing several consistent reads. However, -this type of transaction does not support writes. - -Snapshot transactions do not take locks. Instead, they work by choosing -a Cloud Spanner timestamp, then executing all reads at that timestamp. -Since they do not acquire locks, they do not block concurrent read-write -transactions. - -Unlike locking read-write transactions, snapshot read-only transactions -never abort. They can fail if the chosen read timestamp is garbage -collected; however, the default garbage collection policy is generous -enough that most applications do not need to worry about this in -practice. - -Snapshot read-only transactions do not need to call -[Commit][google.spanner.v1.Spanner.Commit] or -[Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not -permitted to do so). - -To execute a snapshot transaction, the client specifies a timestamp -bound, which tells Cloud Spanner how to choose a read timestamp. - -The types of timestamp bound are: - -- Strong (the default). -- Bounded staleness. -- Exact staleness. - -If the Cloud Spanner database to be read is geographically distributed, -stale read-only transactions can execute more quickly than strong or -read-write transaction, because they are able to execute far from the -leader replica. - -Each type of timestamp bound is discussed in detail below. - -Strong -~~~~~~ - -Strong reads are guaranteed to see the effects of all transactions that -have committed before the start of the read. Furthermore, all rows -yielded by a single read are consistent with each other -- if any part -of the read observes a transaction, all parts of the read see the -transaction. - -Strong reads are not repeatable: two consecutive strong read-only -transactions might return inconsistent results if there are concurrent -writes. If consistency across reads is required, the reads should be -executed within a transaction or at an exact read timestamp. - -See -[TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. - -Exact Staleness -~~~~~~~~~~~~~~~ - -These timestamp bounds execute reads at a user-specified timestamp. -Reads at a timestamp are guaranteed to see a consistent prefix of the -global transaction history: they observe modifications done by all -transactions with a commit timestamp <= the read timestamp, and observe -none of the modifications done by transactions with a larger commit -timestamp. They will block until all conflicting transactions that may -be assigned commit timestamps <= the read timestamp have finished. - -The timestamp can either be expressed as an absolute Cloud Spanner -commit timestamp or a staleness relative to the current time. - -These modes do not require a "negotiation phase" to pick a timestamp. As -a result, they execute slightly faster than the equivalent boundedly -stale concurrency modes. On the other hand, boundedly stale reads -usually return fresher results. - -See -[TransactionOptions.ReadOnly.read\_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read\_timestamp] -and -[TransactionOptions.ReadOnly.exact\_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact\_staleness]. - -Bounded Staleness -~~~~~~~~~~~~~~~~~ - -Bounded staleness modes allow Cloud Spanner to pick the read timestamp, -subject to a user-provided staleness bound. 
Cloud Spanner chooses the -newest timestamp within the staleness bound that allows execution of the -reads at the closest available replica without blocking. - -All rows yielded are consistent with each other -- if any part of the -read observes a transaction, all parts of the read see the transaction. -Boundedly stale reads are not repeatable: two stale reads, even if they -use the same staleness bound, can execute at different timestamps and -thus return inconsistent results. - -Boundedly stale reads execute in two phases: the first phase negotiates -a timestamp among all replicas needed to serve the read. In the second -phase, reads are executed at the negotiated timestamp. - -As a result of the two phase execution, bounded staleness reads are -usually a little slower than comparable exact staleness reads. However, -they are typically able to return fresher results, and are more likely -to execute at the closest replica. - -Because the timestamp negotiation requires up-front knowledge of which -rows will be read, it can only be used with single-use read-only -transactions. - -See -[TransactionOptions.ReadOnly.max\_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max\_staleness] -and -[TransactionOptions.ReadOnly.min\_read\_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min\_read\_timestamp]. - -Old Read Timestamps and Garbage Collection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cloud Spanner continuously garbage collects deleted and overwritten data -in the background to reclaim storage space. This process is known as -"version GC". By default, version GC reclaims versions after they are -one hour old. Because of this, Cloud Spanner cannot perform reads at -read timestamps more than one hour in the past. This restriction also -applies to in-progress reads and/or SQL queries whose timestamp become -too old while executing. Reads and SQL queries with too-old read -timestamps fail with the error ``FAILED_PRECONDITION``. diff --git a/spanner/docs/gapic/v1/types.rst b/spanner/docs/gapic/v1/types.rst deleted file mode 100644 index 28956e60c769..000000000000 --- a/spanner/docs/gapic/v1/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Spanner Client Types -=================================== - -.. automodule:: google.cloud.spanner_v1.types - :members: diff --git a/spanner/docs/index.rst b/spanner/docs/index.rst deleted file mode 100644 index 729f42d0e062..000000000000 --- a/spanner/docs/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. include:: README.rst - -Usage Documentation -------------------- -.. toctree:: - :maxdepth: 1 - :titlesonly: - - client-usage - instance-usage - database-usage - batch-usage - snapshot-usage - transaction-usage - -API Documentation ------------------ -.. toctree:: - :maxdepth: 1 - :titlesonly: - - api-reference - advanced-session-pool-topics - -Changelog ---------- - -For a list of all ``google-cloud-spanner`` releases: - -.. toctree:: - :maxdepth: 2 - - changelog diff --git a/spanner/docs/instance-api.rst b/spanner/docs/instance-api.rst deleted file mode 100644 index 127b4c687372..000000000000 --- a/spanner/docs/instance-api.rst +++ /dev/null @@ -1,8 +0,0 @@ -Instance API -============ - -.. 
automodule:: google.cloud.spanner_v1.instance - :members: - :show-inheritance: - - diff --git a/spanner/docs/instance-usage.rst b/spanner/docs/instance-usage.rst deleted file mode 100644 index 909e36b93f98..000000000000 --- a/spanner/docs/instance-usage.rst +++ /dev/null @@ -1,181 +0,0 @@ -Instance Admin -============== - -After creating a :class:`~google.cloud.spanner.client.Client`, you can -interact with individual instances for a project. - -Instance Configurations ------------------------ - -Each instance within a project maps to a named "instance configuration", -specifying the location and other parameters for a set of instances. These -configurations are defined by the server, and cannot be changed. - -To iterate over all instance configurations available to your project, use the -:meth:`~google.cloud.spanner.client.Client.list_instance_configs` -method of the client: - -.. code:: python - - for config in client.list_instance_configs(): - # `config` is an instance of `InstanceConfig` - - -To fetch a single instance configuration, use the -:meth:`~google.cloud.spanner.client.Client.get_instance_configuration` -method of the client: - -.. code:: python - - config = client.get_instance_configuration('config-name') - -Each of these methods provide -:class:`~.spanner_admin_instance_v1.types.InstanceConfig` objects. - - -List Instances --------------- - -If you want a comprehensive list of all existing instances, iterate over the -:meth:`~google.cloud.spanner.client.Client.list_instances` method of -the client: - -.. code:: python - - for instance in client.list_instances(): - # `instance` is an instance of `Instance` - -This iterator yields :class:`~.spanner_admin_instance_v1.types.Instance` -objects. - - -Instance Factory ----------------- - -To create a :class:`~google.cloud.spanner.instance.Instance` object: - -.. code:: python - - config = configs[0] - instance = client.instance(instance_id, - configuration_name=config.name, - node_count=10, - display_name='My Instance') - -- ``configuration_name`` is the name of the instance configuration to which the - instance will be bound. It must be one of the names configured for your - project, discoverable via - :meth:`~google.cloud.spanner.client.Client.list_instance_configs`. - -- ``node_count`` is a postitive integral count of the number of nodes used - by the instance. More nodes allows for higher performance, but at a higher - billing cost. - -- ``display_name`` is optional. When not provided, ``display_name`` defaults - to the ``instance_id`` value. - -You can also use :meth:`Client.instance` to create a local wrapper for -an instance that has already been created: - -.. code:: python - - instance = client.instance(existing_instance_id) - instance.reload() - - -Create a new Instance ---------------------- - -After creating the instance object, use its -:meth:`~google.cloud.spanner.instance.Instance.create` method to -trigger its creation on the server: - -.. code:: python - - instance.display_name = 'My very own instance' - operation = instance.create() - -.. note:: - - Creating an instance triggers a "long-running operation" and - returns an :class:`google.cloud.spanner.instance.Operation` - object. See :ref:`check-on-current-instance-operation` for polling - to find out if the operation is completed. 
- - -Refresh metadata for an existing Instance ------------------------------------------ - -After creating the instance object, reload its server-side configuration -using its :meth:`~google.cloud.spanner.instance.Instance.reload` method: - -.. code:: python - - instance.reload() - -This will load ``display_name``, ``config_name``, and ``node_count`` -for the existing ``instance`` object from the back-end. - - -Update an existing Instance ---------------------------- - -After creating the instance object, you can update its metadata via -its :meth:`~google.cloud.spanner.instance.Instance.update` method: - -.. code:: python - - client.display_name = 'New display_name' - operation = instance.update() - -.. note:: - - Update an instance triggers a "long-running operation" and - returns a :class:`google.cloud.spanner.instance.Operation` - object. See :ref:`check-on-current-instance-operation` for polling - to find out if the operation is completed. - - -Delete an existing Instance ---------------------------- - -Delete an instance using its -:meth:`~google.cloud.spanner.instance.Instance.delete` method: - -.. code:: python - - instance.delete() - - -.. _check-on-current-instance-operation: - -Resolve Current Instance Operation ----------------------------------- - -The :meth:`~google.cloud.spanner.instance.Instance.create` and -:meth:`~google.cloud.spanner.instance.Instance.update` methods of instance -object trigger long-running operations on the server, and return instances -of the :class:`~google.cloud.spanner.instance.Operation` class. - -If you want to block on the completion of those operations, use the -``result`` method on the returned objects: - -.. code:: python - - >>> operation = instance.create() - >>> result = operation.result() - -This method will raise an exception if the operation fails. - - -Next Step ---------- - -Now we go down the hierarchy from -:class:`~google.cloud.spanner.instance.Instance` to a -:class:`~google.cloud.spanner.database.Database`. - -Next, learn about the :doc:`database-usage`. - - -.. _Instance Admin API: https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1 diff --git a/spanner/docs/keyset-api.rst b/spanner/docs/keyset-api.rst deleted file mode 100644 index 90137cf87640..000000000000 --- a/spanner/docs/keyset-api.rst +++ /dev/null @@ -1,8 +0,0 @@ -Keyset API -========== - -.. automodule:: google.cloud.spanner_v1.keyset - :members: - :show-inheritance: - - diff --git a/spanner/docs/session-api.rst b/spanner/docs/session-api.rst deleted file mode 100644 index 1f6d0ac60261..000000000000 --- a/spanner/docs/session-api.rst +++ /dev/null @@ -1,15 +0,0 @@ -Session API -=========== - -.. automodule:: google.cloud.spanner_v1.session - :members: - :show-inheritance: - - -Session Pools API -================= - -.. automodule:: google.cloud.spanner_v1.pool - :members: - :show-inheritance: - diff --git a/spanner/docs/snapshot-api.rst b/spanner/docs/snapshot-api.rst deleted file mode 100644 index ca37d8a2591a..000000000000 --- a/spanner/docs/snapshot-api.rst +++ /dev/null @@ -1,8 +0,0 @@ -Snapshot API -============ - -.. 
automodule:: google.cloud.spanner_v1.snapshot - :members: - :inherited-members: - - diff --git a/spanner/docs/snapshot-usage.rst b/spanner/docs/snapshot-usage.rst deleted file mode 100644 index 4c5a5b24204c..000000000000 --- a/spanner/docs/snapshot-usage.rst +++ /dev/null @@ -1,108 +0,0 @@ -Read-only Transactions via Snapshots -#################################### - -A :class:`~google.cloud.spanner.snapshot.Snapshot` represents a read-only -transaction: when multiple read operations are peformed via a Snapshot, -the results are consistent as of a particular point in time. - - -Beginning a Snapshot --------------------- - -To begin using a snapshot using the default "bound" (which is "strong"), -meaning all reads are performed at a timestamp where all previously-committed -transactions are visible: - -.. code:: python - - snapshot = database.snapshot() - -You can also specify a weaker bound, which can either be to perform all -reads as of a given timestamp: - -.. code:: python - - import datetime - from pytz import UTC - TIMESTAMP = datetime.datetime.utcnow().replace(tzinfo=UTC) - snapshot = database.snapshot(read_timestamp=TIMESTAMP) - -or as of a given duration in the past: - -.. code:: python - - import datetime - DURATION = datetime.timedelta(seconds=5) - snapshot = database.snapshot(exact_staleness=DURATION) - -Single Use and Multiple Use Snapshots -------------------------------------- - -In the context of read only transactions, ``read`` and ``execute_sql`` -methods can be used multiple times if you specify ``multi_use=True`` -in the constructor of the snapshot. However, ``multi_use=True`` is -incompatible with either ``max_staleness`` and/or ``min_read_timestamp``. - -Otherwise ``multi_use`` defaults to ``False`` and the snapshot cannot be -reused. - -.. code:: python - - snapshot = database.snapshot(multi_use=True) - -:meth:`~.spanner_v1.snapshot.Snapshot.begin` can only be used on a -snapshot with ``multi_use=True``. In which case it is also necessary -to call if you need to have multiple pending operations. - -Read Table Data ---------------- - -Read data for selected rows from a table in the database. Calls -the ``Read`` API, which returns all rows specified in ``key_set``, or else -fails if the result set is too large, - -.. code:: python - - with database.snapshot() as snapshot: - result = snapshot.read( - table='table-name', columns=['first_name', 'last_name', 'age'], - key_set=['phred@example.com', 'bharney@example.com']) - - for row in result.rows: - print(row) - -.. note:: - - Perform all iteration within the context of the ``with database.snapshot()`` - block. - - -Execute a SQL Select Statement ------------------------------- - -Read data from a query against tables in the database. Calls -the ``ExecuteSql`` API, which returns all rows matching the query, or else -fails if the result set is too large, - -.. code:: python - - with database.snapshot() as snapshot: - QUERY = ( - 'SELECT e.first_name, e.last_name, p.telephone ' - 'FROM employees as e, phones as p ' - 'WHERE p.employee_id == e.employee_id') - result = snapshot.execute_sql(QUERY) - - for row in list(result): - print(row) - -.. note:: - - Perform all iteration within the context of the ``with database.snapshot()`` - block. - - -Next Step ---------- - -Next, learn about :doc:`transaction-usage`. 
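One point above that stops short of a full example is the multi-use snapshot. As a minimal sketch, assuming ``INSTANCE_NAME`` and ``DATABASE_NAME`` identify an existing database and reusing the ``employees`` and ``phones`` tables from the query example, several reads at one consistent timestamp might look like:

.. code:: python

    from google.cloud import spanner

    client = spanner.Client()
    database = client.instance(INSTANCE_NAME).database(DATABASE_NAME)

    with database.snapshot(multi_use=True) as snapshot:
        # Pin the read timestamp before issuing more than one read.
        snapshot.begin()

        employees = list(snapshot.execute_sql(
            'SELECT e.first_name, e.last_name FROM employees AS e'))
        phones = list(snapshot.execute_sql(
            'SELECT p.employee_id, p.telephone FROM phones AS p'))

        # Both result sets reflect the database as of the same timestamp;
        # iteration happens inside the ``with`` block, as advised above.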
diff --git a/spanner/docs/streamed-api.rst b/spanner/docs/streamed-api.rst deleted file mode 100644 index 53bab89ba491..000000000000 --- a/spanner/docs/streamed-api.rst +++ /dev/null @@ -1,8 +0,0 @@ -StreamedResultSet API -===================== - -.. automodule:: google.cloud.spanner_v1.streamed - :members: - :show-inheritance: - - diff --git a/spanner/docs/transaction-api.rst b/spanner/docs/transaction-api.rst deleted file mode 100644 index f7e8d4759aa9..000000000000 --- a/spanner/docs/transaction-api.rst +++ /dev/null @@ -1,8 +0,0 @@ -Transaction API -=============== - -.. automodule:: google.cloud.spanner_v1.transaction - :members: - :inherited-members: - - diff --git a/spanner/docs/transaction-usage.rst b/spanner/docs/transaction-usage.rst deleted file mode 100644 index 4e943ed405df..000000000000 --- a/spanner/docs/transaction-usage.rst +++ /dev/null @@ -1,291 +0,0 @@ -Read-write Transactions -####################### - -A :class:`~google.cloud.spanner.transaction.Transaction` represents a -transaction: when the transaction commits, it will send any accumulated -mutations to the server. - -To understand more about how transactions work, visit :ref:`spanner-txn`. -To learn more about how to use them in the Python client, continue reading. - - -Read Table Data --------------- - -Read data for selected rows from a table in the database. Calls the ``Read`` -API, which returns all rows specified in ``key_set``, or else fails if the -result set is too large. - -.. code:: python - - result = transaction.read( - table='table-name', columns=['first_name', 'last_name', 'age'], - key_set=['phred@example.com', 'bharney@example.com']) - - for row in list(result): - print(row) - -.. note:: - - If streaming a chunk fails due to a "resumable" error, - :meth:`Snapshot.read` retries the ``StreamingRead`` API request, - passing the ``resume_token`` from the last partial result streamed. - - -Execute a SQL Select Statement ------------------------------ - -Read data from a query against tables in the database. Calls -the ``ExecuteSql`` API, which returns all rows matching the query, or else -fails if the result set is too large. - -.. code:: python - - QUERY = ( - 'SELECT e.first_name, e.last_name, p.telephone ' - 'FROM employees as e, phones as p ' - 'WHERE p.employee_id = e.employee_id') - result = transaction.execute_sql(QUERY) - - for row in list(result): - print(row) - - -Insert records using a Transaction ---------------------------------- - -:meth:`Transaction.insert` adds one or more new records to a table. Fails if -any of the records already exists. - -.. code:: python - - transaction.insert( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['phred@example.com', 'Phred', 'Phlyntstone', 32], - ['bharney@example.com', 'Bharney', 'Rhubble', 31], - ]) - -.. note:: - - Ensure that data being sent for ``STRING`` columns uses a text string - (``str`` in Python 3; ``unicode`` in Python 2). - - Additionally, if you are writing data intended for a ``BYTES`` column, you - must base64 encode it. - - -Update records using a Transaction ---------------------------------- - -:meth:`Transaction.update` updates one or more existing records in a table. Fails -if any of the records does not already exist. - -.. code:: python - - transaction.update( - 'citizens', columns=['email', 'age'], - values=[ - ['phred@example.com', 33], - ['bharney@example.com', 32], - ]) - -..
note:: - - Ensure that data being sent for ``STRING`` columns uses a text string - (``str`` in Python 3; ``unicode`` in Python 2). - - Additionally, if you are writing data intended for a ``BYTES`` column, you - must base64 encode it. - - -Insert or update records using a Transaction -------------------------------------------- - -:meth:`Transaction.insert_or_update` inserts *or* updates one or more records -in a table. Existing rows have values for the supplied columns overwritten; -other column values are preserved. - -.. code:: python - - transaction.insert_or_update( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['phred@example.com', 'Phred', 'Phlyntstone', 31], - ['wylma@example.com', 'Wylma', 'Phlyntstone', 29], - ]) - -.. note:: - - Ensure that data being sent for ``STRING`` columns uses a text string - (``str`` in Python 3; ``unicode`` in Python 2). - - Additionally, if you are writing data intended for a ``BYTES`` column, you - must base64 encode it. - - -Replace records using a Transaction ----------------------------------- - -:meth:`Transaction.replace` inserts *or* updates one or more records in a -table. Existing rows have values for the supplied columns overwritten; other -column values are set to null. - -.. code:: python - - transaction.replace( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['bharney@example.com', 'Bharney', 'Rhubble', 30], - ['bhettye@example.com', 'Bhettye', 'Rhubble', 30], - ]) - -.. note:: - - Ensure that data being sent for ``STRING`` columns uses a text string - (``str`` in Python 3; ``unicode`` in Python 2). - - Additionally, if you are writing data intended for a ``BYTES`` column, you - must base64 encode it. - - -Delete records using a Transaction ---------------------------------- - -:meth:`Transaction.delete` removes one or more records from a table. -Non-existent rows do not cause errors. - -.. code:: python - - transaction.delete( - 'citizens', keyset=['bharney@example.com', 'nonesuch@example.com']) - - -Using :meth:`~Database.run_in_transaction` ------------------------------------------ - -Rather than calling :meth:`~Transaction.commit` or :meth:`~Transaction.rollback` -manually, you should use :meth:`~Database.run_in_transaction` to run the -function that you need. The transaction's :meth:`~Transaction.commit` method -will be called automatically if the function returns without raising an -exception. The function will automatically be retried for -:class:`~google.api_core.exceptions.Aborted` errors; a -:class:`~google.api_core.exceptions.GoogleAPICallError` is raised as-is, and -:meth:`~Transaction.rollback` will be called for all other exceptions. - -.. code:: python - - def _unit_of_work(transaction): - - transaction.insert( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['phred@example.com', 'Phred', 'Phlyntstone', 32], - ['bharney@example.com', 'Bharney', 'Rhubble', 31], - ]) - - transaction.update( - 'citizens', columns=['email', 'age'], - values=[ - ['phred@example.com', 33], - ['bharney@example.com', 32], - ]) - - ... - - transaction.delete('citizens', - keyset=['bharney@example.com', 'nonesuch@example.com']) - - database.run_in_transaction(_unit_of_work) - - -Use a Transaction as a Context Manager -------------------------------------- - -Alternatively, you can use the :class:`Transaction` instance as a context -manager.
The transaction's :meth:`~Transaction.commit` method will be called -automatically if the ``with`` block exits without raising an exception. - -If an exception is raised inside the ``with`` block, the transaction's -:meth:`~Transaction.rollback` method will automatically be called. - -.. code:: python - - with session.transaction() as transaction: - - transaction.insert( - 'citizens', columns=['email', 'first_name', 'last_name', 'age'], - values=[ - ['phred@example.com', 'Phred', 'Phlyntstone', 32], - ['bharney@example.com', 'Bharney', 'Rhubble', 31], - ]) - - transaction.update( - 'citizens', columns=['email', 'age'], - values=[ - ['phred@example.com', 33], - ['bharney@example.com', 32], - ]) - - ... - - transaction.delete('citizens', - keyset=['bharney@example.com', 'nonesuch@example.com']) - - -Begin a Transaction ------------------- - -.. note:: - - Normally, applications will not construct transactions manually. Rather, - consider using :meth:`~Database.run_in_transaction` or the context manager - as described above. - -To begin using a transaction manually: - -.. code:: python - - transaction = session.transaction() - - -Commit changes for a Transaction -------------------------------- - -.. note:: - - Normally, applications will not commit transactions manually. Rather, - consider using :meth:`~Database.run_in_transaction` or the context manager - as described above. - -After describing the modifications to be made to table data via the -:meth:`Transaction.insert`, :meth:`Transaction.update`, -:meth:`Transaction.insert_or_update`, :meth:`Transaction.replace`, and -:meth:`Transaction.delete` methods above, send them to -the back-end by calling :meth:`Transaction.commit`, which makes the ``Commit`` -API call. - -.. code:: python - - transaction.commit() - - -Roll back changes for a Transaction ----------------------------------- - -.. note:: - - Normally, applications will not roll back transactions manually. Rather, - consider using :meth:`~Database.run_in_transaction` or the context manager - as described above. - -After describing the modifications to be made to table data via the -:meth:`Transaction.insert`, :meth:`Transaction.update`, -:meth:`Transaction.insert_or_update`, :meth:`Transaction.replace`, and -:meth:`Transaction.delete` methods above, cancel the transaction on the -back-end by calling :meth:`Transaction.rollback`, which makes the -``Rollback`` API call. - -..
code:: python - - transaction.rollback() diff --git a/spanner/docs/usage.html b/spanner/docs/usage.html deleted file mode 100644 index 9b81d6976cda..000000000000 --- a/spanner/docs/usage.html +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - diff --git a/spanner/google/__init__.py b/spanner/google/__init__.py deleted file mode 100644 index 2f4b4738aee1..000000000000 --- a/spanner/google/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/spanner/google/cloud/__init__.py b/spanner/google/cloud/__init__.py deleted file mode 100644 index 2f4b4738aee1..000000000000 --- a/spanner/google/cloud/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/spanner/google/cloud/spanner.py b/spanner/google/cloud/spanner.py deleted file mode 100644 index 0b1d3d949f31..000000000000 --- a/spanner/google/cloud/spanner.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Cloud Spanner API package.""" - -from __future__ import absolute_import - -from google.cloud.spanner_v1 import __version__ -from google.cloud.spanner_v1 import AbstractSessionPool -from google.cloud.spanner_v1 import BurstyPool -from google.cloud.spanner_v1 import Client -from google.cloud.spanner_v1 import COMMIT_TIMESTAMP -from google.cloud.spanner_v1 import enums -from google.cloud.spanner_v1 import FixedSizePool -from google.cloud.spanner_v1 import KeyRange -from google.cloud.spanner_v1 import KeySet -from google.cloud.spanner_v1 import param_types -from google.cloud.spanner_v1 import PingingPool -from google.cloud.spanner_v1 import TransactionPingingPool -from google.cloud.spanner_v1 import types - - -__all__ = ( - "__version__", - "AbstractSessionPool", - "BurstyPool", - "Client", - "COMMIT_TIMESTAMP", - "enums", - "FixedSizePool", - "KeyRange", - "KeySet", - "param_types", - "PingingPool", - "TransactionPingingPool", - "types", -) diff --git a/spanner/google/cloud/spanner_admin_database_v1/__init__.py b/spanner/google/cloud/spanner_admin_database_v1/__init__.py deleted file mode 100644 index 3a5b42403c0d..000000000000 --- a/spanner/google/cloud/spanner_admin_database_v1/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import - -from google.cloud.spanner_admin_database_v1 import types -from google.cloud.spanner_admin_database_v1.gapic import database_admin_client -from google.cloud.spanner_admin_database_v1.gapic import enums - - -class DatabaseAdminClient(database_admin_client.DatabaseAdminClient): - __doc__ = database_admin_client.DatabaseAdminClient.__doc__ - enums = enums - - -__all__ = ("enums", "types", "DatabaseAdminClient") diff --git a/spanner/google/cloud/spanner_admin_database_v1/gapic/__init__.py b/spanner/google/cloud/spanner_admin_database_v1/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py b/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py deleted file mode 100644 index cbb2c084cde7..000000000000 --- a/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py +++ /dev/null @@ -1,1006 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.spanner.admin.database.v1 DatabaseAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.spanner_admin_database_v1.gapic import database_admin_client_config -from google.cloud.spanner_admin_database_v1.gapic import enums -from google.cloud.spanner_admin_database_v1.gapic.transports import ( - database_admin_grpc_transport, -) -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2 -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2_grpc -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-spanner").version - - -class DatabaseAdminClient(object): - """ - Cloud Spanner Database Admin API - - The Cloud Spanner Database Admin API can be used to create, drop, and - list databases. It also enables updating the schema of pre-existing - databases. - """ - - SERVICE_ADDRESS = "spanner.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. 
- _INTERFACE_NAME = "google.spanner.admin.database.v1.DatabaseAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatabaseAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def database_path(cls, project, instance, database): - """Return a fully-qualified database string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/databases/{database}", - project=project, - instance=instance, - database=database, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.DatabaseAdminGrpcTransport, - Callable[[~.Credentials, type], ~.DatabaseAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = database_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=database_admin_grpc_transport.DatabaseAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = database_admin_grpc_transport.DatabaseAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_database( - self, - parent, - create_statement, - extra_statements=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new Cloud Spanner database and starts to prepare it for - serving. The returned ``long-running operation`` will have a name of the - format ``/operations/`` and can be used to - track preparation of the database. The ``metadata`` field type is - ``CreateDatabaseMetadata``. The ``response`` field type is ``Database``, - if successful. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `create_statement`: - >>> create_statement = '' - >>> - >>> response = client.create_database(parent, create_statement) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The name of the instance that will serve the new database. - Values are of the form ``projects//instances/``. - create_statement (str): Required. A ``CREATE DATABASE`` statement, which specifies the ID of the - new database. 
The database ID must conform to the regular expression - ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be between 2 and 30 characters in - length. If the database ID is a reserved word or if it contains a - hyphen, the database ID must be enclosed in backticks (`````). - extra_statements (list[str]): An optional list of DDL statements to run inside the newly created - database. Statements can create tables, indexes, etc. These - statements execute atomically with the creation of the database: - if there is an error in any statement, the database is not created. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_database" not in self._inner_api_calls: - self._inner_api_calls[ - "create_database" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_database, - default_retry=self._method_configs["CreateDatabase"].retry, - default_timeout=self._method_configs["CreateDatabase"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.CreateDatabaseRequest( - parent=parent, - create_statement=create_statement, - extra_statements=extra_statements, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_database"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - spanner_database_admin_pb2.Database, - metadata_type=spanner_database_admin_pb2.CreateDatabaseMetadata, - ) - - def get_database( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the state of a Cloud Spanner database. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> name = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> response = client.get_database(name) - - Args: - name (str): Required. The name of the requested database. Values are of the form - ``projects//instances//databases/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.Database` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_database" not in self._inner_api_calls: - self._inner_api_calls[ - "get_database" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_database, - default_retry=self._method_configs["GetDatabase"].retry, - default_timeout=self._method_configs["GetDatabase"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.GetDatabaseRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_database"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_database_ddl( - self, - database, - statements, - operation_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates the schema of a Cloud Spanner database by - creating/altering/dropping tables, columns, indexes, etc. The returned - ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - execution of the schema change(s). The ``metadata`` field type is - ``UpdateDatabaseDdlMetadata``. The operation has no response. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> # TODO: Initialize `statements`: - >>> statements = [] - >>> - >>> response = client.update_database_ddl(database, statements) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - database (str): Required. The database to update. - statements (list[str]): Required. DDL statements to be applied to the database. - operation_id (str): If empty, the new update request is assigned an automatically-generated - operation ID. Otherwise, ``operation_id`` is used to construct the name - of the resulting ``Operation``. - - Specifying an explicit operation ID simplifies determining whether the - statements were executed in the event that the ``UpdateDatabaseDdl`` - call is replayed, or the return value is otherwise lost: the - ``database`` and ``operation_id`` fields can be combined to form the - ``name`` of the resulting ``longrunning.Operation``: - ``/operations/``. - - ``operation_id`` should be unique within the database, and must be a - valid identifier: ``[a-z][a-z0-9_]*``. Note that automatically-generated - operation IDs always begin with an underscore. If the named operation - already exists, ``UpdateDatabaseDdl`` returns ``ALREADY_EXISTS``. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_database_ddl" not in self._inner_api_calls: - self._inner_api_calls[ - "update_database_ddl" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_database_ddl, - default_retry=self._method_configs["UpdateDatabaseDdl"].retry, - default_timeout=self._method_configs["UpdateDatabaseDdl"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.UpdateDatabaseDdlRequest( - database=database, statements=statements, operation_id=operation_id - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_database_ddl"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=spanner_database_admin_pb2.UpdateDatabaseDdlMetadata, - ) - - def drop_database( - self, - database, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Drops (aka deletes) a Cloud Spanner database. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> client.drop_database(database) - - Args: - database (str): Required. The database to be dropped. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "drop_database" not in self._inner_api_calls: - self._inner_api_calls[ - "drop_database" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.drop_database, - default_retry=self._method_configs["DropDatabase"].retry, - default_timeout=self._method_configs["DropDatabase"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.DropDatabaseRequest(database=database) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["drop_database"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_database_ddl( - self, - database, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns the schema of a Cloud Spanner database as a list of formatted - DDL statements. This method does not show pending schema updates, those - may be queried using the ``Operations`` API. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> response = client.get_database_ddl(database) - - Args: - database (str): Required. The database whose schema we wish to get. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_database_ddl" not in self._inner_api_calls: - self._inner_api_calls[ - "get_database_ddl" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_database_ddl, - default_retry=self._method_configs["GetDatabaseDdl"].retry, - default_timeout=self._method_configs["GetDatabaseDdl"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.GetDatabaseDdlRequest(database=database) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_database_ddl"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on a database resource. 
Replaces any - existing policy. - - Authorization requires ``spanner.databases.setIamPolicy`` permission on - ``resource``. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.spanner_admin_database_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_database_v1.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for a database resource. Returns an empty - policy if a database exists but does not have a policy set. - - Authorization requires ``spanner.databases.getIamPolicy`` permission on - ``resource``. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. 
- See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.spanner_admin_database_v1.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_database_v1.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified database - resource. - - Attempting this RPC on a non-existent Cloud Spanner database will result - in a NOT\_FOUND error if the user has ``spanner.databases.list`` - permission on the containing Cloud Spanner instance. Otherwise returns - an empty set of permissions. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with - wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_database_v1.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_databases( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists Cloud Spanner databases. - - Example: - >>> from google.cloud import spanner_admin_database_v1 - >>> - >>> client = spanner_admin_database_v1.DatabaseAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_databases(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_databases(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The instance whose databases should be listed. Values are of - the form ``projects//instances/``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_admin_database_v1.types.Database` instances. 
- You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_databases" not in self._inner_api_calls: - self._inner_api_calls[ - "list_databases" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_databases, - default_retry=self._method_configs["ListDatabases"].retry, - default_timeout=self._method_configs["ListDatabases"].timeout, - client_info=self._client_info, - ) - - request = spanner_database_admin_pb2.ListDatabasesRequest( - parent=parent, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_databases"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="databases", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py b/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py deleted file mode 100644 index 90c9f796e2d7..000000000000 --- a/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py +++ /dev/null @@ -1,68 +0,0 @@ -config = { - "interfaces": { - "google.spanner.admin.database.v1.DatabaseAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "CreateDatabase": { - "timeout_millis": 3600000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetDatabase": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "UpdateDatabaseDdl": { - "timeout_millis": 3600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DropDatabase": { - "timeout_millis": 3600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetDatabaseDdl": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "SetIamPolicy": { - "timeout_millis": 30000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetIamPolicy": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "TestIamPermissions": { - "timeout_millis": 30000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "ListDatabases": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git 
a/spanner/google/cloud/spanner_admin_database_v1/gapic/enums.py b/spanner/google/cloud/spanner_admin_database_v1/gapic/enums.py deleted file mode 100644 index aa1a51902763..000000000000 --- a/spanner/google/cloud/spanner_admin_database_v1/gapic/enums.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class Database(object): - class State(enum.IntEnum): - """ - Indicates the current state of the database. - - Attributes: - STATE_UNSPECIFIED (int): Not specified. - CREATING (int): The database is still being created. Operations on the database may fail - with ``FAILED_PRECONDITION`` in this state. - READY (int): The database is fully created and ready for use. - """ - - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 diff --git a/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/__init__.py b/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py b/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py deleted file mode 100644 index cd56873704b1..000000000000 --- a/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2_grpc - - -class DatabaseAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.spanner.admin.database.v1 DatabaseAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/spanner.admin", - ) - - def __init__( - self, channel=None, credentials=None, address="spanner.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. 
This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "database_admin_stub": spanner_database_admin_pb2_grpc.DatabaseAdminStub( - channel - ) - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="spanner.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_database(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.create_database`. - - Creates a new Cloud Spanner database and starts to prepare it for - serving. The returned ``long-running operation`` will have a name of the - format ``/operations/`` and can be used to - track preparation of the database. The ``metadata`` field type is - ``CreateDatabaseMetadata``. The ``response`` field type is ``Database``, - if successful. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].CreateDatabase - - @property - def get_database(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.get_database`. - - Gets the state of a Cloud Spanner database. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].GetDatabase - - @property - def update_database_ddl(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.update_database_ddl`. 
- - Updates the schema of a Cloud Spanner database by - creating/altering/dropping tables, columns, indexes, etc. The returned - ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - execution of the schema change(s). The ``metadata`` field type is - ``UpdateDatabaseDdlMetadata``. The operation has no response. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].UpdateDatabaseDdl - - @property - def drop_database(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.drop_database`. - - Drops (aka deletes) a Cloud Spanner database. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].DropDatabase - - @property - def get_database_ddl(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.get_database_ddl`. - - Returns the schema of a Cloud Spanner database as a list of formatted - DDL statements. This method does not show pending schema updates, those - may be queried using the ``Operations`` API. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].GetDatabaseDdl - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.set_iam_policy`. - - Sets the access control policy on a database resource. Replaces any - existing policy. - - Authorization requires ``spanner.databases.setIamPolicy`` permission on - ``resource``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].SetIamPolicy - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.get_iam_policy`. - - Gets the access control policy for a database resource. Returns an empty - policy if a database exists but does not have a policy set. - - Authorization requires ``spanner.databases.getIamPolicy`` permission on - ``resource``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].GetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified database - resource. - - Attempting this RPC on a non-existent Cloud Spanner database will result - in a NOT\_FOUND error if the user has ``spanner.databases.list`` - permission on the containing Cloud Spanner instance. Otherwise returns - an empty set of permissions. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["database_admin_stub"].TestIamPermissions - - @property - def list_databases(self): - """Return the gRPC stub for :meth:`DatabaseAdminClient.list_databases`. - - Lists Cloud Spanner databases. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
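For context, the stub properties in this hunk were normally reached through the higher-level GAPIC client rather than called directly. A minimal sketch, assuming the pre-split google-cloud-spanner 1.x package is installed, application-default credentials are available, and the project/instance IDs are placeholders:

from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
parent = client.instance_path("my-project", "my-instance")  # placeholder IDs

# Each client call is dispatched to one of the gRPC stubs shown above,
# e.g. DatabaseAdminStub.ListDatabases for the iteration below.
for database in client.list_databases(parent):
    print(database.name, database.state)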
- """ - return self._stubs["database_admin_stub"].ListDatabases diff --git a/spanner/google/cloud/spanner_admin_database_v1/proto/__init__.py b/spanner/google/cloud/spanner_admin_database_v1/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto b/spanner/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto deleted file mode 100644 index ea5200b4cb9b..000000000000 --- a/spanner/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.spanner.admin.database.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database"; -option java_multiple_files = true; -option java_outer_classname = "SpannerDatabaseAdminProto"; -option java_package = "com.google.spanner.admin.database.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; - -// The Instance resource is defined in `google.spanner.admin.instance.v1`. -// Because this is a separate, independent API (technically), we redefine -// the resource name pattern here. -option (google.api.resource_definition) = { - type: "spanner.googleapis.com/Instance" - pattern: "projects/{project}/instances/{instance}" -}; - -// Cloud Spanner Database Admin API -// -// The Cloud Spanner Database Admin API can be used to create, drop, and -// list databases. It also enables updating the schema of pre-existing -// databases. -service DatabaseAdmin { - option (google.api.default_host) = "spanner.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/spanner.admin"; - - // Lists Cloud Spanner databases. - rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/instances/*}/databases" - }; - option (google.api.method_signature) = "parent"; - } - - // Creates a new Cloud Spanner database and starts to prepare it for serving. - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track preparation of the database. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. 
- // The [response][google.longrunning.Operation.response] field type is - // [Database][google.spanner.admin.database.v1.Database], if successful. - rpc CreateDatabase(CreateDatabaseRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/instances/*}/databases" - body: "*" - }; - option (google.api.method_signature) = "parent,create_statement"; - option (google.longrunning.operation_info) = { - response_type: "google.spanner.admin.database.v1.Database" - metadata_type: "google.spanner.admin.database.v1.CreateDatabaseMetadata" - }; - } - - // Gets the state of a Cloud Spanner database. - rpc GetDatabase(GetDatabaseRequest) returns (Database) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instances/*/databases/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Updates the schema of a Cloud Spanner database by - // creating/altering/dropping tables, columns, indexes, etc. The returned - // [long-running operation][google.longrunning.Operation] will have a name of - // the format `/operations/` and can be used to - // track execution of the schema change(s). The - // [metadata][google.longrunning.Operation.metadata] field type is - // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. - // The operation has no response. - rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl" - body: "*" - }; - option (google.api.method_signature) = "database,statements"; - option (google.longrunning.operation_info) = { - response_type: "google.protobuf.Empty" - metadata_type: "google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata" - }; - } - - // Drops (aka deletes) a Cloud Spanner database. - rpc DropDatabase(DropDatabaseRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{database=projects/*/instances/*/databases/*}" - }; - option (google.api.method_signature) = "database"; - } - - // Returns the schema of a Cloud Spanner database as a list of formatted - // DDL statements. This method does not show pending schema updates, those may - // be queried using the [Operations][google.longrunning.Operations] API. - rpc GetDatabaseDdl(GetDatabaseDdlRequest) returns (GetDatabaseDdlResponse) { - option (google.api.http) = { - get: "/v1/{database=projects/*/instances/*/databases/*}/ddl" - }; - option (google.api.method_signature) = "database"; - } - - // Sets the access control policy on a database resource. - // Replaces any existing policy. - // - // Authorization requires `spanner.databases.setIamPolicy` - // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) - returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy" - body: "*" - additional_bindings { - post: "/v1/{resource=projects/*/instances/*/backups/*}:setIamPolicy" - body: "*" - } - }; - option (google.api.method_signature) = "resource,policy"; - } - - // Gets the access control policy for a database resource. - // Returns an empty policy if a database exists but does - // not have a policy set. - // - // Authorization requires `spanner.databases.getIamPolicy` permission on - // [resource][google.iam.v1.GetIamPolicyRequest.resource]. 
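As a rough illustration of the IAM surface these rpcs describe (a sketch only, assuming the 1.x DatabaseAdminClient, a placeholder database path, and a caller holding the getIamPolicy/setIamPolicy permissions noted above):

from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
resource = client.database_path("my-project", "my-instance", "my-database")

# Read the current policy; an empty policy is returned if none has been set.
policy = client.get_iam_policy(resource)

# Grant a hypothetical service account read access, then write the policy back.
policy.bindings.add(
    role="roles/spanner.databaseReader",
    members=["serviceAccount:reader@my-project.iam.gserviceaccount.com"],
)
client.set_iam_policy(resource, policy)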
- rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) - returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy" - body: "*" - additional_bindings { - post: "/v1/{resource=projects/*/instances/*/backups/*}:getIamPolicy" - body: "*" - } - }; - option (google.api.method_signature) = "resource"; - } - - // Returns permissions that the caller has on the specified database resource. - // - // Attempting this RPC on a non-existent Cloud Spanner database will - // result in a NOT_FOUND error if the user has - // `spanner.databases.list` permission on the containing Cloud - // Spanner instance. Otherwise returns an empty set of permissions. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) - returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions" - body: "*" - additional_bindings { - post: "/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions" - body: "*" - } - }; - option (google.api.method_signature) = "resource,permissions"; - } -} - -// A Cloud Spanner database. -message Database { - option (google.api.resource) = { - type: "spanner.googleapis.com/Database" - pattern: "projects/{project}/instances/{instance}/databases/{database}" - }; - - // Indicates the current state of the database. - enum State { - // Not specified. - STATE_UNSPECIFIED = 0; - - // The database is still being created. Operations on the database may fail - // with `FAILED_PRECONDITION` in this state. - CREATING = 1; - - // The database is fully created and ready for use. - READY = 2; - } - - // Required. The name of the database. Values are of the form - // `projects//instances//databases/`, - // where `` is as specified in the `CREATE DATABASE` - // statement. This name can be passed to other API methods to - // identify the database. - string name = 1; - - // Output only. The current database state. - State state = 2; -} - -// The request for -// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. -message ListDatabasesRequest { - // Required. The instance whose databases should be listed. - // Values are of the form `projects//instances/`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Instance" - } - ]; - - // Number of databases to be returned in the response. If 0 or less, - // defaults to the server's maximum allowed page size. - int32 page_size = 3; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] - // from a previous - // [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. - string page_token = 4; -} - -// The response for -// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. -message ListDatabasesResponse { - // Databases that matched the request. - repeated Database databases = 1; - - // `next_page_token` can be sent in a subsequent - // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] - // call to fetch more of the matching databases. - string next_page_token = 2; -} - -// The request for -// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. -message CreateDatabaseRequest { - // Required. The name of the instance that will serve the new database. 
- // Values are of the form `projects//instances/`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Instance" - } - ]; - - // Required. A `CREATE DATABASE` statement, which specifies the ID of the - // new database. The database ID must conform to the regular expression - // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length. - // If the database ID is a reserved word or if it contains a hyphen, the - // database ID must be enclosed in backticks (`` ` ``). - string create_statement = 2 [(google.api.field_behavior) = REQUIRED]; - - // An optional list of DDL statements to run inside the newly created - // database. Statements can create tables, indexes, etc. These - // statements execute atomically with the creation of the database: - // if there is an error in any statement, the database is not created. - repeated string extra_statements = 3; -} - -// Metadata type for the operation returned by -// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. -message CreateDatabaseMetadata { - // The database being created. - string database = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; -} - -// The request for -// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. -message GetDatabaseRequest { - // Required. The name of the requested database. Values are of the form - // `projects//instances//databases/`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - } - ]; -} - -// Enqueues the given DDL statements to be applied, in order but not -// necessarily all at once, to the database schema at some point (or -// points) in the future. The server checks that the statements -// are executable (syntactically valid, name tables that exist, etc.) -// before enqueueing them, but they may still fail upon -// later execution (e.g., if a statement from another batch of -// statements is applied first and it conflicts in some way, or if -// there is some data-related problem like a `NULL` value in a column to -// which `NOT NULL` would be added). If a statement fails, all -// subsequent statements in the batch are automatically cancelled. -// -// Each batch of statements is assigned a name which can be used with -// the [Operations][google.longrunning.Operations] API to monitor -// progress. See the -// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] -// field for more details. -message UpdateDatabaseDdlRequest { - // Required. The database to update. - string database = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - } - ]; - - // Required. DDL statements to be applied to the database. - repeated string statements = 2 [(google.api.field_behavior) = REQUIRED]; - - // If empty, the new update request is assigned an - // automatically-generated operation ID. Otherwise, `operation_id` - // is used to construct the name of the resulting - // [Operation][google.longrunning.Operation]. 
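To make the operation_id semantics described here concrete, a sketch of a schema change issued with an explicit operation ID (same assumptions: 1.x client, placeholder resource names, illustrative DDL):

from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
database = client.database_path("my-project", "my-instance", "my-database")

op = client.update_database_ddl(
    database,
    statements=["CREATE TABLE Singers (SingerId INT64 NOT NULL) PRIMARY KEY (SingerId)"],
    operation_id="add_singers_table",  # stable handle in case the call is replayed
)
op.result()  # block until the DDL batch is applied; the response type is Empty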
- // - // Specifying an explicit operation ID simplifies determining - // whether the statements were executed in the event that the - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] - // call is replayed, or the return value is otherwise lost: the - // [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] - // and `operation_id` fields can be combined to form the - // [name][google.longrunning.Operation.name] of the resulting - // [longrunning.Operation][google.longrunning.Operation]: - // `/operations/`. - // - // `operation_id` should be unique within the database, and must be - // a valid identifier: `[a-z][a-z0-9_]*`. Note that - // automatically-generated operation IDs always begin with an - // underscore. If the named operation already exists, - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] - // returns `ALREADY_EXISTS`. - string operation_id = 3; -} - -// Metadata type for the operation returned by -// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. -message UpdateDatabaseDdlMetadata { - // The database being modified. - string database = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; - - // For an update this list contains all the statements. For an - // individual statement, this list contains only that statement. - repeated string statements = 2; - - // Reports the commit timestamps of all statements that have - // succeeded so far, where `commit_timestamps[i]` is the commit - // timestamp for the statement `statements[i]`. - repeated google.protobuf.Timestamp commit_timestamps = 3; -} - -// The request for -// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. -message DropDatabaseRequest { - // Required. The database to be dropped. - string database = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - } - ]; -} - -// The request for -// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. -message GetDatabaseDdlRequest { - // Required. The database whose schema we wish to get. - string database = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - } - ]; -} - -// The response for -// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. -message GetDatabaseDdlResponse { - // A list of formatted DDL statements defining the schema of the database - // specified in the request. - repeated string statements = 1; -} diff --git a/spanner/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py b/spanner/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py deleted file mode 100644 index 35fd22717e4f..000000000000 --- a/spanner/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py +++ /dev/null @@ -1,1146 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/spanner/admin/database_v1/proto/spanner_database_admin.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner/admin/database_v1/proto/spanner_database_admin.proto", - package="google.spanner.admin.database.v1", - syntax="proto3", - serialized_options=_b( - "\n$com.google.spanner.admin.database.v1B\031SpannerDatabaseAdminProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\252\002&Google.Cloud.Spanner.Admin.Database.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Database\\V1\352AJ\n\037spanner.googleapis.com/Instance\022'projects/{project}/instances/{instance}" - ), - serialized_pb=_b( - '\nIgoogle/cloud/spanner/admin/database_v1/proto/spanner_database_admin.proto\x12 google.spanner.admin.database.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xf6\x01\n\x08\x44\x61tabase\x12\x0c\n\x04name\x18\x01 \x01(\t\x12?\n\x05state\x18\x02 
\x01(\x0e\x32\x30.google.spanner.admin.database.v1.Database.State"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:b\xea\x41_\n\x1fspanner.googleapis.com/Database\x12\x82\xd3\xe4\x93\x02/\x12-/v1/{parent=projects/*/instances/*}/databases\xda\x41\x06parent\x12\xa4\x02\n\x0e\x43reateDatabase\x12\x37.google.spanner.admin.database.v1.CreateDatabaseRequest\x1a\x1d.google.longrunning.Operation"\xb9\x01\x82\xd3\xe4\x93\x02\x32"-/v1/{parent=projects/*/instances/*}/databases:\x01*\xda\x41\x17parent,create_statement\xca\x41\x64\n)google.spanner.admin.database.v1.Database\x12\x37google.spanner.admin.database.v1.CreateDatabaseMetadata\x12\xad\x01\n\x0bGetDatabase\x12\x34.google.spanner.admin.database.v1.GetDatabaseRequest\x1a*.google.spanner.admin.database.v1.Database"<\x82\xd3\xe4\x93\x02/\x12-/v1/{name=projects/*/instances/*/databases/*}\xda\x41\x04name\x12\x9d\x02\n\x11UpdateDatabaseDdl\x12:.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest\x1a\x1d.google.longrunning.Operation"\xac\x01\x82\xd3\xe4\x93\x02:25/v1/{database=projects/*/instances/*/databases/*}/ddl:\x01*\xda\x41\x13\x64\x61tabase,statements\xca\x41S\n\x15google.protobuf.Empty\x12:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata\x12\xa3\x01\n\x0c\x44ropDatabase\x12\x35.google.spanner.admin.database.v1.DropDatabaseRequest\x1a\x16.google.protobuf.Empty"D\x82\xd3\xe4\x93\x02\x33*1/v1/{database=projects/*/instances/*/databases/*}\xda\x41\x08\x64\x61tabase\x12\xcd\x01\n\x0eGetDatabaseDdl\x12\x37.google.spanner.admin.database.v1.GetDatabaseDdlRequest\x1a\x38.google.spanner.admin.database.v1.GetDatabaseDdlResponse"H\x82\xd3\xe4\x93\x02\x37\x12\x35/v1/{database=projects/*/instances/*/databases/*}/ddl\xda\x41\x08\x64\x61tabase\x12\xeb\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\x9f\x01\x82\xd3\xe4\x93\x02\x86\x01">/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy:\x01*ZA"/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy:\x01*ZA"/instances//databases/``, - where ```` is as specified in the ``CREATE - DATABASE`` statement. This name can be passed to other API - methods to identify the database. - state: - Output only. The current database state. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.Database) - ), -) -_sym_db.RegisterMessage(Database) - -ListDatabasesRequest = _reflection.GeneratedProtocolMessageType( - "ListDatabasesRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTDATABASESREQUEST, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""The request for - [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. - - - Attributes: - parent: - Required. The instance whose databases should be listed. - Values are of the form - ``projects//instances/``. - page_size: - Number of databases to be returned in the response. If 0 or - less, defaults to the server's maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next\_page\_tok - en][google.spanner.admin.database.v1.ListDatabasesResponse.nex - t\_page\_token] from a previous [ListDatabasesResponse][google - .spanner.admin.database.v1.ListDatabasesResponse]. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabasesRequest) - ), -) -_sym_db.RegisterMessage(ListDatabasesRequest) - -ListDatabasesResponse = _reflection.GeneratedProtocolMessageType( - "ListDatabasesResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTDATABASESRESPONSE, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""The response for - [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. - - - Attributes: - databases: - Databases that matched the request. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListDatabas - es][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabas - es] call to fetch more of the matching databases. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabasesResponse) - ), -) -_sym_db.RegisterMessage(ListDatabasesResponse) - -CreateDatabaseRequest = _reflection.GeneratedProtocolMessageType( - "CreateDatabaseRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEDATABASEREQUEST, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""The request for - [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. - - - Attributes: - parent: - Required. The name of the instance that will serve the new - database. Values are of the form - ``projects//instances/``. - create_statement: - Required. A ``CREATE DATABASE`` statement, which specifies the - ID of the new database. The database ID must conform to the - regular expression ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be - between 2 and 30 characters in length. If the database ID is a - reserved word or if it contains a hyphen, the database ID must - be enclosed in backticks (`````). - extra_statements: - An optional list of DDL statements to run inside the newly - created database. Statements can create tables, indexes, etc. - These statements execute atomically with the creation of the - database: if there is an error in any statement, the database - is not created. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateDatabaseRequest) - ), -) -_sym_db.RegisterMessage(CreateDatabaseRequest) - -CreateDatabaseMetadata = _reflection.GeneratedProtocolMessageType( - "CreateDatabaseMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEDATABASEMETADATA, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""Metadata type for the operation returned by - [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. - - - Attributes: - database: - The database being created. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateDatabaseMetadata) - ), -) -_sym_db.RegisterMessage(CreateDatabaseMetadata) - -GetDatabaseRequest = _reflection.GeneratedProtocolMessageType( - "GetDatabaseRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETDATABASEREQUEST, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""The request for - [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. - - - Attributes: - name: - Required. The name of the requested database. Values are of - the form ``projects//instances//databases/< - database>``. 
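A sketch of the request flow documented in these message docstrings: creating a database and waiting on the returned long-running operation (1.x client assumed, placeholder names):

from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
parent = client.instance_path("my-project", "my-instance")

op = client.create_database(
    parent,
    create_statement="CREATE DATABASE `example-db`",  # backticks: the ID contains a hyphen
    extra_statements=[
        "CREATE TABLE Albums (AlbumId INT64 NOT NULL) PRIMARY KEY (AlbumId)"
    ],  # executed atomically with the database creation
)
database = op.result()  # metadata: CreateDatabaseMetadata, response: Database
print(database.name)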
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseRequest) - ), -) -_sym_db.RegisterMessage(GetDatabaseRequest) - -UpdateDatabaseDdlRequest = _reflection.GeneratedProtocolMessageType( - "UpdateDatabaseDdlRequest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEDATABASEDDLREQUEST, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""Enqueues the given DDL statements to be applied, in order - but not necessarily all at once, to the database schema at some point - (or points) in the future. The server checks that the statements are - executable (syntactically valid, name tables that exist, etc.) before - enqueueing them, but they may still fail upon later execution (e.g., if - a statement from another batch of statements is applied first and it - conflicts in some way, or if there is some data-related problem like a - ``NULL`` value in a column to which ``NOT NULL`` would be added). If a - statement fails, all subsequent statements in the batch are - automatically cancelled. - - Each batch of statements is assigned a name which can be used with the - [Operations][google.longrunning.Operations] API to monitor progress. See - the - [operation\_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation\_id] - field for more details. - - - Attributes: - database: - Required. The database to update. - statements: - Required. DDL statements to be applied to the database. - operation_id: - If empty, the new update request is assigned an automatically- - generated operation ID. Otherwise, ``operation_id`` is used to - construct the name of the resulting - [Operation][google.longrunning.Operation]. Specifying an - explicit operation ID simplifies determining whether the - statements were executed in the event that the [UpdateDatabase - Ddl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateData - baseDdl] call is replayed, or the return value is otherwise - lost: the [database][google.spanner.admin.database.v1.UpdateDa - tabaseDdlRequest.database] and ``operation_id`` fields can be - combined to form the [name][google.longrunning.Operation.name] - of the resulting - [longrunning.Operation][google.longrunning.Operation]: - ``/operations/``. ``operation_id`` - should be unique within the database, and must be a valid - identifier: ``[a-z][a-z0-9_]*``. Note that automatically- - generated operation IDs always begin with an underscore. If - the named operation already exists, [UpdateDatabaseDdl][google - .spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] - returns ``ALREADY_EXISTS``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateDatabaseDdlRequest) - ), -) -_sym_db.RegisterMessage(UpdateDatabaseDdlRequest) - -UpdateDatabaseDdlMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateDatabaseDdlMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEDATABASEDDLMETADATA, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""Metadata type for the operation returned by - [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. - - - Attributes: - database: - The database being modified. - statements: - For an update this list contains all the statements. For an - individual statement, this list contains only that statement. 
- commit_timestamps: - Reports the commit timestamps of all statements that have - succeeded so far, where ``commit_timestamps[i]`` is the commit - timestamp for the statement ``statements[i]``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata) - ), -) -_sym_db.RegisterMessage(UpdateDatabaseDdlMetadata) - -DropDatabaseRequest = _reflection.GeneratedProtocolMessageType( - "DropDatabaseRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DROPDATABASEREQUEST, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""The request for - [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. - - - Attributes: - database: - Required. The database to be dropped. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DropDatabaseRequest) - ), -) -_sym_db.RegisterMessage(DropDatabaseRequest) - -GetDatabaseDdlRequest = _reflection.GeneratedProtocolMessageType( - "GetDatabaseDdlRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETDATABASEDDLREQUEST, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""The request for - [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. - - - Attributes: - database: - Required. The database whose schema we wish to get. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseDdlRequest) - ), -) -_sym_db.RegisterMessage(GetDatabaseDdlRequest) - -GetDatabaseDdlResponse = _reflection.GeneratedProtocolMessageType( - "GetDatabaseDdlResponse", - (_message.Message,), - dict( - DESCRIPTOR=_GETDATABASEDDLRESPONSE, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""The response for - [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. - - - Attributes: - statements: - A list of formatted DDL statements defining the schema of the - database specified in the request. 
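And the corresponding read path, fetching the applied schema as formatted DDL (same assumptions):

from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
database = client.database_path("my-project", "my-instance", "my-database")

# Pending schema updates are not included; only the applied schema is returned.
for statement in client.get_database_ddl(database).statements:
    print(statement)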
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetDatabaseDdlResponse) - ), -) -_sym_db.RegisterMessage(GetDatabaseDdlResponse) - - -DESCRIPTOR._options = None -_DATABASE._options = None -_LISTDATABASESREQUEST.fields_by_name["parent"]._options = None -_CREATEDATABASEREQUEST.fields_by_name["parent"]._options = None -_CREATEDATABASEREQUEST.fields_by_name["create_statement"]._options = None -_CREATEDATABASEMETADATA.fields_by_name["database"]._options = None -_GETDATABASEREQUEST.fields_by_name["name"]._options = None -_UPDATEDATABASEDDLREQUEST.fields_by_name["database"]._options = None -_UPDATEDATABASEDDLREQUEST.fields_by_name["statements"]._options = None -_UPDATEDATABASEDDLMETADATA.fields_by_name["database"]._options = None -_DROPDATABASEREQUEST.fields_by_name["database"]._options = None -_GETDATABASEDDLREQUEST.fields_by_name["database"]._options = None - -_DATABASEADMIN = _descriptor.ServiceDescriptor( - name="DatabaseAdmin", - full_name="google.spanner.admin.database.v1.DatabaseAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=_b( - "\312A\026spanner.googleapis.com\322A\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.admin" - ), - serialized_start=1675, - serialized_end=3896, - methods=[ - _descriptor.MethodDescriptor( - name="ListDatabases", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases", - index=0, - containing_service=None, - input_type=_LISTDATABASESREQUEST, - output_type=_LISTDATABASESRESPONSE, - serialized_options=_b( - "\202\323\344\223\002/\022-/v1/{parent=projects/*/instances/*}/databases\332A\006parent" - ), - ), - _descriptor.MethodDescriptor( - name="CreateDatabase", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase", - index=1, - containing_service=None, - input_type=_CREATEDATABASEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0022"-/v1/{parent=projects/*/instances/*}/databases:\001*\332A\027parent,create_statement\312Ad\n)google.spanner.admin.database.v1.Database\0227google.spanner.admin.database.v1.CreateDatabaseMetadata' - ), - ), - _descriptor.MethodDescriptor( - name="GetDatabase", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase", - index=2, - containing_service=None, - input_type=_GETDATABASEREQUEST, - output_type=_DATABASE, - serialized_options=_b( - "\202\323\344\223\002/\022-/v1/{name=projects/*/instances/*/databases/*}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="UpdateDatabaseDdl", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl", - index=3, - containing_service=None, - input_type=_UPDATEDATABASEDDLREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002:25/v1/{database=projects/*/instances/*/databases/*}/ddl:\001*\332A\023database,statements\312AS\n\025google.protobuf.Empty\022:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata" - ), - ), - _descriptor.MethodDescriptor( - name="DropDatabase", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase", - index=4, - containing_service=None, - input_type=_DROPDATABASEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\0023*1/v1/{database=projects/*/instances/*/databases/*}\332A\010database" - ), - ), - _descriptor.MethodDescriptor( - name="GetDatabaseDdl", - 
full_name="google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl", - index=5, - containing_service=None, - input_type=_GETDATABASEDDLREQUEST, - output_type=_GETDATABASEDDLRESPONSE, - serialized_options=_b( - "\202\323\344\223\0027\0225/v1/{database=projects/*/instances/*/databases/*}/ddl\332A\010database" - ), - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.spanner.admin.database.v1.DatabaseAdmin.SetIamPolicy", - index=6, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\002\206\001">/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy:\001*ZA"/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy:\001*ZA"/operations/` and - can be used to track preparation of the database. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. - The [response][google.longrunning.Operation.response] field type is - [Database][google.spanner.admin.database.v1.Database], if successful. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetDatabase(self, request, context): - """Gets the state of a Cloud Spanner database. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateDatabaseDdl(self, request, context): - """Updates the schema of a Cloud Spanner database by - creating/altering/dropping tables, columns, indexes, etc. The returned - [long-running operation][google.longrunning.Operation] will have a name of - the format `/operations/` and can be used to - track execution of the schema change(s). The - [metadata][google.longrunning.Operation.metadata] field type is - [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. - The operation has no response. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DropDatabase(self, request, context): - """Drops (aka deletes) a Cloud Spanner database. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetDatabaseDdl(self, request, context): - """Returns the schema of a Cloud Spanner database as a list of formatted - DDL statements. This method does not show pending schema updates, those may - be queried using the [Operations][google.longrunning.Operations] API. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on a database resource. - Replaces any existing policy. - - Authorization requires `spanner.databases.setIamPolicy` - permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for a database resource. 
- Returns an empty policy if a database exists but does - not have a policy set. - - Authorization requires `spanner.databases.getIamPolicy` permission on - [resource][google.iam.v1.GetIamPolicyRequest.resource]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified database resource. - - Attempting this RPC on a non-existent Cloud Spanner database will - result in a NOT_FOUND error if the user has - `spanner.databases.list` permission on the containing Cloud - Spanner instance. Otherwise returns an empty set of permissions. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_DatabaseAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListDatabases": grpc.unary_unary_rpc_method_handler( - servicer.ListDatabases, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabasesResponse.SerializeToString, - ), - "CreateDatabase": grpc.unary_unary_rpc_method_handler( - servicer.CreateDatabase, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.CreateDatabaseRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetDatabase": grpc.unary_unary_rpc_method_handler( - servicer.GetDatabase, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.Database.SerializeToString, - ), - "UpdateDatabaseDdl": grpc.unary_unary_rpc_method_handler( - servicer.UpdateDatabaseDdl, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DropDatabase": grpc.unary_unary_rpc_method_handler( - servicer.DropDatabase, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.DropDatabaseRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetDatabaseDdl": grpc.unary_unary_rpc_method_handler( - servicer.GetDatabaseDdl, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - 
servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/spanner/google/cloud/spanner_admin_database_v1/types.py b/spanner/google/cloud/spanner_admin_database_v1/types.py deleted file mode 100644 index 43103a0b6d9d..000000000000 --- a/spanner/google/cloud/spanner_admin_database_v1/types.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -import sys - -from google.api import http_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.iam.v1.logging import audit_data_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2 - - -_shared_modules = [ - http_pb2, - iam_policy_pb2, - policy_pb2, - audit_data_pb2, - operations_pb2, - any_pb2, - descriptor_pb2, - empty_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [spanner_database_admin_pb2] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.spanner_admin_database_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/spanner/google/cloud/spanner_admin_instance_v1/__init__.py b/spanner/google/cloud/spanner_admin_instance_v1/__init__.py deleted file mode 100644 index 53f32d3b4705..000000000000 --- a/spanner/google/cloud/spanner_admin_instance_v1/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import - -from google.cloud.spanner_admin_instance_v1 import types -from google.cloud.spanner_admin_instance_v1.gapic import enums -from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - - -class InstanceAdminClient(instance_admin_client.InstanceAdminClient): - __doc__ = instance_admin_client.InstanceAdminClient.__doc__ - enums = enums - - -__all__ = ("enums", "types", "InstanceAdminClient") diff --git a/spanner/google/cloud/spanner_admin_instance_v1/gapic/__init__.py b/spanner/google/cloud/spanner_admin_instance_v1/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_admin_instance_v1/gapic/enums.py b/spanner/google/cloud/spanner_admin_instance_v1/gapic/enums.py deleted file mode 100644 index cfb40655bedf..000000000000 --- a/spanner/google/cloud/spanner_admin_instance_v1/gapic/enums.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class Instance(object): - class State(enum.IntEnum): - """ - Indicates the current state of the instance. - - Attributes: - STATE_UNSPECIFIED (int): Not specified. - CREATING (int): The instance is still being created. Resources may not be - available yet, and operations such as database creation may not - work. - READY (int): The instance is fully created and ready to do work such as - creating databases. - """ - - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 - - -class ReplicaInfo(object): - class ReplicaType(enum.IntEnum): - """ - Indicates the type of replica. See the `replica types - documentation `__ - for more details. - - Attributes: - TYPE_UNSPECIFIED (int): Not specified. - READ_WRITE (int): Read-write replicas support both reads and writes. These replicas: - - - Maintain a full copy of your data. - - Serve reads. - - Can vote whether to commit a write. - - Participate in leadership election. - - Are eligible to become a leader. - READ_ONLY (int): Read-only replicas only support reads (not writes). Read-only replicas: - - - Maintain a full copy of your data. - - Serve reads. - - Do not participate in voting to commit writes. - - Are not eligible to become a leader. - WITNESS (int): Witness replicas don't support reads but do participate in voting to - commit writes. Witness replicas: - - - Do not maintain a full copy of data. - - Do not serve reads. - - Vote whether to commit writes. - - Participate in leader election but are not eligible to become leader. 
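These enum wrappers were re-exported as spanner_admin_instance_v1.enums; a small sketch of how the values were typically used (assuming the 1.x package and a placeholder instance):

from google.cloud import spanner_admin_instance_v1
from google.cloud.spanner_admin_instance_v1 import enums

client = spanner_admin_instance_v1.InstanceAdminClient()
instance = client.get_instance(client.instance_path("my-project", "my-instance"))

# The proto field is a plain int; the IntEnum wrapper gives it a readable name.
if instance.state == enums.Instance.State.READY:
    print("instance is ready:", instance.name)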
- """ - - TYPE_UNSPECIFIED = 0 - READ_WRITE = 1 - READ_ONLY = 2 - WITNESS = 3 diff --git a/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py b/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py deleted file mode 100644 index c7c4912f2a55..000000000000 --- a/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py +++ /dev/null @@ -1,1226 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.spanner.admin.instance.v1 InstanceAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.spanner_admin_instance_v1.gapic import enums -from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client_config -from google.cloud.spanner_admin_instance_v1.gapic.transports import ( - instance_admin_grpc_transport, -) -from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2 -from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2_grpc -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-spanner").version - - -class InstanceAdminClient(object): - """ - Cloud Spanner Instance Admin API - - The Cloud Spanner Instance Admin API can be used to create, delete, - modify and list instances. Instances are dedicated Cloud Spanner serving - and storage resources to be used by Cloud Spanner databases. - - Each instance has a "configuration", which dictates where the - serving resources for the Cloud Spanner instance are located (e.g., - US-central, Europe). Configurations are created by Google based on - resource availability. - - Cloud Spanner billing is based on the instances that exist and their - sizes. After an instance exists, there are no additional - per-database or per-operation charges for use of the instance - (though there may be additional network bandwidth charges). - Instances offer isolation: problems with databases in one instance - will not affect other instances. However, within an instance - databases can affect each other. 
For example, if one database in an - instance receives a lot of requests and consumes most of the - instance resources, fewer resources are available for other - databases in that instance, and their performance may suffer. - """ - - SERVICE_ADDRESS = "spanner.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.spanner.admin.instance.v1.InstanceAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - InstanceAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def instance_config_path(cls, project, instance_config): - """Return a fully-qualified instance_config string.""" - return google.api_core.path_template.expand( - "projects/{project}/instanceConfigs/{instance_config}", - project=project, - instance_config=instance_config, - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", project=project - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.InstanceAdminGrpcTransport, - Callable[[~.Credentials, type], ~.InstanceAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = instance_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=instance_admin_grpc_transport.InstanceAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = instance_admin_grpc_transport.InstanceAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def list_instance_configs( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists the supported instance configurations for a given project. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_instance_configs(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_instance_configs(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The name of the project for which a list of supported instance - configurations is requested. Values are of the form - ``projects/``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. 
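The ``client_options`` handling above accepts either a ``ClientOptions`` object or a plain dict; a sketch of overriding the API endpoint (the emulator address is illustrative)::

    from google.api_core.client_options import ClientOptions
    from google.cloud import spanner_admin_instance_v1

    # Point the client at a non-default endpoint, e.g. a local emulator.
    options = ClientOptions(api_endpoint="localhost:9010")
    client = spanner_admin_instance_v1.InstanceAdminClient(client_options=options)

    # A dict of the same shape is converted via client_options.from_dict().
    client = spanner_admin_instance_v1.InstanceAdminClient(
        client_options={"api_endpoint": "localhost:9010"}
    )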
If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_admin_instance_v1.types.InstanceConfig` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_instance_configs" not in self._inner_api_calls: - self._inner_api_calls[ - "list_instance_configs" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instance_configs, - default_retry=self._method_configs["ListInstanceConfigs"].retry, - default_timeout=self._method_configs["ListInstanceConfigs"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.ListInstanceConfigsRequest( - parent=parent, page_size=page_size - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_instance_configs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="instance_configs", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_instance_config( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about a particular instance configuration. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> name = client.instance_config_path('[PROJECT]', '[INSTANCE_CONFIG]') - >>> - >>> response = client.get_instance_config(name) - - Args: - name (str): Required. The name of the requested instance configuration. Values are - of the form ``projects//instanceConfigs/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.InstanceConfig` instance. 
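Every RPC method here accepts optional ``retry`` and ``timeout`` overrides; a hedged sketch of replacing the defaults for a single call (the exception classes and numeric values are assumptions chosen to mirror the idempotent defaults in the client config further below)::

    from google.api_core import exceptions, retry
    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()

    custom_retry = retry.Retry(
        predicate=retry.if_exception_type(
            exceptions.ServiceUnavailable, exceptions.DeadlineExceeded
        ),
        initial=1.0,     # first delay, in seconds
        multiplier=1.3,  # back-off factor between attempts
        maximum=32.0,    # cap on any single delay
        deadline=120.0,  # overall deadline across attempts
    )

    config = client.get_instance_config(
        client.instance_config_path("my-project", "my-config"),
        retry=custom_retry,
        timeout=30.0,  # applies to each individual attempt when retry is set
    )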
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_instance_config" not in self._inner_api_calls: - self._inner_api_calls[ - "get_instance_config" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance_config, - default_retry=self._method_configs["GetInstanceConfig"].retry, - default_timeout=self._method_configs["GetInstanceConfig"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.GetInstanceConfigRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_instance_config"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_instances( - self, - parent, - page_size=None, - filter_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all instances in the given project. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # Iterate over all results - >>> for element in client.list_instances(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_instances(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The name of the project for which a list of instances is - requested. Values are of the form ``projects/``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - filter_ (str): An expression for filtering the results of the request. Filter rules are - case insensitive. The fields eligible for filtering are: - - - ``name`` - - ``display_name`` - - ``labels.key`` where key is the name of a label - - Some examples of using filters are: - - - ``name:*`` --> The instance has a name. - - ``name:Howl`` --> The instance's name contains the string "howl". - - ``name:HOWL`` --> Equivalent to above. - - ``NAME:howl`` --> Equivalent to above. - - ``labels.env:*`` --> The instance has the label "env". - - ``labels.env:dev`` --> The instance has the label "env" and the value - of the label contains the string "dev". - - ``name:howl labels.env:dev`` --> The instance's name contains "howl" - and it has the label "env" with its value containing "dev". - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_instances" not in self._inner_api_calls: - self._inner_api_calls[ - "list_instances" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instances, - default_retry=self._method_configs["ListInstances"].retry, - default_timeout=self._method_configs["ListInstances"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.ListInstancesRequest( - parent=parent, page_size=page_size, filter=filter_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_instances"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="instances", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_instance( - self, - name, - field_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about a particular instance. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.get_instance(name) - - Args: - name (str): Required. The name of the requested instance. Values are of the form - ``projects//instances/``. - field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): If field\_mask is present, specifies the subset of [][google.spanner.admin.instance.v1.Instance] fields - that should be returned. If absent, all [][google.spanner.admin.instance.v1.Instance] fields are - returned. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
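A short sketch of the filter syntax documented above (project and label values are illustrative)::

    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()
    parent = client.project_path("my-project")

    # Instances whose name contains "howl" and that carry the label env=dev.
    for instance in client.list_instances(parent, filter_="name:howl labels.env:dev"):
        print(instance.name, instance.display_name)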
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "get_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance, - default_retry=self._method_configs["GetInstance"].retry, - default_timeout=self._method_configs["GetInstance"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.GetInstanceRequest( - name=name, field_mask=field_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_instance( - self, - parent, - instance_id, - instance, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates an instance and begins preparing it to begin serving. The - returned ``long-running operation`` can be used to track the progress of - preparing the new instance. The instance name is assigned by the caller. - If the named instance already exists, ``CreateInstance`` returns - ``ALREADY_EXISTS``. - - Immediately upon completion of this request: - - - The instance is readable via the API, with all requested attributes - but no allocated resources. Its state is ``CREATING``. - - Until completion of the returned operation: - - - Cancelling the operation renders the instance immediately unreadable - via the API. - - The instance can be deleted. - - All other attempts to modify the instance are rejected. - - Upon completion of the returned operation: - - - Billing for all successfully-allocated resources begins (some types - may have lower than the requested levels). - - Databases can be created in the instance. - - The instance's allocated resource levels are readable via the API. - - The instance's state becomes ``READY``. - - The returned ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - creation of the instance. The ``metadata`` field type is - ``CreateInstanceMetadata``. The ``response`` field type is ``Instance``, - if successful. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `instance_id`: - >>> instance_id = '' - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> response = client.create_instance(parent, instance_id, instance) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The name of the project in which to create the instance. - Values are of the form ``projects/``. - instance_id (str): Required. The ID of the instance to create. Valid identifiers are of the - form ``[a-z][-a-z0-9]*[a-z0-9]`` and must be between 2 and 64 characters - in length. 
- instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to create. The name may be omitted, but if - specified must be ``/instances/``. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "create_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_instance, - default_retry=self._method_configs["CreateInstance"].retry, - default_timeout=self._method_configs["CreateInstance"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - spanner_instance_admin_pb2.Instance, - metadata_type=spanner_instance_admin_pb2.CreateInstanceMetadata, - ) - - def update_instance( - self, - instance, - field_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an instance, and begins allocating or releasing resources as - requested. The returned ``long-running operation`` can be used to track - the progress of updating the instance. If the named instance does not - exist, returns ``NOT_FOUND``. - - Immediately upon completion of this request: - - - For resource types for which a decrease in the instance's allocation - has been requested, billing is based on the newly-requested level. - - Until completion of the returned operation: - - - Cancelling the operation sets its metadata's ``cancel_time``, and - begins restoring resources to their pre-request values. The operation - is guaranteed to succeed at undoing all resource changes, after which - point it terminates with a ``CANCELLED`` status. - - All other attempts to modify the instance are rejected. - - Reading the instance via the API continues to give the pre-request - resource levels. 
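Both ``create_instance`` and ``update_instance`` return long-running-operation futures; besides the callback style shown in the docstrings, the future can simply be blocked on (all resource names below are illustrative)::

    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()
    parent = client.project_path("my-project")

    operation = client.create_instance(
        parent,
        "my-instance",
        {
            "name": client.instance_path("my-project", "my-instance"),
            "config": client.instance_config_path("my-project", "my-config"),
            "display_name": "My instance",
            "node_count": 1,
        },
    )

    # Block until the instance reaches READY; raises if creation fails.
    instance = operation.result(timeout=300)
    print(instance.state)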
- - Upon completion of the returned operation: - - - Billing begins for all successfully-allocated resources (some types - may have lower than the requested levels). - - All newly-reserved resources are available for serving the instance's - tables. - - The instance's new resource levels are readable via the API. - - The returned ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - the instance modification. The ``metadata`` field type is - ``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``, - if successful. - - Authorization requires ``spanner.instances.update`` permission on - resource ``name``. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `field_mask`: - >>> field_mask = {} - >>> - >>> response = client.update_instance(instance, field_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to update, which must always include the instance - name. Otherwise, only fields mentioned in - [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field\_mask] - need be included. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` - field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in - [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] - should be updated. The field mask must always be specified; this - prevents any future fields in - [][google.spanner.admin.instance.v1.Instance] from being erased - accidentally by clients that do not know about them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_instance, - default_retry=self._method_configs["UpdateInstance"].retry, - default_timeout=self._method_configs["UpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.UpdateInstanceRequest( - instance=instance, field_mask=field_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("instance.name", instance.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - spanner_instance_admin_pb2.Instance, - metadata_type=spanner_instance_admin_pb2.UpdateInstanceMetadata, - ) - - def delete_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an instance. - - Immediately upon completion of the request: - - - Billing ceases for all of the instance's reserved resources. - - Soon afterward: - - - The instance and *all of its databases* immediately and irrevocably - disappear from the API. All data in the databases is permanently - deleted. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> client.delete_instance(name) - - Args: - name (str): Required. The name of the instance to be deleted. Values are of the form - ``projects//instances/`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_instance, - default_retry=self._method_configs["DeleteInstance"].retry, - default_timeout=self._method_configs["DeleteInstance"].timeout, - client_info=self._client_info, - ) - - request = spanner_instance_admin_pb2.DeleteInstanceRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Authorization requires ``spanner.instances.setIamPolicy`` on - ``resource``. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for an instance resource. Returns an - empty policy if an instance exists but does not have a policy set. - - Authorization requires ``spanner.instances.getIamPolicy`` on - ``resource``. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_admin_instance_v1.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified instance - resource. - - Attempting this RPC on a non-existent Cloud Spanner instance resource - will result in a NOT\_FOUND error if the user has - ``spanner.instances.list`` permission on the containing Google Cloud - Project. Otherwise returns an empty set of permissions. - - Example: - >>> from google.cloud import spanner_admin_instance_v1 - >>> - >>> client = spanner_admin_instance_v1.InstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with - wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_admin_instance_v1.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client_config.py b/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client_config.py deleted file mode 100644 index 4331f879de11..000000000000 --- a/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client_config.py +++ /dev/null @@ -1,73 +0,0 @@ -config = { - "interfaces": { - "google.spanner.admin.instance.v1.InstanceAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - } - }, - "methods": { - "ListInstanceConfigs": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetInstanceConfig": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListInstances": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetInstance": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "CreateInstance": { - "timeout_millis": 30000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateInstance": { - "timeout_millis": 30000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "DeleteInstance": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "SetIamPolicy": { - "timeout_millis": 30000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "GetIamPolicy": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "TestIamPermissions": { - "timeout_millis": 30000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/__init__.py b/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py b/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py deleted file mode 100644 index 1d3c404bf6a5..000000000000 --- 
a/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py +++ /dev/null @@ -1,340 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2_grpc - - -class InstanceAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.spanner.admin.instance.v1 InstanceAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/spanner.admin", - ) - - def __init__( - self, channel=None, credentials=None, address="spanner.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "instance_admin_stub": spanner_instance_admin_pb2_grpc.InstanceAdminStub( - channel - ) - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="spanner.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
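When finer control over the underlying gRPC channel is needed, the transport above can be built directly and handed to the client; a sketch under the defaults shown in this module::

    from google.cloud import spanner_admin_instance_v1
    from google.cloud.spanner_admin_instance_v1.gapic.transports import (
        instance_admin_grpc_transport,
    )

    # Constructing the transport without an explicit channel creates one with
    # the OAuth scopes and unlimited message sizes configured above.
    transport = instance_admin_grpc_transport.InstanceAdminGrpcTransport(
        address="spanner.googleapis.com:443"
    )
    client = spanner_admin_instance_v1.InstanceAdminClient(transport=transport)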
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def list_instance_configs(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.list_instance_configs`. - - Lists the supported instance configurations for a given project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].ListInstanceConfigs - - @property - def get_instance_config(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.get_instance_config`. - - Gets information about a particular instance configuration. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].GetInstanceConfig - - @property - def list_instances(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.list_instances`. - - Lists all instances in the given project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].ListInstances - - @property - def get_instance(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.get_instance`. - - Gets information about a particular instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].GetInstance - - @property - def create_instance(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.create_instance`. - - Creates an instance and begins preparing it to begin serving. The - returned ``long-running operation`` can be used to track the progress of - preparing the new instance. The instance name is assigned by the caller. - If the named instance already exists, ``CreateInstance`` returns - ``ALREADY_EXISTS``. - - Immediately upon completion of this request: - - - The instance is readable via the API, with all requested attributes - but no allocated resources. Its state is ``CREATING``. - - Until completion of the returned operation: - - - Cancelling the operation renders the instance immediately unreadable - via the API. - - The instance can be deleted. - - All other attempts to modify the instance are rejected. - - Upon completion of the returned operation: - - - Billing for all successfully-allocated resources begins (some types - may have lower than the requested levels). - - Databases can be created in the instance. - - The instance's allocated resource levels are readable via the API. - - The instance's state becomes ``READY``. - - The returned ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - creation of the instance. The ``metadata`` field type is - ``CreateInstanceMetadata``. The ``response`` field type is ``Instance``, - if successful. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].CreateInstance - - @property - def update_instance(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.update_instance`. - - Updates an instance, and begins allocating or releasing resources as - requested. The returned ``long-running operation`` can be used to track - the progress of updating the instance. If the named instance does not - exist, returns ``NOT_FOUND``. - - Immediately upon completion of this request: - - - For resource types for which a decrease in the instance's allocation - has been requested, billing is based on the newly-requested level. - - Until completion of the returned operation: - - - Cancelling the operation sets its metadata's ``cancel_time``, and - begins restoring resources to their pre-request values. The operation - is guaranteed to succeed at undoing all resource changes, after which - point it terminates with a ``CANCELLED`` status. - - All other attempts to modify the instance are rejected. - - Reading the instance via the API continues to give the pre-request - resource levels. - - Upon completion of the returned operation: - - - Billing begins for all successfully-allocated resources (some types - may have lower than the requested levels). - - All newly-reserved resources are available for serving the instance's - tables. - - The instance's new resource levels are readable via the API. - - The returned ``long-running operation`` will have a name of the format - ``/operations/`` and can be used to track - the instance modification. The ``metadata`` field type is - ``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``, - if successful. - - Authorization requires ``spanner.instances.update`` permission on - resource ``name``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].UpdateInstance - - @property - def delete_instance(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.delete_instance`. - - Deletes an instance. - - Immediately upon completion of the request: - - - Billing ceases for all of the instance's reserved resources. - - Soon afterward: - - - The instance and *all of its databases* immediately and irrevocably - disappear from the API. All data in the databases is permanently - deleted. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].DeleteInstance - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.set_iam_policy`. - - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Authorization requires ``spanner.instances.setIamPolicy`` on - ``resource``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].SetIamPolicy - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.get_iam_policy`. - - Gets the access control policy for an instance resource. Returns an - empty policy if an instance exists but does not have a policy set. 
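The IAM stubs above back the client's ``get_iam_policy``, ``set_iam_policy`` and ``test_iam_permissions`` methods; a sketch of reading a policy and probing permissions (the resource name and permission string are illustrative)::

    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()
    resource = client.instance_path("my-project", "my-instance")

    # An empty policy is returned if none has been set on the instance.
    policy = client.get_iam_policy(resource)

    # The response echoes back whichever of the requested permissions the
    # caller actually holds on the resource.
    response = client.test_iam_permissions(resource, ["spanner.databases.list"])
    print(list(response.permissions))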
- - Authorization requires ``spanner.instances.getIamPolicy`` on - ``resource``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].GetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`InstanceAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified instance - resource. - - Attempting this RPC on a non-existent Cloud Spanner instance resource - will result in a NOT\_FOUND error if the user has - ``spanner.instances.list`` permission on the containing Google Cloud - Project. Otherwise returns an empty set of permissions. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["instance_admin_stub"].TestIamPermissions diff --git a/spanner/google/cloud/spanner_admin_instance_v1/proto/__init__.py b/spanner/google/cloud/spanner_admin_instance_v1/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto b/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto deleted file mode 100644 index a4378741336c..000000000000 --- a/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto +++ /dev/null @@ -1,611 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.spanner.admin.instance.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/admin/instance/v1;instance"; -option java_multiple_files = true; -option java_outer_classname = "SpannerInstanceAdminProto"; -option java_package = "com.google.spanner.admin.instance.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Instance\\V1"; - -// Cloud Spanner Instance Admin API -// -// The Cloud Spanner Instance Admin API can be used to create, delete, -// modify and list instances. Instances are dedicated Cloud Spanner serving -// and storage resources to be used by Cloud Spanner databases. -// -// Each instance has a "configuration", which dictates where the -// serving resources for the Cloud Spanner instance are located (e.g., -// US-central, Europe). Configurations are created by Google based on -// resource availability. 
-// -// Cloud Spanner billing is based on the instances that exist and their -// sizes. After an instance exists, there are no additional -// per-database or per-operation charges for use of the instance -// (though there may be additional network bandwidth charges). -// Instances offer isolation: problems with databases in one instance -// will not affect other instances. However, within an instance -// databases can affect each other. For example, if one database in an -// instance receives a lot of requests and consumes most of the -// instance resources, fewer resources are available for other -// databases in that instance, and their performance may suffer. -service InstanceAdmin { - option (google.api.default_host) = "spanner.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/spanner.admin"; - - // Lists the supported instance configurations for a given project. - rpc ListInstanceConfigs(ListInstanceConfigsRequest) returns (ListInstanceConfigsResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*}/instanceConfigs" - }; - option (google.api.method_signature) = "parent"; - } - - // Gets information about a particular instance configuration. - rpc GetInstanceConfig(GetInstanceConfigRequest) returns (InstanceConfig) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instanceConfigs/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists all instances in the given project. - rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*}/instances" - }; - option (google.api.method_signature) = "parent"; - } - - // Gets information about a particular instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instances/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Creates an instance and begins preparing it to begin serving. The - // returned [long-running operation][google.longrunning.Operation] - // can be used to track the progress of preparing the new - // instance. The instance name is assigned by the caller. If the - // named instance already exists, `CreateInstance` returns - // `ALREADY_EXISTS`. - // - // Immediately upon completion of this request: - // - // * The instance is readable via the API, with all requested attributes - // but no allocated resources. Its state is `CREATING`. - // - // Until completion of the returned operation: - // - // * Cancelling the operation renders the instance immediately unreadable - // via the API. - // * The instance can be deleted. - // * All other attempts to modify the instance are rejected. - // - // Upon completion of the returned operation: - // - // * Billing for all successfully-allocated resources begins (some types - // may have lower than the requested levels). - // * Databases can be created in the instance. - // * The instance's allocated resource levels are readable via the API. - // * The instance's state becomes `READY`. - // - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track creation of the instance. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. 
- // The [response][google.longrunning.Operation.response] field type is - // [Instance][google.spanner.admin.instance.v1.Instance], if successful. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*}/instances" - body: "*" - }; - option (google.api.method_signature) = "parent,instance_id,instance"; - option (google.longrunning.operation_info) = { - response_type: "google.spanner.admin.instance.v1.Instance" - metadata_type: "google.spanner.admin.instance.v1.CreateInstanceMetadata" - }; - } - - // Updates an instance, and begins allocating or releasing resources - // as requested. The returned [long-running - // operation][google.longrunning.Operation] can be used to track the - // progress of updating the instance. If the named instance does not - // exist, returns `NOT_FOUND`. - // - // Immediately upon completion of this request: - // - // * For resource types for which a decrease in the instance's allocation - // has been requested, billing is based on the newly-requested level. - // - // Until completion of the returned operation: - // - // * Cancelling the operation sets its metadata's - // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins - // restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, - // after which point it terminates with a `CANCELLED` status. - // * All other attempts to modify the instance are rejected. - // * Reading the instance via the API continues to give the pre-request - // resource levels. - // - // Upon completion of the returned operation: - // - // * Billing begins for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources are available for serving the instance's - // tables. - // * The instance's new resource levels are readable via the API. - // - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track the instance modification. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - // The [response][google.longrunning.Operation.response] field type is - // [Instance][google.spanner.admin.instance.v1.Instance], if successful. - // - // Authorization requires `spanner.instances.update` permission on - // resource [name][google.spanner.admin.instance.v1.Instance.name]. - rpc UpdateInstance(UpdateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{instance.name=projects/*/instances/*}" - body: "*" - }; - option (google.api.method_signature) = "instance,field_mask"; - option (google.longrunning.operation_info) = { - response_type: "google.spanner.admin.instance.v1.Instance" - metadata_type: "google.spanner.admin.instance.v1.UpdateInstanceMetadata" - }; - } - - // Deletes an instance. - // - // Immediately upon completion of the request: - // - // * Billing ceases for all of the instance's reserved resources. - // - // Soon afterward: - // - // * The instance and *all of its databases* immediately and - // irrevocably disappear from the API. All data in the databases - // is permanently deleted. 
- rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/instances/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Sets the access control policy on an instance resource. Replaces any - // existing policy. - // - // Authorization requires `spanner.instances.setIamPolicy` on - // [resource][google.iam.v1.SetIamPolicyRequest.resource]. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:setIamPolicy" - body: "*" - }; - option (google.api.method_signature) = "resource,policy"; - } - - // Gets the access control policy for an instance resource. Returns an empty - // policy if an instance exists but does not have a policy set. - // - // Authorization requires `spanner.instances.getIamPolicy` on - // [resource][google.iam.v1.GetIamPolicyRequest.resource]. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:getIamPolicy" - body: "*" - }; - option (google.api.method_signature) = "resource"; - } - - // Returns permissions that the caller has on the specified instance resource. - // - // Attempting this RPC on a non-existent Cloud Spanner instance resource will - // result in a NOT_FOUND error if the user has `spanner.instances.list` - // permission on the containing Google Cloud Project. Otherwise returns an - // empty set of permissions. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:testIamPermissions" - body: "*" - }; - option (google.api.method_signature) = "resource,permissions"; - } -} - -message ReplicaInfo { - // Indicates the type of replica. See the [replica types - // documentation](https://cloud.google.com/spanner/docs/replication#replica_types) - // for more details. - enum ReplicaType { - // Not specified. - TYPE_UNSPECIFIED = 0; - - // Read-write replicas support both reads and writes. These replicas: - // - // * Maintain a full copy of your data. - // * Serve reads. - // * Can vote whether to commit a write. - // * Participate in leadership election. - // * Are eligible to become a leader. - READ_WRITE = 1; - - // Read-only replicas only support reads (not writes). Read-only replicas: - // - // * Maintain a full copy of your data. - // * Serve reads. - // * Do not participate in voting to commit writes. - // * Are not eligible to become a leader. - READ_ONLY = 2; - - // Witness replicas don't support reads but do participate in voting to - // commit writes. Witness replicas: - // - // * Do not maintain a full copy of data. - // * Do not serve reads. - // * Vote whether to commit writes. - // * Participate in leader election but are not eligible to become leader. - WITNESS = 3; - } - - // The location of the serving resources, e.g. "us-central1". - string location = 1; - - // The type of replica. - ReplicaType type = 2; - - // If true, this location is designated as the default leader location where - // leader replicas are placed. See the [region types - // documentation](https://cloud.google.com/spanner/docs/instances#region_types) - // for more details. - bool default_leader_location = 3; -} - -// A possible configuration for a Cloud Spanner instance. 
Configurations -// define the geographic placement of nodes and their replication. -message InstanceConfig { - option (google.api.resource) = { - type: "spanner.googleapis.com/InstanceConfig" - pattern: "projects/{project}/instanceConfigs/{instance_config}" - }; - - // A unique identifier for the instance configuration. Values - // are of the form - // `projects//instanceConfigs/[a-z][-a-z0-9]*` - string name = 1; - - // The name of this instance configuration as it appears in UIs. - string display_name = 2; - - // The geographic placement of nodes in this instance configuration and their - // replication properties. - repeated ReplicaInfo replicas = 3; -} - -// An isolated set of Cloud Spanner resources on which databases can be hosted. -message Instance { - option (google.api.resource) = { - type: "spanner.googleapis.com/Instance" - pattern: "projects/{project}/instances/{instance}" - }; - - // Indicates the current state of the instance. - enum State { - // Not specified. - STATE_UNSPECIFIED = 0; - - // The instance is still being created. Resources may not be - // available yet, and operations such as database creation may not - // work. - CREATING = 1; - - // The instance is fully created and ready to do work such as - // creating databases. - READY = 2; - } - - // Required. A unique identifier for the instance, which cannot be changed - // after the instance is created. Values are of the form - // `projects//instances/[a-z][-a-z0-9]*[a-z0-9]`. The final - // segment of the name must be between 2 and 64 characters in length. - string name = 1; - - // Required. The name of the instance's configuration. Values are of the form - // `projects//instanceConfigs/`. See - // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and - // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. - string config = 2 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/InstanceConfig" - }]; - - // Required. The descriptive name for this instance as it appears in UIs. - // Must be unique per project and between 4 and 30 characters in length. - string display_name = 3; - - // Required. The number of nodes allocated to this instance. This may be zero - // in API responses for instances that are not yet in state `READY`. - // - // See [the - // documentation](https://cloud.google.com/spanner/docs/instances#node_count) - // for more information about nodes. - int32 node_count = 5; - - // Output only. The current instance state. For - // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], the state must be - // either omitted or set to `CREATING`. For - // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], the state must be - // either omitted or set to `READY`. - State state = 6; - - // Cloud Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. Cloud Labels can be used to filter collections of - // resources. They can be used to control how resource metrics are aggregated. - // And they can be used as arguments to policy management rules (e.g. route, - // firewall, load balancing, etc.). - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. 
- // * Label values must be between 0 and 63 characters long and must conform - // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. - // * No more than 64 labels can be associated with a given resource. - // - // See https://goo.gl/xmQnxf for more information on and examples of labels. - // - // If you plan to use labels in your own code, please note that additional - // characters may be allowed in the future. And so you are advised to use an - // internal label representation, such as JSON, which doesn't rely upon - // specific characters being disallowed. For example, representing labels - // as the string: name + "_" + value would prove problematic if we were to - // allow "_" in a future release. - map labels = 7; - - // Output only. The endpoint URIs based on the instance config. - // For example, instances located in a specific cloud region (or multi region) - // such as nam3, would have a nam3 specific endpoint URI. - // This URI is to be used implictly by SDK clients, with fallback to default - // URI. These endpoints are intended to optimize the network routing between - // the client and the instance's serving resources. - // If multiple endpoints are present, client may establish connections using - // any of the given URIs. - repeated string endpoint_uris = 8; -} - -// The request for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. -message ListInstanceConfigsRequest { - // Required. The name of the project for which a list of supported instance - // configurations is requested. Values are of the form - // `projects/`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Number of instance configurations to be returned in the response. If 0 or - // less, defaults to the server's maximum allowed page size. - int32 page_size = 2; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token] - // from a previous [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse]. - string page_token = 3; -} - -// The response for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. -message ListInstanceConfigsResponse { - // The list of requested instance configurations. - repeated InstanceConfig instance_configs = 1; - - // `next_page_token` can be sent in a subsequent - // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs] call to - // fetch more of the matching instance configurations. - string next_page_token = 2; -} - -// The request for -// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. -message GetInstanceConfigRequest { - // Required. The name of the requested instance configuration. Values are of - // the form `projects//instanceConfigs/`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/InstanceConfig" - } - ]; -} - -// The request for [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance]. -message GetInstanceRequest { - // Required. The name of the requested instance. Values are of the form - // `projects//instances/`. 
- string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Instance" - } - ]; - - // If field_mask is present, specifies the subset of [][Instance] fields that - // should be returned. - // If absent, all [][Instance] fields are returned. - google.protobuf.FieldMask field_mask = 2; -} - -// The request for [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. -message CreateInstanceRequest { - // Required. The name of the project in which to create the instance. Values - // are of the form `projects/`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Required. The ID of the instance to create. Valid identifiers are of the - // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in - // length. - string instance_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The instance to create. The name may be omitted, but if - // specified must be `/instances/`. - Instance instance = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. -message ListInstancesRequest { - // Required. The name of the project for which a list of instances is - // requested. Values are of the form `projects/`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Number of instances to be returned in the response. If 0 or less, defaults - // to the server's maximum allowed page size. - int32 page_size = 2; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token] from a - // previous [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. - string page_token = 3; - - // An expression for filtering the results of the request. Filter rules are - // case insensitive. The fields eligible for filtering are: - // - // * `name` - // * `display_name` - // * `labels.key` where key is the name of a label - // - // Some examples of using filters are: - // - // * `name:*` --> The instance has a name. - // * `name:Howl` --> The instance's name contains the string "howl". - // * `name:HOWL` --> Equivalent to above. - // * `NAME:howl` --> Equivalent to above. - // * `labels.env:*` --> The instance has the label "env". - // * `labels.env:dev` --> The instance has the label "env" and the value of - // the label contains the string "dev". - // * `name:howl labels.env:dev` --> The instance's name contains "howl" and - // it has the label "env" with its value - // containing "dev". - string filter = 4; -} - -// The response for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. -message ListInstancesResponse { - // The list of requested instances. - repeated Instance instances = 1; - - // `next_page_token` can be sent in a subsequent - // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances] call to fetch more - // of the matching instances. - string next_page_token = 2; -} - -// The request for [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. -message UpdateInstanceRequest { - // Required. The instance to update, which must always include the instance - // name. 
Otherwise, only fields mentioned in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included. - Instance instance = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. A mask specifying which fields in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated. - // The field mask must always be specified; this prevents any future fields in - // [][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know - // about them. - google.protobuf.FieldMask field_mask = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance]. -message DeleteInstanceRequest { - // Required. The name of the instance to be deleted. Values are of the form - // `projects//instances/` - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Instance" - } - ]; -} - -// Metadata type for the operation returned by -// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. -message CreateInstanceMetadata { - // The instance being created. - Instance instance = 1; - - // The time at which the - // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] request was - // received. - google.protobuf.Timestamp start_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp end_time = 4; -} - -// Metadata type for the operation returned by -// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. -message UpdateInstanceMetadata { - // The desired end state of the update. - Instance instance = 1; - - // The time at which [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance] - // request was received. - google.protobuf.Timestamp start_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp end_time = 4; -} diff --git a/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py b/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py deleted file mode 100644 index 356c47f1a04b..000000000000 --- a/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py +++ /dev/null @@ -1,1861 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/spanner/admin/instance_v1/proto/spanner_instance_admin.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner/admin/instance_v1/proto/spanner_instance_admin.proto", - package="google.spanner.admin.instance.v1", - syntax="proto3", - serialized_options=_b( - "\n$com.google.spanner.admin.instance.v1B\031SpannerInstanceAdminProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/instance/v1;instance\252\002&Google.Cloud.Spanner.Admin.Instance.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Instance\\V1" - ), - serialized_pb=_b( - '\nIgoogle/cloud/spanner/admin/instance_v1/proto/spanner_instance_admin.proto\x12 google.spanner.admin.instance.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xda\x01\n\x0bReplicaInfo\x12\x10\n\x08location\x18\x01 \x01(\t\x12G\n\x04type\x18\x02 \x01(\x0e\x32\x39.google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType\x12\x1f\n\x17\x64\x65\x66\x61ult_leader_location\x18\x03 \x01(\x08"O\n\x0bReplicaType\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nREAD_WRITE\x10\x01\x12\r\n\tREAD_ONLY\x10\x02\x12\x0b\n\x07WITNESS\x10\x03"\xd7\x01\n\x0eInstanceConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12?\n\x08replicas\x18\x03 \x03(\x0b\x32-.google.spanner.admin.instance.v1.ReplicaInfo:`\xea\x41]\n%spanner.googleapis.com/InstanceConfig\x12\x34projects/{project}/instanceConfigs/{instance_config}"\xd5\x03\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12:\n\x06\x63onfig\x18\x02 \x01(\tB*\xfa\x41\'\n%spanner.googleapis.com/InstanceConfig\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12\x12\n\nnode_count\x18\x05 \x01(\x05\x12?\n\x05state\x18\x06 \x01(\x0e\x32\x30.google.spanner.admin.instance.v1.Instance.State\x12\x46\n\x06labels\x18\x07 \x03(\x0b\x32\x36.google.spanner.admin.instance.v1.Instance.LabelsEntry\x12\x15\n\rendpoint_uris\x18\x08 \x03(\t\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:M\xea\x41J\n\x1fspanner.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\x88\x01\n\x1aListInstanceConfigsRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"\x82\x01\n\x1bListInstanceConfigsResponse\x12J\n\x10instance_configs\x18\x01 \x03(\x0b\x32\x30.google.spanner.admin.instance.v1.InstanceConfig\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"W\n\x18GetInstanceConfigRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%spanner.googleapis.com/InstanceConfig"{\n\x12GetInstanceRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xb9\x01\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x08instance\x18\x03 \x01(\x0b\x32*.google.spanner.admin.instance.v1.InstanceB\x03\xe0\x41\x02"\x92\x01\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x04 \x01(\t"o\n\x15ListInstancesResponse\x12=\n\tinstances\x18\x01 \x03(\x0b\x32*.google.spanner.admin.instance.v1.Instance\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8f\x01\n\x15UpdateInstanceRequest\x12\x41\n\x08instance\x18\x01 \x01(\x0b\x32*.google.spanner.admin.instance.v1.InstanceB\x03\xe0\x41\x02\x12\x33\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"N\n\x15\x44\x65leteInstanceRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance"\xe5\x01\n\x16\x43reateInstanceMetadata\x12<\n\x08instance\x18\x01 \x01(\x0b\x32*.google.spanner.admin.instance.v1.Instance\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63\x61ncel_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xe5\x01\n\x16UpdateInstanceMetadata\x12<\n\x08instance\x18\x01 \x01(\x0b\x32*.google.spanner.admin.instance.v1.Instance\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63\x61ncel_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xbf\x10\n\rInstanceAdmin\x12\xcc\x01\n\x13ListInstanceConfigs\x12<.google.spanner.admin.instance.v1.ListInstanceConfigsRequest\x1a=.google.spanner.admin.instance.v1.ListInstanceConfigsResponse"8\x82\xd3\xe4\x93\x02)\x12\'/v1/{parent=projects/*}/instanceConfigs\xda\x41\x06parent\x12\xb9\x01\n\x11GetInstanceConfig\x12:.google.spanner.admin.instance.v1.GetInstanceConfigRequest\x1a\x30.google.spanner.admin.instance.v1.InstanceConfig"6\x82\xd3\xe4\x93\x02)\x12\'/v1/{name=projects/*/instanceConfigs/*}\xda\x41\x04name\x12\xb4\x01\n\rListInstances\x12\x36.google.spanner.admin.instance.v1.ListInstancesRequest\x1a\x37.google.spanner.admin.instance.v1.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v1/{parent=projects/*}/instances\xda\x41\x06parent\x12\xa1\x01\n\x0bGetInstance\x12\x34.google.spanner.admin.instance.v1.GetInstanceRequest\x1a*.google.spanner.admin.instance.v1.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v1/{name=projects/*/instances/*}\xda\x41\x04name\x12\x9c\x02\n\x0e\x43reateInstance\x12\x37.google.spanner.admin.instance.v1.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"\xb1\x01\x82\xd3\xe4\x93\x02&"!/v1/{parent=projects/*}/instances:\x01*\xda\x41\x1bparent,instance_id,instance\xca\x41\x64\n)google.spanner.admin.instance.v1.Instance\x12\x37google.spanner.admin.instance.v1.CreateInstanceMetadata\x12\x9d\x02\n\x0eUpdateInstance\x12\x37.google.spanner.admin.instance.v1.UpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"\xb2\x01\x82\xd3\xe4\x93\x02/2*/v1/{instance.name=projects/*/instances/*}:\x01*\xda\x41\x13instance,field_mask\xca\x41\x64\n)google.spanner.admin.instance.v1.Instance\x12\x37google.spanner.admin.instance.v1.UpdateInstanceMetadata\x12\x93\x01\n\x0e\x44\x65leteInstance\x12\x37.google.spanner.admin.instance.v1.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v1/{name=projects/*/instances/*}\xda\x41\x04name\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v1/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v1/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v1/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1ax\xca\x41\x16spanner.googleapis.com\xd2\x41\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.adminB\xdf\x01\n$com.google.spanner.admin.instance.v1B\x19SpannerInstanceAdminProtoP\x01ZHgoogle.golang.org/genproto/googleapis/spanner/admin/instance/v1;instance\xaa\x02&Google.Cloud.Spanner.Admin.Instance.V1\xca\x02&Google\\Cloud\\Spanner\\Admin\\Instance\\V1b\x06proto3' - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) 
- - -_REPLICAINFO_REPLICATYPE = _descriptor.EnumDescriptor( - name="ReplicaType", - full_name="google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="READ_WRITE", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="READ_ONLY", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="WITNESS", index=3, number=3, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=559, - serialized_end=638, -) -_sym_db.RegisterEnumDescriptor(_REPLICAINFO_REPLICATYPE) - -_INSTANCE_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.spanner.admin.instance.v1.Instance.State", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="READY", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1194, - serialized_end=1249, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) - - -_REPLICAINFO = _descriptor.Descriptor( - name="ReplicaInfo", - full_name="google.spanner.admin.instance.v1.ReplicaInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="location", - full_name="google.spanner.admin.instance.v1.ReplicaInfo.location", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.spanner.admin.instance.v1.ReplicaInfo.type", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="default_leader_location", - full_name="google.spanner.admin.instance.v1.ReplicaInfo.default_leader_location", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_REPLICAINFO_REPLICATYPE], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=420, - serialized_end=638, -) - - -_INSTANCECONFIG = _descriptor.Descriptor( - name="InstanceConfig", - full_name="google.spanner.admin.instance.v1.InstanceConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.InstanceConfig.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, 
- default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.spanner.admin.instance.v1.InstanceConfig.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="replicas", - full_name="google.spanner.admin.instance.v1.InstanceConfig.replicas", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b( - "\352A]\n%spanner.googleapis.com/InstanceConfig\0224projects/{project}/instanceConfigs/{instance_config}" - ), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=641, - serialized_end=856, -) - - -_INSTANCE_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.spanner.admin.instance.v1.Instance.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.spanner.admin.instance.v1.Instance.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.spanner.admin.instance.v1.Instance.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1147, - serialized_end=1192, -) - -_INSTANCE = _descriptor.Descriptor( - name="Instance", - full_name="google.spanner.admin.instance.v1.Instance", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.Instance.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="config", - full_name="google.spanner.admin.instance.v1.Instance.config", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=_b("\372A'\n%spanner.googleapis.com/InstanceConfig"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.spanner.admin.instance.v1.Instance.display_name", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="node_count", - full_name="google.spanner.admin.instance.v1.Instance.node_count", - index=3, - number=5, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.spanner.admin.instance.v1.Instance.state", - index=4, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.spanner.admin.instance.v1.Instance.labels", - index=5, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="endpoint_uris", - full_name="google.spanner.admin.instance.v1.Instance.endpoint_uris", - index=6, - number=8, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_INSTANCE_LABELSENTRY], - enum_types=[_INSTANCE_STATE], - serialized_options=_b( - "\352AJ\n\037spanner.googleapis.com/Instance\022'projects/{project}/instances/{instance}" - ), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=859, - serialized_end=1328, -) - - -_LISTINSTANCECONFIGSREQUEST = _descriptor.Descriptor( - name="ListInstanceConfigsRequest", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - 
name="page_token", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1331, - serialized_end=1467, -) - - -_LISTINSTANCECONFIGSRESPONSE = _descriptor.Descriptor( - name="ListInstanceConfigsResponse", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="instance_configs", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsResponse.instance_configs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1470, - serialized_end=1600, -) - - -_GETINSTANCECONFIGREQUEST = _descriptor.Descriptor( - name="GetInstanceConfigRequest", - full_name="google.spanner.admin.instance.v1.GetInstanceConfigRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.GetInstanceConfigRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A'\n%spanner.googleapis.com/InstanceConfig" - ), - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1602, - serialized_end=1689, -) - - -_GETINSTANCEREQUEST = _descriptor.Descriptor( - name="GetInstanceRequest", - full_name="google.spanner.admin.instance.v1.GetInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.GetInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A!\n\037spanner.googleapis.com/Instance" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - 
name="field_mask", - full_name="google.spanner.admin.instance.v1.GetInstanceRequest.field_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1691, - serialized_end=1814, -) - - -_CREATEINSTANCEREQUEST = _descriptor.Descriptor( - name="CreateInstanceRequest", - full_name="google.spanner.admin.instance.v1.CreateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.instance.v1.CreateInstanceRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.spanner.admin.instance.v1.CreateInstanceRequest.instance_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="instance", - full_name="google.spanner.admin.instance.v1.CreateInstanceRequest.instance", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1817, - serialized_end=2002, -) - - -_LISTINSTANCESREQUEST = _descriptor.Descriptor( - name="ListInstancesRequest", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project" - ), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="page_token", - 
full_name="google.spanner.admin.instance.v1.ListInstancesRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.spanner.admin.instance.v1.ListInstancesRequest.filter", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2005, - serialized_end=2151, -) - - -_LISTINSTANCESRESPONSE = _descriptor.Descriptor( - name="ListInstancesResponse", - full_name="google.spanner.admin.instance.v1.ListInstancesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="instances", - full_name="google.spanner.admin.instance.v1.ListInstancesResponse.instances", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2153, - serialized_end=2264, -) - - -_UPDATEINSTANCEREQUEST = _descriptor.Descriptor( - name="UpdateInstanceRequest", - full_name="google.spanner.admin.instance.v1.UpdateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.spanner.admin.instance.v1.UpdateInstanceRequest.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="field_mask", - full_name="google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b("\340A\002"), - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2267, - serialized_end=2410, -) - - -_DELETEINSTANCEREQUEST = 
_descriptor.Descriptor( - name="DeleteInstanceRequest", - full_name="google.spanner.admin.instance.v1.DeleteInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.admin.instance.v1.DeleteInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=_b( - "\340A\002\372A!\n\037spanner.googleapis.com/Instance" - ), - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2412, - serialized_end=2490, -) - - -_CREATEINSTANCEMETADATA = _descriptor.Descriptor( - name="CreateInstanceMetadata", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cancel_time", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata.cancel_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.spanner.admin.instance.v1.CreateInstanceMetadata.end_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2493, - serialized_end=2722, -) - - -_UPDATEINSTANCEMETADATA = _descriptor.Descriptor( - name="UpdateInstanceMetadata", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), 
- _descriptor.FieldDescriptor( - name="start_time", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="cancel_time", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.spanner.admin.instance.v1.UpdateInstanceMetadata.end_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2725, - serialized_end=2954, -) - -_REPLICAINFO.fields_by_name["type"].enum_type = _REPLICAINFO_REPLICATYPE -_REPLICAINFO_REPLICATYPE.containing_type = _REPLICAINFO -_INSTANCECONFIG.fields_by_name["replicas"].message_type = _REPLICAINFO -_INSTANCE_LABELSENTRY.containing_type = _INSTANCE -_INSTANCE.fields_by_name["state"].enum_type = _INSTANCE_STATE -_INSTANCE.fields_by_name["labels"].message_type = _INSTANCE_LABELSENTRY -_INSTANCE_STATE.containing_type = _INSTANCE -_LISTINSTANCECONFIGSRESPONSE.fields_by_name[ - "instance_configs" -].message_type = _INSTANCECONFIG -_GETINSTANCEREQUEST.fields_by_name[ - "field_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATEINSTANCEREQUEST.fields_by_name["instance"].message_type = _INSTANCE -_LISTINSTANCESRESPONSE.fields_by_name["instances"].message_type = _INSTANCE -_UPDATEINSTANCEREQUEST.fields_by_name["instance"].message_type = _INSTANCE -_UPDATEINSTANCEREQUEST.fields_by_name[ - "field_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATEINSTANCEMETADATA.fields_by_name["instance"].message_type = _INSTANCE -_CREATEINSTANCEMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name[ - "cancel_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name["instance"].message_type = _INSTANCE -_UPDATEINSTANCEMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "cancel_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["ReplicaInfo"] = _REPLICAINFO -DESCRIPTOR.message_types_by_name["InstanceConfig"] = _INSTANCECONFIG -DESCRIPTOR.message_types_by_name["Instance"] = _INSTANCE 
-DESCRIPTOR.message_types_by_name[ - "ListInstanceConfigsRequest" -] = _LISTINSTANCECONFIGSREQUEST -DESCRIPTOR.message_types_by_name[ - "ListInstanceConfigsResponse" -] = _LISTINSTANCECONFIGSRESPONSE -DESCRIPTOR.message_types_by_name["GetInstanceConfigRequest"] = _GETINSTANCECONFIGREQUEST -DESCRIPTOR.message_types_by_name["GetInstanceRequest"] = _GETINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["CreateInstanceRequest"] = _CREATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesRequest"] = _LISTINSTANCESREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesResponse"] = _LISTINSTANCESRESPONSE -DESCRIPTOR.message_types_by_name["UpdateInstanceRequest"] = _UPDATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["DeleteInstanceRequest"] = _DELETEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["CreateInstanceMetadata"] = _CREATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["UpdateInstanceMetadata"] = _UPDATEINSTANCEMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ReplicaInfo = _reflection.GeneratedProtocolMessageType( - "ReplicaInfo", - (_message.Message,), - dict( - DESCRIPTOR=_REPLICAINFO, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""Protocol buffer. - - Attributes: - location: - The location of the serving resources, e.g. "us-central1". - type: - The type of replica. - default_leader_location: - If true, this location is designated as the default leader - location where leader replicas are placed. See the `region - types documentation `__ for more details. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ReplicaInfo) - ), -) -_sym_db.RegisterMessage(ReplicaInfo) - -InstanceConfig = _reflection.GeneratedProtocolMessageType( - "InstanceConfig", - (_message.Message,), - dict( - DESCRIPTOR=_INSTANCECONFIG, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""A possible configuration for a Cloud Spanner instance. - Configurations define the geographic placement of nodes and their - replication. - - - Attributes: - name: - A unique identifier for the instance configuration. Values are - of the form - ``projects//instanceConfigs/[a-z][-a-z0-9]*`` - display_name: - The name of this instance configuration as it appears in UIs. - replicas: - The geographic placement of nodes in this instance - configuration and their replication properties. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.InstanceConfig) - ), -) -_sym_db.RegisterMessage(InstanceConfig) - -Instance = _reflection.GeneratedProtocolMessageType( - "Instance", - (_message.Message,), - dict( - LabelsEntry=_reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - dict( - DESCRIPTOR=_INSTANCE_LABELSENTRY, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.Instance.LabelsEntry) - ), - ), - DESCRIPTOR=_INSTANCE, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""An isolated set of Cloud Spanner resources on which - databases can be hosted. - - - Attributes: - name: - Required. A unique identifier for the instance, which cannot - be changed after the instance is created. Values are of the - form ``projects//instances/[a-z][-a-z0-9]*[a-z0-9]``. - The final segment of the name must be between 2 and 64 - characters in length. - config: - Required. 
The name of the instance's configuration. Values are - of the form - ``projects//instanceConfigs/``. See - also [InstanceConfig][google.spanner.admin.instance.v1.Instanc - eConfig] and [ListInstanceConfigs][google.spanner.admin.instan - ce.v1.InstanceAdmin.ListInstanceConfigs]. - display_name: - Required. The descriptive name for this instance as it appears - in UIs. Must be unique per project and between 4 and 30 - characters in length. - node_count: - Required. The number of nodes allocated to this instance. This - may be zero in API responses for instances that are not yet in - state ``READY``. See `the documentation `__ for more - information about nodes. - state: - Output only. The current instance state. For [CreateInstance][ - google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] - , the state must be either omitted or set to ``CREATING``. For - [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmi - n.UpdateInstance], the state must be either omitted or set to - ``READY``. - labels: - Cloud Labels are a flexible and lightweight mechanism for - organizing cloud resources into groups that reflect a - customer's organizational needs and deployment strategies. - Cloud Labels can be used to filter collections of resources. - They can be used to control how resource metrics are - aggregated. And they can be used as arguments to policy - management rules (e.g. route, firewall, load balancing, etc.). - - Label keys must be between 1 and 63 characters long and - must conform to the following regular expression: - ``[a-z]([-a-z0-9]*[a-z0-9])?``. - Label values must be - between 0 and 63 characters long and must conform to the - regular expression ``([a-z]([-a-z0-9]*[a-z0-9])?)?``. - No - more than 64 labels can be associated with a given resource. - See https://goo.gl/xmQnxf for more information on and examples - of labels. If you plan to use labels in your own code, please - note that additional characters may be allowed in the future. - And so you are advised to use an internal label - representation, such as JSON, which doesn't rely upon specific - characters being disallowed. For example, representing labels - as the string: name + "*" + value would prove problematic if - we were to allow "*" in a future release. - endpoint_uris: - Output only. The endpoint URIs based on the instance config. - For example, instances located in a specific cloud region (or - multi region) such as nam3, would have a nam3 specific - endpoint URI. This URI is to be used implictly by SDK clients, - with fallback to default URI. These endpoints are intended to - optimize the network routing between the client and the - instance's serving resources. If multiple endpoints are - present, client may establish connections using any of the - given URIs. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.Instance) - ), -) -_sym_db.RegisterMessage(Instance) -_sym_db.RegisterMessage(Instance.LabelsEntry) - -ListInstanceConfigsRequest = _reflection.GeneratedProtocolMessageType( - "ListInstanceConfigsRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINSTANCECONFIGSREQUEST, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The request for - [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. - - - Attributes: - parent: - Required. The name of the project for which a list of - supported instance configurations is requested. Values are of - the form ``projects/``. 
- page_size: - Number of instance configurations to be returned in the - response. If 0 or less, defaults to the server's maximum - allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next\_page\_tok - en][google.spanner.admin.instance.v1.ListInstanceConfigsRespon - se.next\_page\_token] from a previous [ListInstanceConfigsResp - onse][google.spanner.admin.instance.v1.ListInstanceConfigsResp - onse]. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstanceConfigsRequest) - ), -) -_sym_db.RegisterMessage(ListInstanceConfigsRequest) - -ListInstanceConfigsResponse = _reflection.GeneratedProtocolMessageType( - "ListInstanceConfigsResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINSTANCECONFIGSRESPONSE, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The response for - [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. - - - Attributes: - instance_configs: - The list of requested instance configurations. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListInstanc - eConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListI - nstanceConfigs] call to fetch more of the matching instance - configurations. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstanceConfigsResponse) - ), -) -_sym_db.RegisterMessage(ListInstanceConfigsResponse) - -GetInstanceConfigRequest = _reflection.GeneratedProtocolMessageType( - "GetInstanceConfigRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETINSTANCECONFIGREQUEST, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The request for - [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. - - - Attributes: - name: - Required. The name of the requested instance configuration. - Values are of the form - ``projects//instanceConfigs/``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.GetInstanceConfigRequest) - ), -) -_sym_db.RegisterMessage(GetInstanceConfigRequest) - -GetInstanceRequest = _reflection.GeneratedProtocolMessageType( - "GetInstanceRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETINSTANCEREQUEST, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The request for - [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance]. - - - Attributes: - name: - Required. The name of the requested instance. Values are of - the form ``projects//instances/``. - field_mask: - If field\_mask is present, specifies the subset of - [][Instance] fields that should be returned. If absent, all - [][Instance] fields are returned. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.GetInstanceRequest) - ), -) -_sym_db.RegisterMessage(GetInstanceRequest) - -CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "CreateInstanceRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEINSTANCEREQUEST, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The request for - [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. - - - Attributes: - parent: - Required. The name of the project in which to create the - instance. Values are of the form ``projects/``. - instance_id: - Required. The ID of the instance to create. 
Valid identifiers - are of the form ``[a-z][-a-z0-9]*[a-z0-9]`` and must be - between 2 and 64 characters in length. - instance: - Required. The instance to create. The name may be omitted, but - if specified must be ``/instances/``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstanceRequest) - ), -) -_sym_db.RegisterMessage(CreateInstanceRequest) - -ListInstancesRequest = _reflection.GeneratedProtocolMessageType( - "ListInstancesRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINSTANCESREQUEST, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The request for - [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. - - - Attributes: - parent: - Required. The name of the project for which a list of - instances is requested. Values are of the form - ``projects/``. - page_size: - Number of instances to be returned in the response. If 0 or - less, defaults to the server's maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next\_page\_tok - en][google.spanner.admin.instance.v1.ListInstancesResponse.nex - t\_page\_token] from a previous [ListInstancesResponse][google - .spanner.admin.instance.v1.ListInstancesResponse]. - filter: - An expression for filtering the results of the request. Filter - rules are case insensitive. The fields eligible for filtering - are: - ``name`` - ``display_name`` - ``labels.key`` where - key is the name of a label Some examples of using filters - are: - ``name:*`` --> The instance has a name. - - ``name:Howl`` --> The instance's name contains the string - "howl". - ``name:HOWL`` --> Equivalent to above. - - ``NAME:howl`` --> Equivalent to above. - ``labels.env:*`` --> - The instance has the label "env". - ``labels.env:dev`` --> - The instance has the label "env" and the value of the label - contains the string "dev". - ``name:howl labels.env:dev`` --> - The instance's name contains "howl" and it has the label - "env" with its value containing "dev". - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancesRequest) - ), -) -_sym_db.RegisterMessage(ListInstancesRequest) - -ListInstancesResponse = _reflection.GeneratedProtocolMessageType( - "ListInstancesResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTINSTANCESRESPONSE, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The response for - [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. - - - Attributes: - instances: - The list of requested instances. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListInstanc - es][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanc - es] call to fetch more of the matching instances. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.ListInstancesResponse) - ), -) -_sym_db.RegisterMessage(ListInstancesResponse) - -UpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "UpdateInstanceRequest", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEINSTANCEREQUEST, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The request for - [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. - - - Attributes: - instance: - Required. The instance to update, which must always include - the instance name. 
Otherwise, only fields mentioned in [][goog - le.spanner.admin.instance.v1.UpdateInstanceRequest.field\_mask - ] need be included. - field_mask: - Required. A mask specifying which fields in [][google.spanner. - admin.instance.v1.UpdateInstanceRequest.instance] should be - updated. The field mask must always be specified; this - prevents any future fields in - [][google.spanner.admin.instance.v1.Instance] from being - erased accidentally by clients that do not know about them. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceRequest) - ), -) -_sym_db.RegisterMessage(UpdateInstanceRequest) - -DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( - "DeleteInstanceRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETEINSTANCEREQUEST, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""The request for - [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance]. - - - Attributes: - name: - Required. The name of the instance to be deleted. Values are - of the form ``projects//instances/`` - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.DeleteInstanceRequest) - ), -) -_sym_db.RegisterMessage(DeleteInstanceRequest) - -CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "CreateInstanceMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_CREATEINSTANCEMETADATA, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""Metadata type for the operation returned by - [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. - - - Attributes: - instance: - The instance being created. - start_time: - The time at which the [CreateInstance][google.spanner.admin.in - stance.v1.InstanceAdmin.CreateInstance] request was received. - cancel_time: - The time at which this operation was cancelled. If set, this - operation is in the process of undoing itself (which is - guaranteed to succeed) and cannot be cancelled again. - end_time: - The time at which this operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.CreateInstanceMetadata) - ), -) -_sym_db.RegisterMessage(CreateInstanceMetadata) - -UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateInstanceMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_UPDATEINSTANCEMETADATA, - __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""Metadata type for the operation returned by - [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. - - - Attributes: - instance: - The desired end state of the update. - start_time: - The time at which [UpdateInstance][google.spanner.admin.instan - ce.v1.InstanceAdmin.UpdateInstance] request was received. - cancel_time: - The time at which this operation was cancelled. If set, this - operation is in the process of undoing itself (which is - guaranteed to succeed) and cannot be cancelled again. - end_time: - The time at which this operation failed or was completed - successfully. 
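# Hypothetical caller-side sketch of building the UpdateInstanceRequest
# described above; the project/instance names are placeholders.  The mask must
# always name the fields being changed, so fields unknown to an older client
# are never accidentally cleared.
from google.protobuf import field_mask_pb2

from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2

request = spanner_instance_admin_pb2.UpdateInstanceRequest(
    instance=spanner_instance_admin_pb2.Instance(
        # The name must always be included; it identifies the instance.
        name="projects/my-project/instances/my-instance",
        node_count=3,
    ),
    field_mask=field_mask_pb2.FieldMask(paths=["node_count"]),
)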
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceMetadata) - ), -) -_sym_db.RegisterMessage(UpdateInstanceMetadata) - - -DESCRIPTOR._options = None -_INSTANCECONFIG._options = None -_INSTANCE_LABELSENTRY._options = None -_INSTANCE.fields_by_name["config"]._options = None -_INSTANCE._options = None -_LISTINSTANCECONFIGSREQUEST.fields_by_name["parent"]._options = None -_GETINSTANCECONFIGREQUEST.fields_by_name["name"]._options = None -_GETINSTANCEREQUEST.fields_by_name["name"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["parent"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance_id"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_LISTINSTANCESREQUEST.fields_by_name["parent"]._options = None -_UPDATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_UPDATEINSTANCEREQUEST.fields_by_name["field_mask"]._options = None -_DELETEINSTANCEREQUEST.fields_by_name["name"]._options = None - -_INSTANCEADMIN = _descriptor.ServiceDescriptor( - name="InstanceAdmin", - full_name="google.spanner.admin.instance.v1.InstanceAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=_b( - "\312A\026spanner.googleapis.com\322A\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.admin" - ), - serialized_start=2957, - serialized_end=5068, - methods=[ - _descriptor.MethodDescriptor( - name="ListInstanceConfigs", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs", - index=0, - containing_service=None, - input_type=_LISTINSTANCECONFIGSREQUEST, - output_type=_LISTINSTANCECONFIGSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002)\022'/v1/{parent=projects/*}/instanceConfigs\332A\006parent" - ), - ), - _descriptor.MethodDescriptor( - name="GetInstanceConfig", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig", - index=1, - containing_service=None, - input_type=_GETINSTANCECONFIGREQUEST, - output_type=_INSTANCECONFIG, - serialized_options=_b( - "\202\323\344\223\002)\022'/v1/{name=projects/*/instanceConfigs/*}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="ListInstances", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.ListInstances", - index=2, - containing_service=None, - input_type=_LISTINSTANCESREQUEST, - output_type=_LISTINSTANCESRESPONSE, - serialized_options=_b( - "\202\323\344\223\002#\022!/v1/{parent=projects/*}/instances\332A\006parent" - ), - ), - _descriptor.MethodDescriptor( - name="GetInstance", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.GetInstance", - index=3, - containing_service=None, - input_type=_GETINSTANCEREQUEST, - output_type=_INSTANCE, - serialized_options=_b( - "\202\323\344\223\002#\022!/v1/{name=projects/*/instances/*}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="CreateInstance", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance", - index=4, - containing_service=None, - input_type=_CREATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002&"!/v1/{parent=projects/*}/instances:\001*\332A\033parent,instance_id,instance\312Ad\n)google.spanner.admin.instance.v1.Instance\0227google.spanner.admin.instance.v1.CreateInstanceMetadata' - ), - ), - _descriptor.MethodDescriptor( - name="UpdateInstance", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance", - index=5, - containing_service=None, - 
input_type=_UPDATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002/2*/v1/{instance.name=projects/*/instances/*}:\001*\332A\023instance,field_mask\312Ad\n)google.spanner.admin.instance.v1.Instance\0227google.spanner.admin.instance.v1.UpdateInstanceMetadata" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteInstance", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance", - index=6, - containing_service=None, - input_type=_DELETEINSTANCEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002#*!/v1/{name=projects/*/instances/*}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.SetIamPolicy", - index=7, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\0027"2/v1/{resource=projects/*/instances/*}:setIamPolicy:\001*\332A\017resource,policy' - ), - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.GetIamPolicy", - index=8, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\0027"2/v1/{resource=projects/*/instances/*}:getIamPolicy:\001*\332A\010resource' - ), - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.spanner.admin.instance.v1.InstanceAdmin.TestIamPermissions", - index=9, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002="8/v1/{resource=projects/*/instances/*}:testIamPermissions:\001*\332A\024resource,permissions' - ), - ), - ], -) -_sym_db.RegisterServiceDescriptor(_INSTANCEADMIN) - -DESCRIPTOR.services_by_name["InstanceAdmin"] = _INSTANCEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2_grpc.py b/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2_grpc.py deleted file mode 100644 index b7276a9f9252..000000000000 --- a/spanner/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2_grpc.py +++ /dev/null @@ -1,343 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class InstanceAdminStub(object): - """Cloud Spanner Instance Admin API - - The Cloud Spanner Instance Admin API can be used to create, delete, - modify and list instances. 
Instances are dedicated Cloud Spanner serving - and storage resources to be used by Cloud Spanner databases. - - Each instance has a "configuration", which dictates where the - serving resources for the Cloud Spanner instance are located (e.g., - US-central, Europe). Configurations are created by Google based on - resource availability. - - Cloud Spanner billing is based on the instances that exist and their - sizes. After an instance exists, there are no additional - per-database or per-operation charges for use of the instance - (though there may be additional network bandwidth charges). - Instances offer isolation: problems with databases in one instance - will not affect other instances. However, within an instance - databases can affect each other. For example, if one database in an - instance receives a lot of requests and consumes most of the - instance resources, fewer resources are available for other - databases in that instance, and their performance may suffer. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.ListInstanceConfigs = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", - request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsResponse.FromString, - ) - self.GetInstanceConfig = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", - request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceConfigRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.InstanceConfig.FromString, - ) - self.ListInstances = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", - request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesResponse.FromString, - ) - self.GetInstance = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", - request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.Instance.FromString, - ) - self.CreateInstance = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", - request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.UpdateInstance = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", - request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.UpdateInstanceRequest.SerializeToString, - 
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteInstance = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", - request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class InstanceAdminServicer(object): - """Cloud Spanner Instance Admin API - - The Cloud Spanner Instance Admin API can be used to create, delete, - modify and list instances. Instances are dedicated Cloud Spanner serving - and storage resources to be used by Cloud Spanner databases. - - Each instance has a "configuration", which dictates where the - serving resources for the Cloud Spanner instance are located (e.g., - US-central, Europe). Configurations are created by Google based on - resource availability. - - Cloud Spanner billing is based on the instances that exist and their - sizes. After an instance exists, there are no additional - per-database or per-operation charges for use of the instance - (though there may be additional network bandwidth charges). - Instances offer isolation: problems with databases in one instance - will not affect other instances. However, within an instance - databases can affect each other. For example, if one database in an - instance receives a lot of requests and consumes most of the - instance resources, fewer resources are available for other - databases in that instance, and their performance may suffer. - """ - - def ListInstanceConfigs(self, request, context): - """Lists the supported instance configurations for a given project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetInstanceConfig(self, request, context): - """Gets information about a particular instance configuration. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListInstances(self, request, context): - """Lists all instances in the given project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetInstance(self, request, context): - """Gets information about a particular instance. 
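# Hypothetical caller-side sketch of the generated InstanceAdminStub above;
# the endpoint, channel setup, and project name are placeholder assumptions
# (a real client would use an authenticated secure channel).
import grpc

from google.cloud.spanner_admin_instance_v1.proto import (
    spanner_instance_admin_pb2,
    spanner_instance_admin_pb2_grpc,
)

channel = grpc.insecure_channel("localhost:50051")
stub = spanner_instance_admin_pb2_grpc.InstanceAdminStub(channel)

# Each stub attribute is a unary-unary callable taking the request message
# and returning the response message.
response = stub.ListInstanceConfigs(
    spanner_instance_admin_pb2.ListInstanceConfigsRequest(parent="projects/my-project")
)
for config in response.instance_configs:
    print(config.name, config.display_name)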
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateInstance(self, request, context): - """Creates an instance and begins preparing it to begin serving. The - returned [long-running operation][google.longrunning.Operation] - can be used to track the progress of preparing the new - instance. The instance name is assigned by the caller. If the - named instance already exists, `CreateInstance` returns - `ALREADY_EXISTS`. - - Immediately upon completion of this request: - - * The instance is readable via the API, with all requested attributes - but no allocated resources. Its state is `CREATING`. - - Until completion of the returned operation: - - * Cancelling the operation renders the instance immediately unreadable - via the API. - * The instance can be deleted. - * All other attempts to modify the instance are rejected. - - Upon completion of the returned operation: - - * Billing for all successfully-allocated resources begins (some types - may have lower than the requested levels). - * Databases can be created in the instance. - * The instance's allocated resource levels are readable via the API. - * The instance's state becomes `READY`. - - The returned [long-running operation][google.longrunning.Operation] will - have a name of the format `/operations/` and - can be used to track creation of the instance. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type is - [Instance][google.spanner.admin.instance.v1.Instance], if successful. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateInstance(self, request, context): - """Updates an instance, and begins allocating or releasing resources - as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track the - progress of updating the instance. If the named instance does not - exist, returns `NOT_FOUND`. - - Immediately upon completion of this request: - - * For resource types for which a decrease in the instance's allocation - has been requested, billing is based on the newly-requested level. - - Until completion of the returned operation: - - * Cancelling the operation sets its metadata's - [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins - restoring resources to their pre-request values. The operation - is guaranteed to succeed at undoing all resource changes, - after which point it terminates with a `CANCELLED` status. - * All other attempts to modify the instance are rejected. - * Reading the instance via the API continues to give the pre-request - resource levels. - - Upon completion of the returned operation: - - * Billing begins for all successfully-allocated resources (some types - may have lower than the requested levels). - * All newly-reserved resources are available for serving the instance's - tables. - * The instance's new resource levels are readable via the API. - - The returned [long-running operation][google.longrunning.Operation] will - have a name of the format `/operations/` and - can be used to track the instance modification. 
The - [metadata][google.longrunning.Operation.metadata] field type is - [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type is - [Instance][google.spanner.admin.instance.v1.Instance], if successful. - - Authorization requires `spanner.instances.update` permission on - resource [name][google.spanner.admin.instance.v1.Instance.name]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteInstance(self, request, context): - """Deletes an instance. - - Immediately upon completion of the request: - - * Billing ceases for all of the instance's reserved resources. - - Soon afterward: - - * The instance and *all of its databases* immediately and - irrevocably disappear from the API. All data in the databases - is permanently deleted. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on an instance resource. Replaces any - existing policy. - - Authorization requires `spanner.instances.setIamPolicy` on - [resource][google.iam.v1.SetIamPolicyRequest.resource]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Authorization requires `spanner.instances.getIamPolicy` on - [resource][google.iam.v1.GetIamPolicyRequest.resource]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified instance resource. - - Attempting this RPC on a non-existent Cloud Spanner instance resource will - result in a NOT_FOUND error if the user has `spanner.instances.list` - permission on the containing Google Cloud Project. Otherwise returns an - empty set of permissions. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_InstanceAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "ListInstanceConfigs": grpc.unary_unary_rpc_method_handler( - servicer.ListInstanceConfigs, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstanceConfigsResponse.SerializeToString, - ), - "GetInstanceConfig": grpc.unary_unary_rpc_method_handler( - servicer.GetInstanceConfig, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceConfigRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.InstanceConfig.SerializeToString, - ), - "ListInstances": grpc.unary_unary_rpc_method_handler( - servicer.ListInstances, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.ListInstancesResponse.SerializeToString, - ), - "GetInstance": grpc.unary_unary_rpc_method_handler( - servicer.GetInstance, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.GetInstanceRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.Instance.SerializeToString, - ), - "CreateInstance": grpc.unary_unary_rpc_method_handler( - servicer.CreateInstance, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.CreateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "UpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.UpdateInstance, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.UpdateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteInstance": grpc.unary_unary_rpc_method_handler( - servicer.DeleteInstance, - request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_instance__v1_dot_proto_dot_spanner__instance__admin__pb2.DeleteInstanceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - 
request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.spanner.admin.instance.v1.InstanceAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/spanner/google/cloud/spanner_admin_instance_v1/types.py b/spanner/google/cloud/spanner_admin_instance_v1/types.py deleted file mode 100644 index a20b479bf04e..000000000000 --- a/spanner/google/cloud/spanner_admin_instance_v1/types.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -import sys - - -from google.api import http_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.iam.v1.logging import audit_data_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2 - - -_shared_modules = [ - http_pb2, - iam_policy_pb2, - policy_pb2, - audit_data_pb2, - operations_pb2, - any_pb2, - descriptor_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [spanner_instance_admin_pb2] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.spanner_admin_instance_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/spanner/google/cloud/spanner_v1/__init__.py b/spanner/google/cloud/spanner_v1/__init__.py deleted file mode 100644 index 8611405cd6cf..000000000000 --- a/spanner/google/cloud/spanner_v1/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2017, Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import - -import pkg_resources - -__version__ = pkg_resources.get_distribution("google-cloud-spanner").version - -from google.cloud.spanner_v1 import param_types -from google.cloud.spanner_v1 import types -from google.cloud.spanner_v1.client import Client -from google.cloud.spanner_v1.gapic import enums -from google.cloud.spanner_v1.keyset import KeyRange -from google.cloud.spanner_v1.keyset import KeySet -from google.cloud.spanner_v1.pool import AbstractSessionPool -from google.cloud.spanner_v1.pool import BurstyPool -from google.cloud.spanner_v1.pool import FixedSizePool -from google.cloud.spanner_v1.pool import PingingPool -from google.cloud.spanner_v1.pool import TransactionPingingPool - - -COMMIT_TIMESTAMP = "spanner.commit_timestamp()" -"""Placeholder be used to store commit timestamp of a transaction in a column. - -This value can only be used for timestamp columns that have set the option -``(allow_commit_timestamp=true)`` in the schema. -""" - - -__all__ = ( - # google.cloud.spanner_v1 - "__version__", - "param_types", - "types", - # google.cloud.spanner_v1.client - "Client", - # google.cloud.spanner_v1.keyset - "KeyRange", - "KeySet", - # google.cloud.spanner_v1.pool - "AbstractSessionPool", - "BurstyPool", - "FixedSizePool", - "PingingPool", - "TransactionPingingPool", - # google.cloud.spanner_v1.gapic - "enums", - # local - "COMMIT_TIMESTAMP", -) diff --git a/spanner/google/cloud/spanner_v1/_helpers.py b/spanner/google/cloud/spanner_v1/_helpers.py deleted file mode 100644 index 3b7fd586c9a5..000000000000 --- a/spanner/google/cloud/spanner_v1/_helpers.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helper functions for Cloud Spanner.""" - -import datetime -import math - -import six - -from google.protobuf.struct_pb2 import ListValue -from google.protobuf.struct_pb2 import Value - -from google.api_core import datetime_helpers -from google.cloud._helpers import _date_from_iso8601_date -from google.cloud._helpers import _datetime_to_rfc3339 -from google.cloud.spanner_v1.proto import type_pb2 - - -def _try_to_coerce_bytes(bytestring): - """Try to coerce a byte string into the right thing based on Python - version and whether or not it is base64 encoded. - - Return a text string or raise ValueError. - """ - # Attempt to coerce using google.protobuf.Value, which will expect - # something that is utf-8 (and base64 consistently is). - try: - Value(string_value=bytestring) - return bytestring - except ValueError: - raise ValueError( - "Received a bytes that is not base64 encoded. " - "Ensure that you either send a Unicode string or a " - "base64-encoded bytes." - ) - - -# pylint: disable=too-many-return-statements,too-many-branches -def _make_value_pb(value): - """Helper for :func:`_make_list_value_pbs`. 
- - :type value: scalar value - :param value: value to convert - - :rtype: :class:`~google.protobuf.struct_pb2.Value` - :returns: value protobufs - :raises ValueError: if value is not of a known scalar type. - """ - if value is None: - return Value(null_value="NULL_VALUE") - if isinstance(value, (list, tuple)): - return Value(list_value=_make_list_value_pb(value)) - if isinstance(value, bool): - return Value(bool_value=value) - if isinstance(value, six.integer_types): - return Value(string_value=str(value)) - if isinstance(value, float): - if math.isnan(value): - return Value(string_value="NaN") - if math.isinf(value): - if value > 0: - return Value(string_value="Infinity") - else: - return Value(string_value="-Infinity") - return Value(number_value=value) - if isinstance(value, datetime_helpers.DatetimeWithNanoseconds): - return Value(string_value=value.rfc3339()) - if isinstance(value, datetime.datetime): - return Value(string_value=_datetime_to_rfc3339(value)) - if isinstance(value, datetime.date): - return Value(string_value=value.isoformat()) - if isinstance(value, six.binary_type): - value = _try_to_coerce_bytes(value) - return Value(string_value=value) - if isinstance(value, six.text_type): - return Value(string_value=value) - if isinstance(value, ListValue): - return Value(list_value=value) - raise ValueError("Unknown type: %s" % (value,)) - - -# pylint: enable=too-many-return-statements,too-many-branches - - -def _make_list_value_pb(values): - """Construct of ListValue protobufs. - - :type values: list of scalar - :param values: Row data - - :rtype: :class:`~google.protobuf.struct_pb2.ListValue` - :returns: protobuf - """ - return ListValue(values=[_make_value_pb(value) for value in values]) - - -def _make_list_value_pbs(values): - """Construct a sequence of ListValue protobufs. - - :type values: list of list of scalar - :param values: Row data - - :rtype: list of :class:`~google.protobuf.struct_pb2.ListValue` - :returns: sequence of protobufs - """ - return [_make_list_value_pb(row) for row in values] - - -# pylint: disable=too-many-branches -def _parse_value_pb(value_pb, field_type): - """Convert a Value protobuf to cell data. 
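# Small illustration of _make_value_pb above: scalar Python values become
# google.protobuf.struct_pb2.Value messages, with INT64 and the special float
# values carried as strings so nothing is lost on the wire.
from google.cloud.spanner_v1._helpers import _make_value_pb

_make_value_pb(None)          # Value(null_value=NULL_VALUE)
_make_value_pb(True)          # Value(bool_value=True)
_make_value_pb(42)            # Value(string_value="42")  -- integers travel as strings
_make_value_pb(2.5)           # Value(number_value=2.5)
_make_value_pb(float("nan"))  # Value(string_value="NaN")
_make_value_pb(u"hello")      # Value(string_value="hello")
_make_value_pb([1, 2, 3])     # Value(list_value=...) via _make_list_value_pb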
- - :type value_pb: :class:`~google.protobuf.struct_pb2.Value` - :param value_pb: protobuf to convert - - :type field_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type` - :param field_type: type code for the value - - :rtype: varies on field_type - :returns: value extracted from value_pb - :raises ValueError: if unknown type is passed - """ - if value_pb.HasField("null_value"): - return None - if field_type.code == type_pb2.STRING: - result = value_pb.string_value - elif field_type.code == type_pb2.BYTES: - result = value_pb.string_value.encode("utf8") - elif field_type.code == type_pb2.BOOL: - result = value_pb.bool_value - elif field_type.code == type_pb2.INT64: - result = int(value_pb.string_value) - elif field_type.code == type_pb2.FLOAT64: - if value_pb.HasField("string_value"): - result = float(value_pb.string_value) - else: - result = value_pb.number_value - elif field_type.code == type_pb2.DATE: - result = _date_from_iso8601_date(value_pb.string_value) - elif field_type.code == type_pb2.TIMESTAMP: - DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds - result = DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value) - elif field_type.code == type_pb2.ARRAY: - result = [ - _parse_value_pb(item_pb, field_type.array_element_type) - for item_pb in value_pb.list_value.values - ] - elif field_type.code == type_pb2.STRUCT: - result = [ - _parse_value_pb(item_pb, field_type.struct_type.fields[i].type) - for (i, item_pb) in enumerate(value_pb.list_value.values) - ] - else: - raise ValueError("Unknown type: %s" % (field_type,)) - return result - - -# pylint: enable=too-many-branches - - -def _parse_list_value_pbs(rows, row_type): - """Convert a list of ListValue protobufs into a list of list of cell data. - - :type rows: list of :class:`~google.protobuf.struct_pb2.ListValue` - :param rows: row data returned from a read/query - - :type row_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.StructType` - :param row_type: row schema specification - - :rtype: list of list of cell data - :returns: data for the rows, coerced into appropriate types - """ - result = [] - for row in rows: - row_data = [] - for value_pb, field in zip(row.values, row_type.fields): - row_data.append(_parse_value_pb(value_pb, field.type)) - result.append(row_data) - return result - - -class _SessionWrapper(object): - """Base class for objects wrapping a session. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session used to perform the commit - """ - - def __init__(self, session): - self._session = session - - -def _metadata_with_prefix(prefix, **kw): - """Create RPC metadata containing a prefix. - - Args: - prefix (str): appropriate resource path. - - Returns: - List[Tuple[str, str]]: RPC metadata with supplied prefix - """ - return [("google-cloud-resource-prefix", prefix)] diff --git a/spanner/google/cloud/spanner_v1/batch.py b/spanner/google/cloud/spanner_v1/batch.py deleted file mode 100644 index e62763d7fd7c..000000000000 --- a/spanner/google/cloud/spanner_v1/batch.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Context manager for Cloud Spanner batched writes.""" - -from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions - -# pylint: disable=ungrouped-imports -from google.cloud._helpers import _pb_timestamp_to_datetime -from google.cloud.spanner_v1._helpers import _SessionWrapper -from google.cloud.spanner_v1._helpers import _make_list_value_pbs -from google.cloud.spanner_v1._helpers import _metadata_with_prefix - -# pylint: enable=ungrouped-imports - - -class _BatchBase(_SessionWrapper): - """Accumulate mutations for transmission during :meth:`commit`. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session used to perform the commit - """ - - def __init__(self, session): - super(_BatchBase, self).__init__(session) - self._mutations = [] - - def _check_state(self): - """Helper for :meth:`commit` et al. - - Subclasses must override - - :raises: :exc:`ValueError` if the object's state is invalid for making - API requests. - """ - raise NotImplementedError - - def insert(self, table, columns, values): - """Insert one or more new table rows. - - :type table: str - :param table: Name of the table to be modified. - - :type columns: list of str - :param columns: Name of the table columns to be modified. - - :type values: list of lists - :param values: Values to be modified. - """ - self._mutations.append(Mutation(insert=_make_write_pb(table, columns, values))) - - def update(self, table, columns, values): - """Update one or more existing table rows. - - :type table: str - :param table: Name of the table to be modified. - - :type columns: list of str - :param columns: Name of the table columns to be modified. - - :type values: list of lists - :param values: Values to be modified. - """ - self._mutations.append(Mutation(update=_make_write_pb(table, columns, values))) - - def insert_or_update(self, table, columns, values): - """Insert/update one or more table rows. - - :type table: str - :param table: Name of the table to be modified. - - :type columns: list of str - :param columns: Name of the table columns to be modified. - - :type values: list of lists - :param values: Values to be modified. - """ - self._mutations.append( - Mutation(insert_or_update=_make_write_pb(table, columns, values)) - ) - - def replace(self, table, columns, values): - """Replace one or more table rows. - - :type table: str - :param table: Name of the table to be modified. - - :type columns: list of str - :param columns: Name of the table columns to be modified. - - :type values: list of lists - :param values: Values to be modified. - """ - self._mutations.append(Mutation(replace=_make_write_pb(table, columns, values))) - - def delete(self, table, keyset): - """Delete one or more table rows. - - :type table: str - :param table: Name of the table to be modified. - - :type keyset: :class:`~google.cloud.spanner_v1.keyset.Keyset` - :param keyset: Keys/ranges identifying rows to delete. 
- """ - delete = Mutation.Delete(table=table, key_set=keyset._to_pb()) - self._mutations.append(Mutation(delete=delete)) - - -class Batch(_BatchBase): - """Accumulate mutations for transmission during :meth:`commit`. - """ - - committed = None - """Timestamp at which the batch was successfully committed.""" - - def _check_state(self): - """Helper for :meth:`commit` et al. - - Subclasses must override - - :raises: :exc:`ValueError` if the object's state is invalid for making - API requests. - """ - if self.committed is not None: - raise ValueError("Batch already committed") - - def commit(self): - """Commit mutations to the database. - - :rtype: datetime - :returns: timestamp of the committed changes. - """ - self._check_state() - database = self._session._database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - txn_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - response = api.commit( - self._session.name, - mutations=self._mutations, - single_use_transaction=txn_options, - metadata=metadata, - ) - self.committed = _pb_timestamp_to_datetime(response.commit_timestamp) - return self.committed - - def __enter__(self): - """Begin ``with`` block.""" - self._check_state() - - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """End ``with`` block.""" - if exc_type is None: - self.commit() - - -def _make_write_pb(table, columns, values): - """Helper for :meth:`Batch.insert` et aliae. - - :type table: str - :param table: Name of the table to be modified. - - :type columns: list of str - :param columns: Name of the table columns to be modified. - - :type values: list of lists - :param values: Values to be modified. - - :rtype: :class:`google.cloud.spanner_v1.proto.mutation_pb2.Mutation.Write` - :returns: Write protobuf - """ - return Mutation.Write( - table=table, columns=columns, values=_make_list_value_pbs(values) - ) diff --git a/spanner/google/cloud/spanner_v1/client.py b/spanner/google/cloud/spanner_v1/client.py deleted file mode 100644 index 264731178ee4..000000000000 --- a/spanner/google/cloud/spanner_v1/client.py +++ /dev/null @@ -1,359 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Parent client for calling the Cloud Spanner API. - -This is the base from which all interactions with the API occur. 
- -In the hierarchy of API concepts - -* a :class:`~google.cloud.spanner_v1.client.Client` owns an - :class:`~google.cloud.spanner_v1.instance.Instance` -* a :class:`~google.cloud.spanner_v1.instance.Instance` owns a - :class:`~google.cloud.spanner_v1.database.Database` -""" -import warnings - -from google.api_core.gapic_v1 import client_info -import google.api_core.client_options - -# pylint: disable=line-too-long -from google.cloud.spanner_admin_database_v1.gapic.database_admin_client import ( # noqa - DatabaseAdminClient, -) -from google.cloud.spanner_admin_instance_v1.gapic.instance_admin_client import ( # noqa - InstanceAdminClient, -) - -# pylint: enable=line-too-long - -from google.cloud.client import ClientWithProject -from google.cloud.spanner_v1 import __version__ -from google.cloud.spanner_v1._helpers import _metadata_with_prefix -from google.cloud.spanner_v1.instance import DEFAULT_NODE_COUNT -from google.cloud.spanner_v1.instance import Instance - -_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) -SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" -_USER_AGENT_DEPRECATED = ( - "The 'user_agent' argument to 'Client' is deprecated / unused. " - "Please pass an appropriate 'client_info' instead." -) - - -class InstanceConfig(object): - """Named configurations for Spanner instances. - - :type name: str - :param name: ID of the instance configuration - - :type display_name: str - :param display_name: Name of the instance configuration - """ - - def __init__(self, name, display_name): - self.name = name - self.display_name = display_name - - @classmethod - def from_pb(cls, config_pb): - """Construct an instance from the equvalent protobuf. - - :type config_pb: - :class:`~google.spanner.v1.spanner_instance_admin_pb2.InstanceConfig` - :param config_pb: the protobuf to parse - - :rtype: :class:`InstanceConfig` - :returns: an instance of this class - """ - return cls(config_pb.name, config_pb.display_name) - - -class Client(ClientWithProject): - """Client for interacting with Cloud Spanner API. - - .. note:: - - Since the Cloud Spanner API requires the gRPC transport, no - ``_http`` argument is accepted by this class. - - :type project: :class:`str` or :func:`unicode ` - :param project: (Optional) The ID of the project which owns the - instances, tables and data. If not provided, will - attempt to determine from the environment. - - :type credentials: - :class:`Credentials ` or - :data:`NoneType ` - :param credentials: (Optional) The authorization credentials to attach to requests. - These credentials identify this application to the service. - If none are specified, the client will attempt to ascertain - the credentials from the environment. - - :type client_info: :class:`google.api_core.gapic_v1.client_info.ClientInfo` - :param client_info: - (Optional) The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. Generally, - you only need to set this if you're developing your own library or - partner tool. - - :type user_agent: str - :param user_agent: - (Deprecated) The user agent to be used with API request. - Not used. - :type client_options: :class:`~google.api_core.client_options.ClientOptions` - or :class:`dict` - :param client_options: (Optional) Client options used to set user options - on the client. API Endpoint should be set through client_options. 
- - :raises: :class:`ValueError ` if both ``read_only`` - and ``admin`` are :data:`True` - """ - - _instance_admin_api = None - _database_admin_api = None - _endpoint_cache = {} - user_agent = None - _SET_PROJECT = True # Used by from_service_account_json() - - SCOPE = (SPANNER_ADMIN_SCOPE,) - """The scopes required for Google Cloud Spanner.""" - - def __init__( - self, - project=None, - credentials=None, - client_info=_CLIENT_INFO, - user_agent=None, - client_options=None, - ): - # NOTE: This API has no use for the _http argument, but sending it - # will have no impact since the _http() @property only lazily - # creates a working HTTP object. - super(Client, self).__init__( - project=project, credentials=credentials, _http=None - ) - self._client_info = client_info - if client_options and type(client_options) == dict: - self._client_options = google.api_core.client_options.from_dict( - client_options - ) - else: - self._client_options = client_options - - if user_agent is not None: - warnings.warn(_USER_AGENT_DEPRECATED, DeprecationWarning, stacklevel=2) - self.user_agent = user_agent - - @property - def credentials(self): - """Getter for client's credentials. - - :rtype: - :class:`Credentials ` - :returns: The credentials stored on the client. - """ - return self._credentials - - @property - def project_name(self): - """Project name to be used with Spanner APIs. - - .. note:: - - This property will not change if ``project`` does not, but the - return value is not cached. - - The project name is of the form - - ``"projects/{project}"`` - - :rtype: str - :returns: The project name to be used with the Cloud Spanner Admin - API RPC service. - """ - return "projects/" + self.project - - @property - def instance_admin_api(self): - """Helper for session-related API calls.""" - if self._instance_admin_api is None: - self._instance_admin_api = InstanceAdminClient( - credentials=self.credentials, - client_info=self._client_info, - client_options=self._client_options, - ) - return self._instance_admin_api - - @property - def database_admin_api(self): - """Helper for session-related API calls.""" - if self._database_admin_api is None: - self._database_admin_api = DatabaseAdminClient( - credentials=self.credentials, - client_info=self._client_info, - client_options=self._client_options, - ) - return self._database_admin_api - - def copy(self): - """Make a copy of this client. - - Copies the local data stored as simple types but does not copy the - current state of any open connections with the Cloud Bigtable API. - - :rtype: :class:`.Client` - :returns: A copy of the current client. - """ - return self.__class__(project=self.project, credentials=self._credentials) - - def list_instance_configs(self, page_size=None, page_token=None): - """List available instance configurations for the client's project. - - .. _RPC docs: https://cloud.google.com/spanner/docs/reference/rpc/\ - google.spanner.admin.instance.v1#google.spanner.admin.\ - instance.v1.InstanceAdmin.ListInstanceConfigs - - See `RPC docs`_. - - :type page_size: int - :param page_size: - Optional. The maximum number of configs in each page of results - from this request. Non-positive values are ignored. Defaults - to a sensible value set by the API. - - :type page_token: str - :param page_token: - Optional. If present, return the next batch of configs, using - the value, which must correspond to the ``nextPageToken`` value - returned in the previous response. 
Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing - the token. - - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: - Iterator of - :class:`~google.cloud.spanner_v1.instance.InstanceConfig` - resources within the client's project. - """ - metadata = _metadata_with_prefix(self.project_name) - path = "projects/%s" % (self.project,) - page_iter = self.instance_admin_api.list_instance_configs( - path, page_size=page_size, metadata=metadata - ) - page_iter.next_page_token = page_token - page_iter.item_to_value = _item_to_instance_config - return page_iter - - def instance( - self, - instance_id, - configuration_name=None, - display_name=None, - node_count=DEFAULT_NODE_COUNT, - ): - """Factory to create a instance associated with this client. - - :type instance_id: str - :param instance_id: The ID of the instance. - - :type configuration_name: string - :param configuration_name: - (Optional) Name of the instance configuration used to set up the - instance's cluster, in the form: - ``projects//instanceConfigs/``. - **Required** for instances which do not yet exist. - - :type display_name: str - :param display_name: (Optional) The display name for the instance in - the Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the instance ID. - - :type node_count: int - :param node_count: (Optional) The number of nodes in the instance's - cluster; used to set up the instance's cluster. - - :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` - :returns: an instance owned by this client. - """ - return Instance(instance_id, self, configuration_name, node_count, display_name) - - def list_instances(self, filter_="", page_size=None, page_token=None): - """List instances for the client's project. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.InstanceAdmin.ListInstances - - :type filter_: string - :param filter_: (Optional) Filter to select instances listed. See - the ``ListInstancesRequest`` docs above for examples. - - :type page_size: int - :param page_size: - Optional. The maximum number of instances in each page of results - from this request. Non-positive values are ignored. Defaults - to a sensible value set by the API. - - :type page_token: str - :param page_token: - Optional. If present, return the next batch of instances, using - the value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing - the token. - - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: - Iterator of :class:`~google.cloud.spanner_v1.instance.Instance` - resources within the client's project. - """ - metadata = _metadata_with_prefix(self.project_name) - path = "projects/%s" % (self.project,) - page_iter = self.instance_admin_api.list_instances( - path, page_size=page_size, metadata=metadata - ) - page_iter.item_to_value = self._item_to_instance - page_iter.next_page_token = page_token - return page_iter - - def _item_to_instance(self, iterator, instance_pb): - """Convert an instance protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. 
- - :type instance_pb: :class:`~google.spanner.admin.instance.v1.Instance` - :param instance_pb: An instance returned from the API. - - :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` - :returns: The next instance in the page. - """ - return Instance.from_pb(instance_pb, self) - - -def _item_to_instance_config(iterator, config_pb): # pylint: disable=unused-argument - """Convert an instance config protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type config_pb: - :class:`~google.spanner.admin.instance.v1.InstanceConfig` - :param config_pb: An instance config returned from the API. - - :rtype: :class:`~google.cloud.spanner_v1.instance.InstanceConfig` - :returns: The next instance config in the page. - """ - return InstanceConfig.from_pb(config_pb) diff --git a/spanner/google/cloud/spanner_v1/database.py b/spanner/google/cloud/spanner_v1/database.py deleted file mode 100644 index 49abe919d5fa..000000000000 --- a/spanner/google/cloud/spanner_v1/database.py +++ /dev/null @@ -1,863 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Cloud Spanner Database.""" - -import copy -import functools -import os -import re -import threading -import warnings - -from google.api_core.client_options import ClientOptions -import google.auth.credentials -from google.protobuf.struct_pb2 import Struct -from google.cloud.exceptions import NotFound -from google.api_core.exceptions import PermissionDenied -import six - -# pylint: disable=ungrouped-imports -from google.cloud.spanner_v1._helpers import _make_value_pb -from google.cloud.spanner_v1._helpers import _metadata_with_prefix -from google.cloud.spanner_v1.batch import Batch -from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient -from google.cloud.spanner_v1.keyset import KeySet -from google.cloud.spanner_v1.pool import BurstyPool -from google.cloud.spanner_v1.pool import SessionCheckout -from google.cloud.spanner_v1.session import Session -from google.cloud.spanner_v1.snapshot import _restart_on_unavailable -from google.cloud.spanner_v1.snapshot import Snapshot -from google.cloud.spanner_v1.streamed import StreamedResultSet -from google.cloud.spanner_v1.proto.transaction_pb2 import ( - TransactionSelector, - TransactionOptions, -) - -# pylint: enable=ungrouped-imports - - -SPANNER_DATA_SCOPE = "https://www.googleapis.com/auth/spanner.data" - - -_DATABASE_NAME_RE = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P[a-z][-a-z0-9]*)/" - r"databases/(?P[a-z][a-z0-9_\-]*[a-z0-9])$" -) - - -_RESOURCE_ROUTING_PERMISSIONS_WARNING = ( - "The client library attempted to connect to an endpoint closer to your Cloud Spanner data " - "but was unable to do so. The client library will fall back and route requests to the endpoint " - "given in the client options, which may result in increased latency. 
" - "We recommend including the scope https://www.googleapis.com/auth/spanner.admin so that the " - "client library can get an instance-specific endpoint and efficiently route requests." -) - - -class ResourceRoutingPermissionsWarning(Warning): - pass - - -class Database(object): - """Representation of a Cloud Spanner Database. - - We can use a :class:`Database` to: - - * :meth:`create` the database - * :meth:`reload` the database - * :meth:`update` the database - * :meth:`drop` the database - - :type database_id: str - :param database_id: The ID of the database. - - :type instance: :class:`~google.cloud.spanner_v1.instance.Instance` - :param instance: The instance that owns the database. - - :type ddl_statements: list of string - :param ddl_statements: (Optional) DDL statements, excluding the - CREATE DATABASE statement. - - :type pool: concrete subclass of - :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. - :param pool: (Optional) session pool to be used by database. If not - passed, the database will construct an instance of - :class:`~google.cloud.spanner_v1.pool.BurstyPool`. - """ - - _spanner_api = None - - def __init__(self, database_id, instance, ddl_statements=(), pool=None): - self.database_id = database_id - self._instance = instance - self._ddl_statements = _check_ddl_statements(ddl_statements) - self._local = threading.local() - - if pool is None: - pool = BurstyPool() - - self._pool = pool - pool.bind(self) - - @classmethod - def from_pb(cls, database_pb, instance, pool=None): - """Creates an instance of this class from a protobuf. - - :type database_pb: - :class:`google.spanner.v2.spanner_instance_admin_pb2.Instance` - :param database_pb: A instance protobuf object. - - :type instance: :class:`~google.cloud.spanner_v1.instance.Instance` - :param instance: The instance that owns the database. - - :type pool: concrete subclass of - :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. - :param pool: (Optional) session pool to be used by database. - - :rtype: :class:`Database` - :returns: The database parsed from the protobuf response. - :raises ValueError: - if the instance name does not match the expected format - or if the parsed project ID does not match the project ID - on the instance's client, or if the parsed instance ID does - not match the instance's ID. - """ - match = _DATABASE_NAME_RE.match(database_pb.name) - if match is None: - raise ValueError( - "Database protobuf name was not in the " "expected format.", - database_pb.name, - ) - if match.group("project") != instance._client.project: - raise ValueError( - "Project ID on database does not match the " - "project ID on the instance's client" - ) - instance_id = match.group("instance_id") - if instance_id != instance.instance_id: - raise ValueError( - "Instance ID on database does not match the " - "Instance ID on the instance" - ) - database_id = match.group("database_id") - - return cls(database_id, instance, pool=pool) - - @property - def name(self): - """Database name used in requests. - - .. note:: - - This property will not change if ``database_id`` does not, but the - return value is not cached. - - The database name is of the form - - ``"projects/../instances/../databases/{database_id}"`` - - :rtype: str - :returns: The database name. - """ - return self._instance.name + "/databases/" + self.database_id - - @property - def ddl_statements(self): - """DDL Statements used to define database schema. 
- - See - cloud.google.com/spanner/docs/data-definition-language - - :rtype: sequence of string - :returns: the statements - """ - return self._ddl_statements - - @property - def spanner_api(self): - """Helper for session-related API calls.""" - if self._spanner_api is None: - credentials = self._instance._client.credentials - if isinstance(credentials, google.auth.credentials.Scoped): - credentials = credentials.with_scopes((SPANNER_DATA_SCOPE,)) - client_info = self._instance._client._client_info - client_options = self._instance._client._client_options - if ( - os.getenv("GOOGLE_CLOUD_SPANNER_ENABLE_RESOURCE_BASED_ROUTING") - == "true" - ): - endpoint_cache = self._instance._client._endpoint_cache - if self._instance.name in endpoint_cache: - client_options = ClientOptions( - api_endpoint=endpoint_cache[self._instance.name] - ) - else: - try: - api = self._instance._client.instance_admin_api - resp = api.get_instance( - self._instance.name, - field_mask={"paths": ["endpoint_uris"]}, - metadata=_metadata_with_prefix(self.name), - ) - endpoints = resp.endpoint_uris - if endpoints: - endpoint_cache[self._instance.name] = list(endpoints)[0] - client_options = ClientOptions( - api_endpoint=endpoint_cache[self._instance.name] - ) - # If there are no endpoints, use default endpoint. - except PermissionDenied: - warnings.warn( - _RESOURCE_ROUTING_PERMISSIONS_WARNING, - ResourceRoutingPermissionsWarning, - stacklevel=2, - ) - self._spanner_api = SpannerClient( - credentials=credentials, - client_info=client_info, - client_options=client_options, - ) - return self._spanner_api - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - other.database_id == self.database_id and other._instance == self._instance - ) - - def __ne__(self, other): - return not self == other - - def create(self): - """Create this database within its instance - - Inclues any configured schema assigned to :attr:`ddl_statements`. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase - - :rtype: :class:`~google.api_core.operation.Operation` - :returns: a future used to poll the status of the create request - :raises Conflict: if the database already exists - :raises NotFound: if the instance owning the database does not exist - """ - api = self._instance._client.database_admin_api - metadata = _metadata_with_prefix(self.name) - db_name = self.database_id - if "-" in db_name: - db_name = "`%s`" % (db_name,) - - future = api.create_database( - parent=self._instance.name, - create_statement="CREATE DATABASE %s" % (db_name,), - extra_statements=list(self._ddl_statements), - metadata=metadata, - ) - return future - - def exists(self): - """Test whether this database exists. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL - - :rtype: bool - :returns: True if the database exists, else false. - """ - api = self._instance._client.database_admin_api - metadata = _metadata_with_prefix(self.name) - - try: - api.get_database_ddl(self.name, metadata=metadata) - except NotFound: - return False - return True - - def reload(self): - """Reload this database. - - Refresh any configured schema into :attr:`ddl_statements`. 
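
A brief sketch of the create/exists/reload round trip documented above; blocking on the returned future with ``result()`` is standard ``google.api_core.operation.Operation`` behaviour:

operation = database.create()   # returns a long-running Operation future
operation.result()              # block until the CreateDatabase request completes
assert database.exists()
database.reload()               # refresh database.ddl_statements from the service
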
- - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL - - :raises NotFound: if the database does not exist - """ - api = self._instance._client.database_admin_api - metadata = _metadata_with_prefix(self.name) - response = api.get_database_ddl(self.name, metadata=metadata) - self._ddl_statements = tuple(response.statements) - - def update_ddl(self, ddl_statements, operation_id=""): - """Update DDL for this database. - - Apply any configured schema from :attr:`ddl_statements`. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase - - :type ddl_statements: Sequence[str] - :param ddl_statements: a list of DDL statements to use on this database - :type operation_id: str - :param operation_id: (optional) a string ID for the long-running operation - - :rtype: :class:`google.api_core.operation.Operation` - :returns: an operation instance - :raises NotFound: if the database does not exist - """ - client = self._instance._client - api = client.database_admin_api - metadata = _metadata_with_prefix(self.name) - - future = api.update_database_ddl( - self.name, ddl_statements, operation_id=operation_id, metadata=metadata - ) - return future - - def drop(self): - """Drop this database. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase - """ - api = self._instance._client.database_admin_api - metadata = _metadata_with_prefix(self.name) - api.drop_database(self.name, metadata=metadata) - - def execute_partitioned_dml(self, dml, params=None, param_types=None): - """Execute a partitionable DML statement. - - :type dml: str - :param dml: DML statement - - :type params: dict, {str -> column value} - :param params: values for parameter replacement. Keys must match - the names used in ``dml``. - - :type param_types: dict[str -> Union[dict, .types.Type]] - :param param_types: - (Optional) maps explicit types for one or more param values; - required if parameters are passed. - - :rtype: int - :returns: Count of rows affected by the DML statement. - """ - if params is not None: - if param_types is None: - raise ValueError("Specify 'param_types' when passing 'params'.") - params_pb = Struct( - fields={key: _make_value_pb(value) for key, value in params.items()} - ) - else: - params_pb = None - - api = self.spanner_api - - txn_options = TransactionOptions( - partitioned_dml=TransactionOptions.PartitionedDml() - ) - - metadata = _metadata_with_prefix(self.name) - - with SessionCheckout(self._pool) as session: - - txn = api.begin_transaction(session.name, txn_options, metadata=metadata) - - txn_selector = TransactionSelector(id=txn.id) - - restart = functools.partial( - api.execute_streaming_sql, - session.name, - dml, - transaction=txn_selector, - params=params_pb, - param_types=param_types, - metadata=metadata, - ) - - iterator = _restart_on_unavailable(restart) - - result_set = StreamedResultSet(iterator) - list(result_set) # consume all partials - - return result_set.stats.row_count_lower_bound - - def session(self, labels=None): - """Factory to create a session for this database. - - :type labels: dict (str -> str) or None - :param labels: (Optional) user-assigned labels for the session. - - :rtype: :class:`~google.cloud.spanner_v1.session.Session` - :returns: a session bound to this database. 
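
A hedged sketch of the partitioned DML helper above; the ``contacts`` table and its ``status`` column are hypothetical:

from google.cloud.spanner_v1 import param_types

row_count = database.execute_partitioned_dml(
    "UPDATE contacts SET status = 'archived' WHERE status = @old",   # hypothetical table
    params={"old": "inactive"},
    param_types={"old": param_types.STRING},
)
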
- """ - return Session(self, labels=labels) - - def snapshot(self, **kw): - """Return an object which wraps a snapshot. - - The wrapper *must* be used as a context manager, with the snapshot - as the value returned by the wrapper. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.TransactionOptions.ReadOnly - - :type kw: dict - :param kw: - Passed through to - :class:`~google.cloud.spanner_v1.snapshot.Snapshot` constructor. - - :rtype: :class:`~google.cloud.spanner_v1.database.SnapshotCheckout` - :returns: new wrapper - """ - return SnapshotCheckout(self, **kw) - - def batch(self): - """Return an object which wraps a batch. - - The wrapper *must* be used as a context manager, with the batch - as the value returned by the wrapper. - - :rtype: :class:`~google.cloud.spanner_v1.database.BatchCheckout` - :returns: new wrapper - """ - return BatchCheckout(self) - - def batch_snapshot(self, read_timestamp=None, exact_staleness=None): - """Return an object which wraps a batch read / query. - - :type read_timestamp: :class:`datetime.datetime` - :param read_timestamp: Execute all reads at the given timestamp. - - :type exact_staleness: :class:`datetime.timedelta` - :param exact_staleness: Execute all reads at a timestamp that is - ``exact_staleness`` old. - - :rtype: :class:`~google.cloud.spanner_v1.database.BatchSnapshot` - :returns: new wrapper - """ - return BatchSnapshot( - self, read_timestamp=read_timestamp, exact_staleness=exact_staleness - ) - - def run_in_transaction(self, func, *args, **kw): - """Perform a unit of work in a transaction, retrying on abort. - - :type func: callable - :param func: takes a required positional argument, the transaction, - and additional positional / keyword arguments as supplied - by the caller. - - :type args: tuple - :param args: additional positional arguments to be passed to ``func``. - - :type kw: dict - :param kw: (Optional) keyword arguments to be passed to ``func``. - If passed, "timeout_secs" will be removed and used to - override the default retry timeout which defines maximum timestamp - to continue retrying the transaction. - - :rtype: Any - :returns: The return value of ``func``. - - :raises Exception: - reraises any non-ABORT execptions raised by ``func``. - """ - # Sanity check: Is there a transaction already running? - # If there is, then raise a red flag. Otherwise, mark that this one - # is running. - if getattr(self._local, "transaction_running", False): - raise RuntimeError("Spanner does not support nested transactions.") - self._local.transaction_running = True - - # Check out a session and run the function in a transaction; once - # done, flip the sanity check bit back. - try: - with SessionCheckout(self._pool) as session: - return session.run_in_transaction(func, *args, **kw) - finally: - self._local.transaction_running = False - - -class BatchCheckout(object): - """Context manager for using a batch from a database. - - Inside the context manager, checks out a session from the database, - creates a batch from it, making the batch available. - - Caller must *not* use the batch to perform API requests outside the scope - of the context manager. 
- - :type database: :class:`~google.cloud.spanner.database.Database` - :param database: database to use - """ - - def __init__(self, database): - self._database = database - self._session = self._batch = None - - def __enter__(self): - """Begin ``with`` block.""" - session = self._session = self._database._pool.get() - batch = self._batch = Batch(session) - return batch - - def __exit__(self, exc_type, exc_val, exc_tb): - """End ``with`` block.""" - try: - if exc_type is None: - self._batch.commit() - finally: - self._database._pool.put(self._session) - - -class SnapshotCheckout(object): - """Context manager for using a snapshot from a database. - - Inside the context manager, checks out a session from the database, - creates a snapshot from it, making the snapshot available. - - Caller must *not* use the snapshot to perform API requests outside the - scope of the context manager. - - :type database: :class:`~google.cloud.spanner.database.Database` - :param database: database to use - - :type kw: dict - :param kw: - Passed through to - :class:`~google.cloud.spanner_v1.snapshot.Snapshot` constructor. - """ - - def __init__(self, database, **kw): - self._database = database - self._session = None - self._kw = kw - - def __enter__(self): - """Begin ``with`` block.""" - session = self._session = self._database._pool.get() - return Snapshot(session, **self._kw) - - def __exit__(self, exc_type, exc_val, exc_tb): - """End ``with`` block.""" - self._database._pool.put(self._session) - - -class BatchSnapshot(object): - """Wrapper for generating and processing read / query batches. - - :type database: :class:`~google.cloud.spanner.database.Database` - :param database: database to use - - :type read_timestamp: :class:`datetime.datetime` - :param read_timestamp: Execute all reads at the given timestamp. - - :type exact_staleness: :class:`datetime.timedelta` - :param exact_staleness: Execute all reads at a timestamp that is - ``exact_staleness`` old. - """ - - def __init__(self, database, read_timestamp=None, exact_staleness=None): - self._database = database - self._session = None - self._snapshot = None - self._read_timestamp = read_timestamp - self._exact_staleness = exact_staleness - - @classmethod - def from_dict(cls, database, mapping): - """Reconstruct an instance from a mapping. - - :type database: :class:`~google.cloud.spanner.database.Database` - :param database: database to use - - :type mapping: mapping - :param mapping: serialized state of the instance - - :rtype: :class:`BatchSnapshot` - """ - instance = cls(database) - session = instance._session = database.session() - session._session_id = mapping["session_id"] - snapshot = instance._snapshot = session.snapshot() - snapshot._transaction_id = mapping["transaction_id"] - return instance - - def to_dict(self): - """Return state as a dictionary. - - Result can be used to serialize the instance and reconstitute - it later using :meth:`from_dict`. - - :rtype: dict - """ - session = self._get_session() - snapshot = self._get_snapshot() - return { - "session_id": session._session_id, - "transaction_id": snapshot._transaction_id, - } - - def _get_session(self): - """Create session as needed. - - .. note:: - - Caller is responsible for cleaning up the session after - all partitions have been processed. 
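
To show the serialization round trip provided by ``to_dict`` / ``from_dict`` above (the hand-off to another worker process is implied, not shown):

from google.cloud.spanner_v1.database import BatchSnapshot

batch_snapshot = database.batch_snapshot()
state = batch_snapshot.to_dict()          # plain dict holding session and transaction IDs

# ...hand `state` to another process or machine, then rebuild the wrapper there...
restored = BatchSnapshot.from_dict(database, state)
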
- """ - if self._session is None: - session = self._session = self._database.session() - session.create() - return self._session - - def _get_snapshot(self): - """Create snapshot if needed.""" - if self._snapshot is None: - self._snapshot = self._get_session().snapshot( - read_timestamp=self._read_timestamp, - exact_staleness=self._exact_staleness, - multi_use=True, - ) - self._snapshot.begin() - return self._snapshot - - def read(self, *args, **kw): - """Convenience method: perform read operation via snapshot. - - See :meth:`~google.cloud.spanner_v1.snapshot.Snapshot.read`. - """ - return self._get_snapshot().read(*args, **kw) - - def execute_sql(self, *args, **kw): - """Convenience method: perform query operation via snapshot. - - See :meth:`~google.cloud.spanner_v1.snapshot.Snapshot.execute_sql`. - """ - return self._get_snapshot().execute_sql(*args, **kw) - - def generate_read_batches( - self, - table, - columns, - keyset, - index="", - partition_size_bytes=None, - max_partitions=None, - ): - """Start a partitioned batch read operation. - - Uses the ``PartitionRead`` API request to initiate the partitioned - read. Returns a list of batch information needed to perform the - actual reads. - - :type table: str - :param table: name of the table from which to fetch data - - :type columns: list of str - :param columns: names of columns to be retrieved - - :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` - :param keyset: keys / ranges identifying rows to be retrieved - - :type index: str - :param index: (Optional) name of index to use, rather than the - table's primary key - - :type partition_size_bytes: int - :param partition_size_bytes: - (Optional) desired size for each partition generated. The service - uses this as a hint, the actual partition size may differ. - - :type max_partitions: int - :param max_partitions: - (Optional) desired maximum number of partitions generated. The - service uses this as a hint, the actual number of partitions may - differ. - - :rtype: iterable of dict - :returns: - mappings of information used peform actual partitioned reads via - :meth:`process_read_batch`. - """ - partitions = self._get_snapshot().partition_read( - table=table, - columns=columns, - keyset=keyset, - index=index, - partition_size_bytes=partition_size_bytes, - max_partitions=max_partitions, - ) - - read_info = { - "table": table, - "columns": columns, - "keyset": keyset._to_dict(), - "index": index, - } - for partition in partitions: - yield {"partition": partition, "read": read_info.copy()} - - def process_read_batch(self, batch): - """Process a single, partitioned read. - - :type batch: mapping - :param batch: - one of the mappings returned from an earlier call to - :meth:`generate_read_batches`. - - :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` - :returns: a result set instance which can be used to consume rows. - """ - kwargs = copy.deepcopy(batch["read"]) - keyset_dict = kwargs.pop("keyset") - kwargs["keyset"] = KeySet._from_dict(keyset_dict) - return self._get_snapshot().read(partition=batch["partition"], **kwargs) - - def generate_query_batches( - self, - sql, - params=None, - param_types=None, - partition_size_bytes=None, - max_partitions=None, - ): - """Start a partitioned query operation. - - Uses the ``PartitionQuery`` API request to start a partitioned - query operation. Returns a list of batch information needed to - peform the actual queries. 
- - :type sql: str - :param sql: SQL query statement - - :type params: dict, {str -> column value} - :param params: values for parameter replacement. Keys must match - the names used in ``sql``. - - :type param_types: dict[str -> Union[dict, .types.Type]] - :param param_types: - (Optional) maps explicit types for one or more param values; - required if parameters are passed. - - :type partition_size_bytes: int - :param partition_size_bytes: - (Optional) desired size for each partition generated. The service - uses this as a hint, the actual partition size may differ. - - :type partition_size_bytes: int - :param partition_size_bytes: - (Optional) desired size for each partition generated. The service - uses this as a hint, the actual partition size may differ. - - :type max_partitions: int - :param max_partitions: - (Optional) desired maximum number of partitions generated. The - service uses this as a hint, the actual number of partitions may - differ. - - :rtype: iterable of dict - :returns: - mappings of information used peform actual partitioned reads via - :meth:`process_read_batch`. - """ - partitions = self._get_snapshot().partition_query( - sql=sql, - params=params, - param_types=param_types, - partition_size_bytes=partition_size_bytes, - max_partitions=max_partitions, - ) - - query_info = {"sql": sql} - if params: - query_info["params"] = params - query_info["param_types"] = param_types - - for partition in partitions: - yield {"partition": partition, "query": query_info} - - def process_query_batch(self, batch): - """Process a single, partitioned query. - - :type batch: mapping - :param batch: - one of the mappings returned from an earlier call to - :meth:`generate_query_batches`. - - :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` - :returns: a result set instance which can be used to consume rows. - """ - return self._get_snapshot().execute_sql( - partition=batch["partition"], **batch["query"] - ) - - def process(self, batch): - """Process a single, partitioned query or read. - - :type batch: mapping - :param batch: - one of the mappings returned from an earlier call to - :meth:`generate_query_batches`. - - :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` - :returns: a result set instance which can be used to consume rows. - :raises ValueError: if batch does not contain either 'read' or 'query' - """ - if "query" in batch: - return self.process_query_batch(batch) - if "read" in batch: - return self.process_read_batch(batch) - raise ValueError("Invalid batch") - - def close(self): - """Clean up underlying session. - - .. note:: - - If the transaction has been shared across multiple machines, - calling this on any machine would invalidate the transaction - everywhere. Ideally this would be called when data has been read - from all the partitions. - """ - if self._session is not None: - self._session.delete() - - -def _check_ddl_statements(value): - """Validate DDL Statements used to define database schema. - - See - https://cloud.google.com/spanner/docs/data-definition-language - - :type value: list of string - :param value: DDL statements, excluding the 'CREATE DATABSE' statement - - :rtype: tuple - :returns: tuple of validated DDL statement strings. - :raises ValueError: - if elements in ``value`` are not strings, or if ``value`` contains - a ``CREATE DATABASE`` statement. 
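
And the corresponding query-side flow, using the generic ``process`` dispatcher documented above (the SQL statement and table are hypothetical):

batch_snapshot = database.batch_snapshot()
for batch in batch_snapshot.generate_query_batches(
    "SELECT id, email FROM contacts WHERE email IS NOT NULL"   # hypothetical SQL
):
    for row in batch_snapshot.process(batch):   # dispatches to process_query_batch
        pass  # consume rows
batch_snapshot.close()
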
- """ - if not all(isinstance(line, six.string_types) for line in value): - raise ValueError("Pass a list of strings") - - if any("create database" in line.lower() for line in value): - raise ValueError("Do not pass a 'CREATE DATABASE' statement") - - return tuple(value) diff --git a/spanner/google/cloud/spanner_v1/gapic/__init__.py b/spanner/google/cloud/spanner_v1/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_v1/gapic/enums.py b/spanner/google/cloud/spanner_v1/gapic/enums.py deleted file mode 100644 index 445abc8429c6..000000000000 --- a/spanner/google/cloud/spanner_v1/gapic/enums.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class NullValue(enum.IntEnum): - """ - ``NullValue`` is a singleton enumeration to represent the null value for - the ``Value`` type union. - - The JSON representation for ``NullValue`` is JSON ``null``. - - Attributes: - NULL_VALUE (int): Null value. - """ - - NULL_VALUE = 0 - - -class TypeCode(enum.IntEnum): - """ - ``TypeCode`` is used as part of ``Type`` to indicate the type of a Cloud - Spanner value. - - Each legal value of a type can be encoded to or decoded from a JSON - value, using the encodings described below. All Cloud Spanner values can - be ``null``, regardless of type; ``null``\ s are always encoded as a - JSON ``null``. - - Attributes: - TYPE_CODE_UNSPECIFIED (int): Not specified. - BOOL (int): Encoded as JSON ``true`` or ``false``. - INT64 (int): Encoded as ``string``, in decimal format. - FLOAT64 (int): Encoded as ``number``, or the strings ``"NaN"``, ``"Infinity"``, or - ``"-Infinity"``. - TIMESTAMP (int): Encoded as ``string`` in RFC 3339 timestamp format. The time zone must - be present, and must be ``"Z"``. - - If the schema has the column option ``allow_commit_timestamp=true``, the - placeholder string ``"spanner.commit_timestamp()"`` can be used to - instruct the system to insert the commit timestamp associated with the - transaction commit. - DATE (int): Encoded as ``string`` in RFC 3339 date format. - STRING (int): Encoded as ``string``. - BYTES (int): Encoded as a base64-encoded ``string``, as described in RFC 4648, - section 4. - ARRAY (int): Encoded as ``list``, where the list elements are represented according - to ``array_element_type``. - STRUCT (int): Encoded as ``list``, where list element ``i`` is represented according - to [struct\_type.fields[i]][google.spanner.v1.StructType.fields]. - """ - - TYPE_CODE_UNSPECIFIED = 0 - BOOL = 1 - INT64 = 2 - FLOAT64 = 3 - TIMESTAMP = 4 - DATE = 5 - STRING = 6 - BYTES = 7 - ARRAY = 8 - STRUCT = 9 - - -class ExecuteSqlRequest(object): - class QueryMode(enum.IntEnum): - """ - Mode in which the statement must be processed. - - Attributes: - NORMAL (int): The default mode. Only the statement results are returned. 
- PLAN (int): This mode returns only the query plan, without any results or - execution statistics information. - PROFILE (int): This mode returns both the query plan and the execution statistics along - with the results. - """ - - NORMAL = 0 - PLAN = 1 - PROFILE = 2 - - -class PlanNode(object): - class Kind(enum.IntEnum): - """ - The kind of ``PlanNode``. Distinguishes between the two different kinds - of nodes that can appear in a query plan. - - Attributes: - KIND_UNSPECIFIED (int): Not specified. - RELATIONAL (int): Denotes a Relational operator node in the expression tree. Relational - operators represent iterative processing of rows during query execution. - For example, a ``TableScan`` operation that reads rows from a table. - SCALAR (int): Denotes a Scalar node in the expression tree. Scalar nodes represent - non-iterable entities in the query plan. For example, constants or - arithmetic operators appearing inside predicate expressions or references - to column names. - """ - - KIND_UNSPECIFIED = 0 - RELATIONAL = 1 - SCALAR = 2 diff --git a/spanner/google/cloud/spanner_v1/gapic/spanner_client.py b/spanner/google/cloud/spanner_v1/gapic/spanner_client.py deleted file mode 100644 index cf6aafd6b6ba..000000000000 --- a/spanner/google/cloud/spanner_v1/gapic/spanner_client.py +++ /dev/null @@ -1,1898 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.spanner.v1 Spanner API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.spanner_v1.gapic import enums -from google.cloud.spanner_v1.gapic import spanner_client_config -from google.cloud.spanner_v1.gapic.transports import spanner_grpc_transport -from google.cloud.spanner_v1.proto import keys_pb2 -from google.cloud.spanner_v1.proto import mutation_pb2 -from google.cloud.spanner_v1.proto import result_set_pb2 -from google.cloud.spanner_v1.proto import spanner_pb2 -from google.cloud.spanner_v1.proto import spanner_pb2_grpc -from google.cloud.spanner_v1.proto import transaction_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import struct_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-spanner").version - - -class SpannerClient(object): - """ - Cloud Spanner API - - The Cloud Spanner API can be used to manage sessions and execute - transactions on data stored in Cloud Spanner databases. - """ - - SERVICE_ADDRESS = "spanner.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. 
This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.spanner.v1.Spanner" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpannerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def database_path(cls, project, instance, database): - """Return a fully-qualified database string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/databases/{database}", - project=project, - instance=instance, - database=database, - ) - - @classmethod - def session_path(cls, project, instance, database, session): - """Return a fully-qualified session string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}", - project=project, - instance=instance, - database=database, - session=session, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.SpannerGrpcTransport, - Callable[[~.Credentials, type], ~.SpannerGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = spanner_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=spanner_grpc_transport.SpannerGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = spanner_grpc_transport.SpannerGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_session( - self, - database, - session=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new session. A session can be used to perform transactions - that read and/or modify data in a Cloud Spanner database. Sessions are - meant to be reused for many consecutive transactions. - - Sessions can only execute one transaction at a time. To execute multiple - concurrent read-write/write-only transactions, create multiple sessions. - Note that standalone reads and queries use a transaction internally, and - count toward the one transaction limit. - - Active sessions use additional server resources, so it is a good idea to - delete idle and unneeded sessions. Aside from explicit deletes, Cloud - Spanner can delete sessions for which no operations are sent for more - than an hour. If a session is deleted, requests to it return - ``NOT_FOUND``. - - Idle sessions can be kept alive by sending a trivial SQL query - periodically, e.g., ``"SELECT 1"``. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> response = client.create_session(database) - - Args: - database (str): Required. The database in which the new session is created. 
- session (Union[dict, ~google.cloud.spanner_v1.types.Session]): The session to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Session` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.Session` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_session" not in self._inner_api_calls: - self._inner_api_calls[ - "create_session" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_session, - default_retry=self._method_configs["CreateSession"].retry, - default_timeout=self._method_configs["CreateSession"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.CreateSessionRequest(database=database, session=session) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_session"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def batch_create_sessions( - self, - database, - session_count, - session_template=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates multiple new sessions. - - This API can be used to initialize a session cache on the clients. - See https://goo.gl/TgSFN2 for best practices on session cache management. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> # TODO: Initialize `session_count`: - >>> session_count = 0 - >>> - >>> response = client.batch_create_sessions(database, session_count) - - Args: - database (str): Required. The database in which the new sessions are created. - session_count (int): Required. The number of sessions to be created in this batch call. The - API may return fewer than the requested number of sessions. If a - specific number of sessions are desired, the client can make additional - calls to BatchCreateSessions (adjusting ``session_count`` as necessary). - session_template (Union[dict, ~google.cloud.spanner_v1.types.Session]): Parameters to be applied to each created session. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Session` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.BatchCreateSessionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "batch_create_sessions" not in self._inner_api_calls: - self._inner_api_calls[ - "batch_create_sessions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.batch_create_sessions, - default_retry=self._method_configs["BatchCreateSessions"].retry, - default_timeout=self._method_configs["BatchCreateSessions"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.BatchCreateSessionsRequest( - database=database, - session_count=session_count, - session_template=session_template, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["batch_create_sessions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_session( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets a session. Returns ``NOT_FOUND`` if the session does not exist. - This is mainly useful for determining whether a session is still alive. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> name = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> response = client.get_session(name) - - Args: - name (str): Required. The name of the session to retrieve. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.Session` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_session" not in self._inner_api_calls: - self._inner_api_calls[ - "get_session" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_session, - default_retry=self._method_configs["GetSession"].retry, - default_timeout=self._method_configs["GetSession"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.GetSessionRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_session"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_sessions( - self, - database, - page_size=None, - filter_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all sessions in a given database. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_sessions(database): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_sessions(database).pages: - ... for element in page: - ... # process element - ... pass - - Args: - database (str): Required. The database in which to list sessions. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - filter_ (str): An expression for filtering the results of the request. Filter rules are - case insensitive. The fields eligible for filtering are: - - - ``labels.key`` where key is the name of a label - - Some examples of using filters are: - - - ``labels.env:*`` --> The session has the label "env". - - ``labels.env:dev`` --> The session has the label "env" and the value - of the label contains the string "dev". - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.spanner_v1.types.Session` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_sessions" not in self._inner_api_calls: - self._inner_api_calls[ - "list_sessions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_sessions, - default_retry=self._method_configs["ListSessions"].retry, - default_timeout=self._method_configs["ListSessions"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ListSessionsRequest( - database=database, page_size=page_size, filter=filter_ - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("database", database)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_sessions"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="sessions", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_session( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Ends a session, releasing server resources associated with it. This will - asynchronously trigger cancellation of any operations that are running with - this session. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> name = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> client.delete_session(name) - - Args: - name (str): Required. The name of the session to delete. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_session" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_session" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_session, - default_retry=self._method_configs["DeleteSession"].retry, - default_timeout=self._method_configs["DeleteSession"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.DeleteSessionRequest(name=name) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_session"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def execute_sql( - self, - session, - sql, - transaction=None, - params=None, - param_types=None, - resume_token=None, - query_mode=None, - partition_token=None, - seqno=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Executes an SQL statement, returning all results in a single reply. This - method cannot be used to return a result set larger than 10 MiB; if the - query yields more data than that, the query fails with a - ``FAILED_PRECONDITION`` error. - - Operations inside read-write transactions might return ``ABORTED``. If - this occurs, the application should restart the transaction from the - beginning. See ``Transaction`` for more details. - - Larger result sets can be fetched in streaming fashion by calling - ``ExecuteStreamingSql`` instead. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `sql`: - >>> sql = '' - >>> - >>> response = client.execute_sql(session, sql) - - Args: - session (str): Required. The session in which the SQL query should be performed. - sql (str): Required. The SQL string. - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. - - For queries, if none is provided, the default is a temporary read-only - transaction with strong concurrency. - - Standard DML statements require a read-write transaction. To protect - against replays, single-use transactions are not supported. The caller - must either supply an existing transaction ID or begin a new transaction. - - Partitioned DML requires an existing Partitioned DML transaction ID. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): Parameter names and values that bind to placeholders in the SQL string. - - A parameter placeholder consists of the ``@`` character followed by the - parameter name (for example, ``@firstName``). Parameter names can - contain letters, numbers, and underscores. - - Parameters can appear anywhere that a literal value is expected. The - same parameter name can be used more than once, for example: - - ``"WHERE id > @msg_id AND id < @msg_id + 100"`` - - It is an error to execute a SQL statement with unbound parameters. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Struct` - param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type - from a JSON value. For example, values of type ``BYTES`` and values of - type ``STRING`` both appear in ``params`` as JSON strings. - - In these cases, ``param_types`` can be used to specify the exact SQL - type for some or all of the SQL statement parameters. See the definition - of ``Type`` for more information about SQL types. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Type` - resume_token (bytes): If this request is resuming a previously interrupted SQL statement - execution, ``resume_token`` should be copied from the last - ``PartialResultSet`` yielded before the interruption. Doing this enables - the new SQL statement execution to resume where the last one left off. - The rest of the request parameters must exactly match the request that - yielded this token. - query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in - ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can - only be set to ``QueryMode.NORMAL``. - partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionQuery(). There must be an exact match - for the values of fields common to this message and the - PartitionQueryRequest message used to create this partition\_token. - seqno (long): A per-transaction sequence number used to identify this request. This field - makes each request idempotent such that if the request is received multiple - times, at most one will succeed. - - The sequence number must be monotonically increasing within the - transaction. If a request arrives for the first time with an out-of-order - sequence number, the transaction may be aborted. Replays of previously - handled requests will yield the same response as the first execution. - - Required for DML statements. Ignored for queries. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.ResultSet` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
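The parameter-binding rules spelled out above can be exercised directly against this client. The sketch below assumes a ``Singers`` table and uses the handwritten ``google.cloud.spanner_v1.param_types`` helpers shipped in the same library to pin the parameter type.

from google.protobuf.struct_pb2 import Struct
from google.cloud import spanner_v1
from google.cloud.spanner_v1 import param_types

client = spanner_v1.SpannerClient()
session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')

# Values travel as a protobuf Struct; the type is pinned explicitly because a
# JSON string is ambiguous between STRING and BYTES.
params = Struct()
params.fields['first_name'].string_value = 'Alice'

result_set = client.execute_sql(
    session,
    'SELECT SingerId FROM Singers WHERE FirstName = @first_name',
    params=params,
    param_types={'first_name': param_types.STRING},
)
for row in result_set.rows:
    print(row)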
- if "execute_sql" not in self._inner_api_calls: - self._inner_api_calls[ - "execute_sql" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.execute_sql, - default_retry=self._method_configs["ExecuteSql"].retry, - default_timeout=self._method_configs["ExecuteSql"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ExecuteSqlRequest( - session=session, - sql=sql, - transaction=transaction, - params=params, - param_types=param_types, - resume_token=resume_token, - query_mode=query_mode, - partition_token=partition_token, - seqno=seqno, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["execute_sql"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def execute_streaming_sql( - self, - session, - sql, - transaction=None, - params=None, - param_types=None, - resume_token=None, - query_mode=None, - partition_token=None, - seqno=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Like ``ExecuteSql``, except returns the result set as a stream. Unlike - ``ExecuteSql``, there is no limit on the size of the returned result - set. However, no individual row in the result set can exceed 100 MiB, - and no column value can exceed 10 MiB. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `sql`: - >>> sql = '' - >>> - >>> for element in client.execute_streaming_sql(session, sql): - ... # process element - ... pass - - Args: - session (str): Required. The session in which the SQL query should be performed. - sql (str): Required. The SQL string. - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. - - For queries, if none is provided, the default is a temporary read-only - transaction with strong concurrency. - - Standard DML statements require a read-write transaction. To protect - against replays, single-use transactions are not supported. The caller - must either supply an existing transaction ID or begin a new transaction. - - Partitioned DML requires an existing Partitioned DML transaction ID. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): Parameter names and values that bind to placeholders in the SQL string. - - A parameter placeholder consists of the ``@`` character followed by the - parameter name (for example, ``@firstName``). Parameter names can - contain letters, numbers, and underscores. - - Parameters can appear anywhere that a literal value is expected. The - same parameter name can be used more than once, for example: - - ``"WHERE id > @msg_id AND id < @msg_id + 100"`` - - It is an error to execute a SQL statement with unbound parameters. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Struct` - param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type - from a JSON value. For example, values of type ``BYTES`` and values of - type ``STRING`` both appear in ``params`` as JSON strings. - - In these cases, ``param_types`` can be used to specify the exact SQL - type for some or all of the SQL statement parameters. See the definition - of ``Type`` for more information about SQL types. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Type` - resume_token (bytes): If this request is resuming a previously interrupted SQL statement - execution, ``resume_token`` should be copied from the last - ``PartialResultSet`` yielded before the interruption. Doing this enables - the new SQL statement execution to resume where the last one left off. - The rest of the request parameters must exactly match the request that - yielded this token. - query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in - ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can - only be set to ``QueryMode.NORMAL``. - partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionQuery(). There must be an exact match - for the values of fields common to this message and the - PartitionQueryRequest message used to create this partition\_token. - seqno (long): A per-transaction sequence number used to identify this request. This field - makes each request idempotent such that if the request is received multiple - times, at most one will succeed. - - The sequence number must be monotonically increasing within the - transaction. If a request arrives for the first time with an out-of-order - sequence number, the transaction may be aborted. Replays of previously - handled requests will yield the same response as the first execution. - - Required for DML statements. Ignored for queries. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.spanner_v1.types.PartialResultSet]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
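To make the streaming variant concrete, a small sketch follows (table name assumed); each yielded ``PartialResultSet`` carries a ``resume_token`` that can be replayed as described above.

from google.cloud import spanner_v1

client = spanner_v1.SpannerClient()
session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')

last_token = b''
for chunk in client.execute_streaming_sql(session, 'SELECT * FROM Singers'):
    last_token = chunk.resume_token or last_token
    # chunk.values holds this chunk's (possibly split) column values.
    for value in chunk.values:
        pass

# If the stream is interrupted, the same request can be re-issued with
# resume_token=last_token to pick up where it left off.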
- if "execute_streaming_sql" not in self._inner_api_calls: - self._inner_api_calls[ - "execute_streaming_sql" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.execute_streaming_sql, - default_retry=self._method_configs["ExecuteStreamingSql"].retry, - default_timeout=self._method_configs["ExecuteStreamingSql"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ExecuteSqlRequest( - session=session, - sql=sql, - transaction=transaction, - params=params, - param_types=param_types, - resume_token=resume_token, - query_mode=query_mode, - partition_token=partition_token, - seqno=seqno, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["execute_streaming_sql"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def execute_batch_dml( - self, - session, - transaction, - statements, - seqno, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Executes a batch of SQL DML statements. This method allows many - statements to be run with lower latency than submitting them - sequentially with ``ExecuteSql``. - - Statements are executed in sequential order. A request can succeed even - if a statement fails. The ``ExecuteBatchDmlResponse.status`` field in - the response provides information about the statement that failed. - Clients must inspect this field to determine whether an error occurred. - - Execution stops after the first failed statement; the remaining - statements are not executed. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `transaction`: - >>> transaction = {} - >>> - >>> # TODO: Initialize `statements`: - >>> statements = [] - >>> - >>> # TODO: Initialize `seqno`: - >>> seqno = 0 - >>> - >>> response = client.execute_batch_dml(session, transaction, statements, seqno) - - Args: - session (str): Required. The session in which the DML statements should be performed. - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Required. The transaction to use. Must be a read-write transaction. - - To protect against replays, single-use transactions are not supported. The - caller must either supply an existing transaction ID or begin a new - transaction. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - statements (list[Union[dict, ~google.cloud.spanner_v1.types.Statement]]): Required. The list of statements to execute in this batch. Statements - are executed serially, such that the effects of statement ``i`` are - visible to statement ``i+1``. Each statement must be a DML statement. - Execution stops at the first failed statement; the remaining statements - are not executed. - - Callers must provide at least one statement. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Statement` - seqno (long): Required. A per-transaction sequence number used to identify this request. 
- This field makes each request idempotent such that if the request is - received multiple times, at most one will succeed. - - The sequence number must be monotonically increasing within the - transaction. If a request arrives for the first time with an out-of-order - sequence number, the transaction may be aborted. Replays of previously - handled requests will yield the same response as the first execution. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.ExecuteBatchDmlResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "execute_batch_dml" not in self._inner_api_calls: - self._inner_api_calls[ - "execute_batch_dml" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.execute_batch_dml, - default_retry=self._method_configs["ExecuteBatchDml"].retry, - default_timeout=self._method_configs["ExecuteBatchDml"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ExecuteBatchDmlRequest( - session=session, transaction=transaction, statements=statements, seqno=seqno - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["execute_batch_dml"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def read( - self, - session, - table, - columns, - key_set, - transaction=None, - index=None, - limit=None, - resume_token=None, - partition_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Reads rows from the database using key lookups and scans, as a simple - key/value style alternative to ``ExecuteSql``. This method cannot be - used to return a result set larger than 10 MiB; if the read matches more - data than that, the read fails with a ``FAILED_PRECONDITION`` error. - - Reads inside read-write transactions might return ``ABORTED``. If this - occurs, the application should restart the transaction from the - beginning. See ``Transaction`` for more details. - - Larger result sets can be yielded in streaming fashion by calling - ``StreamingRead`` instead. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `table`: - >>> table = '' - >>> - >>> # TODO: Initialize `columns`: - >>> columns = [] - >>> - >>> # TODO: Initialize `key_set`: - >>> key_set = {} - >>> - >>> response = client.read(session, table, columns, key_set) - - Args: - session (str): Required. 
The session in which the read should be performed. - table (str): Required. The name of the table in the database to be read. - columns (list[str]): Required. The columns of ``table`` to be returned for each row matching - this request. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` - names the primary keys of the rows in ``table`` to be yielded, unless - ``index`` is present. If ``index`` is present, then ``key_set`` instead - names index keys in ``index``. - - If the ``partition_token`` field is empty, rows are yielded in table - primary key order (if ``index`` is empty) or index key order (if - ``index`` is non-empty). If the ``partition_token`` field is not empty, - rows will be yielded in an unspecified order. - - It is not an error for the ``key_set`` to name rows that do not exist in - the database. Read yields nothing for nonexistent rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.KeySet` - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a - temporary read-only transaction with strong concurrency. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. This index is used - instead of the table primary key when interpreting ``key_set`` and - sorting result rows. See ``key_set`` for further information. - limit (long): If greater than zero, only the first ``limit`` rows are yielded. If - ``limit`` is zero, the default is no limit. A limit cannot be specified - if ``partition_token`` is set. - resume_token (bytes): If this request is resuming a previously interrupted read, - ``resume_token`` should be copied from the last ``PartialResultSet`` - yielded before the interruption. Doing this enables the new read to - resume where the last read left off. The rest of the request parameters - must exactly match the request that yielded this token. - partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionRead(). There must be an exact match - for the values of fields common to this message and the - PartitionReadRequest message used to create this partition\_token. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.ResultSet` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
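A sketch of the key/value-style lookup described above, assuming a ``Singers`` table; the dict form of ``key_set`` mirrors the ``KeySet`` protobuf message.

from google.cloud import spanner_v1

client = spanner_v1.SpannerClient()
session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')

result_set = client.read(
    session,
    table='Singers',
    columns=['SingerId', 'FirstName'],
    key_set={'all': True},  # or {'keys': [...]} for point lookups
    limit=10,
)
for row in result_set.rows:
    print(row)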
- if "read" not in self._inner_api_calls: - self._inner_api_calls["read"] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read, - default_retry=self._method_configs["Read"].retry, - default_timeout=self._method_configs["Read"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ReadRequest( - session=session, - table=table, - columns=columns, - key_set=key_set, - transaction=transaction, - index=index, - limit=limit, - resume_token=resume_token, - partition_token=partition_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def streaming_read( - self, - session, - table, - columns, - key_set, - transaction=None, - index=None, - limit=None, - resume_token=None, - partition_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Like ``Read``, except returns the result set as a stream. Unlike - ``Read``, there is no limit on the size of the returned result set. - However, no individual row in the result set can exceed 100 MiB, and no - column value can exceed 10 MiB. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `table`: - >>> table = '' - >>> - >>> # TODO: Initialize `columns`: - >>> columns = [] - >>> - >>> # TODO: Initialize `key_set`: - >>> key_set = {} - >>> - >>> for element in client.streaming_read(session, table, columns, key_set): - ... # process element - ... pass - - Args: - session (str): Required. The session in which the read should be performed. - table (str): Required. The name of the table in the database to be read. - columns (list[str]): Required. The columns of ``table`` to be returned for each row matching - this request. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` - names the primary keys of the rows in ``table`` to be yielded, unless - ``index`` is present. If ``index`` is present, then ``key_set`` instead - names index keys in ``index``. - - If the ``partition_token`` field is empty, rows are yielded in table - primary key order (if ``index`` is empty) or index key order (if - ``index`` is non-empty). If the ``partition_token`` field is not empty, - rows will be yielded in an unspecified order. - - It is not an error for the ``key_set`` to name rows that do not exist in - the database. Read yields nothing for nonexistent rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.KeySet` - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a - temporary read-only transaction with strong concurrency. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. 
This index is used - instead of the table primary key when interpreting ``key_set`` and - sorting result rows. See ``key_set`` for further information. - limit (long): If greater than zero, only the first ``limit`` rows are yielded. If - ``limit`` is zero, the default is no limit. A limit cannot be specified - if ``partition_token`` is set. - resume_token (bytes): If this request is resuming a previously interrupted read, - ``resume_token`` should be copied from the last ``PartialResultSet`` - yielded before the interruption. Doing this enables the new read to - resume where the last read left off. The rest of the request parameters - must exactly match the request that yielded this token. - partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionRead(). There must be an exact match - for the values of fields common to this message and the - PartitionReadRequest message used to create this partition\_token. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.spanner_v1.types.PartialResultSet]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "streaming_read" not in self._inner_api_calls: - self._inner_api_calls[ - "streaming_read" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.streaming_read, - default_retry=self._method_configs["StreamingRead"].retry, - default_timeout=self._method_configs["StreamingRead"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.ReadRequest( - session=session, - table=table, - columns=columns, - key_set=key_set, - transaction=transaction, - index=index, - limit=limit, - resume_token=resume_token, - partition_token=partition_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["streaming_read"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def begin_transaction( - self, - session, - options_, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Begins a new transaction. This step can often be skipped: ``Read``, - ``ExecuteSql`` and ``Commit`` can begin a new transaction as a - side-effect. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `options_`: - >>> options_ = {} - >>> - >>> response = client.begin_transaction(session, options_) - - Args: - session (str): Required. 
The session in which the transaction runs. - options_ (Union[dict, ~google.cloud.spanner_v1.types.TransactionOptions]): Required. Options for the new transaction. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.Transaction` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "begin_transaction" not in self._inner_api_calls: - self._inner_api_calls[ - "begin_transaction" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.begin_transaction, - default_retry=self._method_configs["BeginTransaction"].retry, - default_timeout=self._method_configs["BeginTransaction"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.BeginTransactionRequest(session=session, options=options_) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["begin_transaction"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def commit( - self, - session, - transaction_id=None, - single_use_transaction=None, - mutations=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Commits a transaction. The request includes the mutations to be applied - to rows in the database. - - ``Commit`` might return an ``ABORTED`` error. This can occur at any - time; commonly, the cause is conflicts with concurrent transactions. - However, it can also happen for a variety of other reasons. If - ``Commit`` returns ``ABORTED``, the caller should re-attempt the - transaction from the beginning, re-using the same session. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> response = client.commit(session) - - Args: - session (str): Required. The session in which the transaction to be committed is running. - transaction_id (bytes): Commit a previously-started transaction. - single_use_transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionOptions]): Execute mutations in a temporary transaction. Note that unlike commit of - a previously-started transaction, commit with a temporary transaction is - non-idempotent. 
That is, if the ``CommitRequest`` is sent to Cloud - Spanner more than once (for instance, due to retries in the application, - or in the transport library), it is possible that the mutations are - executed more than once. If this is undesirable, use - ``BeginTransaction`` and ``Commit`` instead. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionOptions` - mutations (list[Union[dict, ~google.cloud.spanner_v1.types.Mutation]]): The mutations to be executed when this transaction commits. All - mutations are applied atomically, in the order they appear in - this list. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Mutation` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.CommitResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "commit" not in self._inner_api_calls: - self._inner_api_calls[ - "commit" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.commit, - default_retry=self._method_configs["Commit"].retry, - default_timeout=self._method_configs["Commit"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - transaction_id=transaction_id, single_use_transaction=single_use_transaction - ) - - request = spanner_pb2.CommitRequest( - session=session, - transaction_id=transaction_id, - single_use_transaction=single_use_transaction, - mutations=mutations, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["commit"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def rollback( - self, - session, - transaction_id, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Rolls back a transaction, releasing any locks it holds. It is a good - idea to call this for any transaction that includes one or more ``Read`` - or ``ExecuteSql`` requests and ultimately decides not to commit. - - ``Rollback`` returns ``OK`` if it successfully aborts the transaction, - the transaction was already aborted, or the transaction is not found. - ``Rollback`` never returns ``ABORTED``. 
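Pulling the transaction RPCs together, a bare-bones read-write flow at this layer might look like the sketch below; the handwritten client normally drives this loop, and the DML statement and error handling are assumptions for illustration.

from google.cloud import spanner_v1

client = spanner_v1.SpannerClient()
session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')

txn = client.begin_transaction(session, {'read_write': {}})
try:
    client.execute_sql(
        session,
        "UPDATE Singers SET LastName = 'Smith' WHERE SingerId = 1",
        transaction={'id': txn.id},
        seqno=1,  # DML requires a monotonically increasing per-transaction seqno.
    )
    response = client.commit(session, transaction_id=txn.id)
    print(response.commit_timestamp)
except Exception:
    # On ABORTED the whole transaction should be retried from the beginning;
    # rolling back releases any locks still held.
    client.rollback(session, txn.id)
    raise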
- - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `transaction_id`: - >>> transaction_id = b'' - >>> - >>> client.rollback(session, transaction_id) - - Args: - session (str): Required. The session in which the transaction to roll back is running. - transaction_id (bytes): Required. The transaction to roll back. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "rollback" not in self._inner_api_calls: - self._inner_api_calls[ - "rollback" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.rollback, - default_retry=self._method_configs["Rollback"].retry, - default_timeout=self._method_configs["Rollback"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.RollbackRequest( - session=session, transaction_id=transaction_id - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["rollback"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def partition_query( - self, - session, - sql, - transaction=None, - params=None, - param_types=None, - partition_options=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a set of partition tokens that can be used to execute a query - operation in parallel. Each of the returned partition tokens can be used - by ``ExecuteStreamingSql`` to specify a subset of the query result to - read. The same session and read-only transaction must be used by the - PartitionQueryRequest used to create the partition tokens and the - ExecuteSqlRequests that use the partition tokens. - - Partition tokens become invalid when the session used to create them is - deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the query, - and the whole operation must be restarted from the beginning. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `sql`: - >>> sql = '' - >>> - >>> response = client.partition_query(session, sql) - - Args: - session (str): Required. The session used to create the partitions. - sql (str): Required. The query request to generate partitions for. 
The request will - fail if the query is not root partitionable. The query plan of a root - partitionable query has a single distributed union operator. A - distributed union operator conceptually divides one or more tables into - multiple splits, remotely evaluates a subquery independently on each - split, and then unions all results. - - This must not contain DML commands, such as INSERT, UPDATE, or DELETE. - Use ``ExecuteStreamingSql`` with a PartitionedDml transaction for large, - partition-friendly DML operations. - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use - transactions are not. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): Parameter names and values that bind to placeholders in the SQL string. - - A parameter placeholder consists of the ``@`` character followed by the - parameter name (for example, ``@firstName``). Parameter names can - contain letters, numbers, and underscores. - - Parameters can appear anywhere that a literal value is expected. The - same parameter name can be used more than once, for example: - - ``"WHERE id > @msg_id AND id < @msg_id + 100"`` - - It is an error to execute a SQL statement with unbound parameters. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Struct` - param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type - from a JSON value. For example, values of type ``BYTES`` and values of - type ``STRING`` both appear in ``params`` as JSON strings. - - In these cases, ``param_types`` can be used to specify the exact SQL - type for some or all of the SQL query parameters. See the definition of - ``Type`` for more information about SQL types. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.Type` - partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.PartitionOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.PartitionResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
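The partition-token flow described above, end to end, as a rough sketch (worker fan-out is elided and the query and table are assumed); note that the same session and read-only transaction are reused for both RPCs, and the request parameters must match the ones used to create the partitions.

from google.cloud import spanner_v1

client = spanner_v1.SpannerClient()
session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')

sql = 'SELECT SingerId, FirstName FROM Singers'
txn = client.begin_transaction(session, {'read_only': {'strong': True}})
selector = {'id': txn.id}

partitions = client.partition_query(session, sql, transaction=selector)
for partition in partitions.partitions:
    # In real use each token is handed to a separate worker.
    for chunk in client.execute_streaming_sql(
        session, sql, transaction=selector, partition_token=partition.partition_token
    ):
        pass  # process chunk.values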
- if "partition_query" not in self._inner_api_calls: - self._inner_api_calls[ - "partition_query" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partition_query, - default_retry=self._method_configs["PartitionQuery"].retry, - default_timeout=self._method_configs["PartitionQuery"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.PartitionQueryRequest( - session=session, - sql=sql, - transaction=transaction, - params=params, - param_types=param_types, - partition_options=partition_options, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["partition_query"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def partition_read( - self, - session, - table, - key_set, - transaction=None, - index=None, - columns=None, - partition_options=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a set of partition tokens that can be used to execute a read - operation in parallel. Each of the returned partition tokens can be used - by ``StreamingRead`` to specify a subset of the read result to read. The - same session and read-only transaction must be used by the - PartitionReadRequest used to create the partition tokens and the - ReadRequests that use the partition tokens. There are no ordering - guarantees on rows returned among the returned partition tokens, or even - within each individual StreamingRead call issued with a - partition\_token. - - Partition tokens become invalid when the session used to create them is - deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the read, - and the whole operation must be restarted from the beginning. - - Example: - >>> from google.cloud import spanner_v1 - >>> - >>> client = spanner_v1.SpannerClient() - >>> - >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') - >>> - >>> # TODO: Initialize `table`: - >>> table = '' - >>> - >>> # TODO: Initialize `key_set`: - >>> key_set = {} - >>> - >>> response = client.partition_read(session, table, key_set) - - Args: - session (str): Required. The session used to create the partitions. - table (str): Required. The name of the table in the database to be read. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` - names the primary keys of the rows in ``table`` to be yielded, unless - ``index`` is present. If ``index`` is present, then ``key_set`` instead - names index keys in ``index``. - - It is not an error for the ``key_set`` to name rows that do not exist in - the database. Read yields nothing for nonexistent rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.KeySet` - transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use - transactions are not. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. This index is used - instead of the table primary key when interpreting ``key_set`` and - sorting result rows. See ``key_set`` for further information. - columns (list[str]): The columns of ``table`` to be returned for each row matching this - request. - partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.spanner_v1.types.PartitionOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.spanner_v1.types.PartitionResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "partition_read" not in self._inner_api_calls: - self._inner_api_calls[ - "partition_read" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partition_read, - default_retry=self._method_configs["PartitionRead"].retry, - default_timeout=self._method_configs["PartitionRead"].timeout, - client_info=self._client_info, - ) - - request = spanner_pb2.PartitionReadRequest( - session=session, - table=table, - key_set=key_set, - transaction=transaction, - index=index, - columns=columns, - partition_options=partition_options, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("session", session)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["partition_read"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/spanner/google/cloud/spanner_v1/gapic/spanner_client_config.py b/spanner/google/cloud/spanner_v1/gapic/spanner_client_config.py deleted file mode 100644 index 44b81c5fb97b..000000000000 --- a/spanner/google/cloud/spanner_v1/gapic/spanner_client_config.py +++ /dev/null @@ -1,117 +0,0 @@ -config = { - "interfaces": { - "google.spanner.v1.Spanner": { - "retry_codes": { - "idempotent": ["UNAVAILABLE"], - "non_idempotent": [], - "long_running": ["UNAVAILABLE"], - }, - "retry_params": { - "default": { - "initial_retry_delay_millis": 250, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - "streaming": { - "initial_retry_delay_millis": 250, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - 
"max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - "long_running": { - "initial_retry_delay_millis": 250, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 32000, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - }, - "methods": { - "CreateSession": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "BatchCreateSessions": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "GetSession": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ListSessions": { - "timeout_millis": 3600000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "DeleteSession": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ExecuteSql": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "ExecuteStreamingSql": { - "timeout_millis": 3600000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "streaming", - }, - "ExecuteBatchDml": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "Read": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "StreamingRead": { - "timeout_millis": 3600000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "streaming", - }, - "BeginTransaction": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "Commit": { - "timeout_millis": 3600000, - "retry_codes_name": "long_running", - "retry_params_name": "long_running", - }, - "Rollback": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "PartitionQuery": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "PartitionRead": { - "timeout_millis": 30000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - }, - } - } -} diff --git a/spanner/google/cloud/spanner_v1/gapic/transports/__init__.py b/spanner/google/cloud/spanner_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_v1/gapic/transports/spanner.grpc.config b/spanner/google/cloud/spanner_v1/gapic/transports/spanner.grpc.config deleted file mode 100755 index c34397a1c869..000000000000 --- a/spanner/google/cloud/spanner_v1/gapic/transports/spanner.grpc.config +++ /dev/null @@ -1,88 +0,0 @@ -channel_pool: { - max_size: 10 - max_concurrent_streams_low_watermark: 100 -} -method: { - name: "/google.spanner.v1.Spanner/CreateSession" - affinity: { - command: BIND - affinity_key: "name" - } -} -method: { - name: "/google.spanner.v1.Spanner/GetSession" - affinity: { - command: BOUND - affinity_key: "name" - } -} -method: { - name: "/google.spanner.v1.Spanner/DeleteSession" - affinity: { - command: UNBIND - affinity_key: "name" - } -} -method: { - name: "/google.spanner.v1.Spanner/ExecuteSql" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/ExecuteStreamingSql" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/Read" - affinity: { - command: BOUND - 
affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/StreamingRead" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/BeginTransaction" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/Commit" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/Rollback" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/PartitionQuery" - affinity: { - command: BOUND - affinity_key: "session" - } -} -method: { - name: "/google.spanner.v1.Spanner/PartitionRead" - affinity: { - command: BOUND - affinity_key: "session" - } -} diff --git a/spanner/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py b/spanner/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py deleted file mode 100644 index 47cedd3cc8dd..000000000000 --- a/spanner/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import pkg_resources -import grpc_gcp - -import google.api_core.grpc_helpers - -from google.cloud.spanner_v1.proto import spanner_pb2_grpc - - -_SPANNER_GRPC_CONFIG = "spanner.grpc.config" - - -class SpannerGrpcTransport(object): - """gRPC transport class providing stubs for - google.spanner.v1 Spanner API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/spanner.data", - ) - - def __init__( - self, channel=None, credentials=None, address="spanner.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = {"spanner_stub": spanner_pb2_grpc.SpannerStub(channel)} - - @classmethod - def create_channel( - cls, address="spanner.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - grpc_gcp_config = grpc_gcp.api_config_from_text_pb( - pkg_resources.resource_string(__name__, _SPANNER_GRPC_CONFIG) - ) - options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, grpc_gcp_config)] - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_session(self): - """Return the gRPC stub for :meth:`SpannerClient.create_session`. - - Creates a new session. A session can be used to perform transactions - that read and/or modify data in a Cloud Spanner database. Sessions are - meant to be reused for many consecutive transactions. - - Sessions can only execute one transaction at a time. To execute multiple - concurrent read-write/write-only transactions, create multiple sessions. - Note that standalone reads and queries use a transaction internally, and - count toward the one transaction limit. - - Active sessions use additional server resources, so it is a good idea to - delete idle and unneeded sessions. Aside from explicit deletes, Cloud - Spanner can delete sessions for which no operations are sent for more - than an hour. If a session is deleted, requests to it return - ``NOT_FOUND``. - - Idle sessions can be kept alive by sending a trivial SQL query - periodically, e.g., ``"SELECT 1"``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].CreateSession - - @property - def batch_create_sessions(self): - """Return the gRPC stub for :meth:`SpannerClient.batch_create_sessions`. - - Creates multiple new sessions. - - This API can be used to initialize a session cache on the clients. - See https://goo.gl/TgSFN2 for best practices on session cache management. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].BatchCreateSessions - - @property - def get_session(self): - """Return the gRPC stub for :meth:`SpannerClient.get_session`. - - Gets a session. Returns ``NOT_FOUND`` if the session does not exist. - This is mainly useful for determining whether a session is still alive. 
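A brief illustration of how the transport being removed above was typically exercised. This is a sketch only: it assumes application-default credentials (and the ``grpcio-gcp`` dependency the module imports) are available, and uses a hypothetical database path; ``CreateSessionRequest`` comes from the bundled ``spanner_pb2`` module alongside the ``spanner_pb2_grpc`` import shown above.

.. code:: python

    from google.cloud.spanner_v1.gapic.transports.spanner_grpc_transport import (
        SpannerGrpcTransport,
    )
    from google.cloud.spanner_v1.proto import spanner_pb2

    # Builds a grpc-gcp channel using the affinity config above and binds
    # the generated Spanner stub to it.
    transport = SpannerGrpcTransport(address="spanner.googleapis.com:443")

    # Hypothetical database path; replace with a real one.
    database = "projects/my-project/instances/my-instance/databases/my-db"

    # Each property on the transport returns the raw gRPC callable.
    session = transport.create_session(
        spanner_pb2.CreateSessionRequest(database=database)
    )
    print(session.name)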
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].GetSession - - @property - def list_sessions(self): - """Return the gRPC stub for :meth:`SpannerClient.list_sessions`. - - Lists all sessions in a given database. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].ListSessions - - @property - def delete_session(self): - """Return the gRPC stub for :meth:`SpannerClient.delete_session`. - - Ends a session, releasing server resources associated with it. This will - asynchronously trigger cancellation of any operations that are running with - this session. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].DeleteSession - - @property - def execute_sql(self): - """Return the gRPC stub for :meth:`SpannerClient.execute_sql`. - - Executes an SQL statement, returning all results in a single reply. This - method cannot be used to return a result set larger than 10 MiB; if the - query yields more data than that, the query fails with a - ``FAILED_PRECONDITION`` error. - - Operations inside read-write transactions might return ``ABORTED``. If - this occurs, the application should restart the transaction from the - beginning. See ``Transaction`` for more details. - - Larger result sets can be fetched in streaming fashion by calling - ``ExecuteStreamingSql`` instead. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].ExecuteSql - - @property - def execute_streaming_sql(self): - """Return the gRPC stub for :meth:`SpannerClient.execute_streaming_sql`. - - Like ``ExecuteSql``, except returns the result set as a stream. Unlike - ``ExecuteSql``, there is no limit on the size of the returned result - set. However, no individual row in the result set can exceed 100 MiB, - and no column value can exceed 10 MiB. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].ExecuteStreamingSql - - @property - def execute_batch_dml(self): - """Return the gRPC stub for :meth:`SpannerClient.execute_batch_dml`. - - Executes a batch of SQL DML statements. This method allows many - statements to be run with lower latency than submitting them - sequentially with ``ExecuteSql``. - - Statements are executed in sequential order. A request can succeed even - if a statement fails. The ``ExecuteBatchDmlResponse.status`` field in - the response provides information about the statement that failed. - Clients must inspect this field to determine whether an error occurred. - - Execution stops after the first failed statement; the remaining - statements are not executed. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].ExecuteBatchDml - - @property - def read(self): - """Return the gRPC stub for :meth:`SpannerClient.read`. - - Reads rows from the database using key lookups and scans, as a simple - key/value style alternative to ``ExecuteSql``. 
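To sidestep the 10 MiB single-response ceiling described for ``ExecuteSql``, the streaming stub can be called instead. A hedged sketch, reusing the ``transport`` and ``session`` from the earlier example; the table name is hypothetical, and the request/response messages are the generated ``ExecuteSqlRequest`` / ``PartialResultSet`` types.

.. code:: python

    from google.cloud.spanner_v1.proto import spanner_pb2

    request = spanner_pb2.ExecuteSqlRequest(
        session=session.name,
        sql="SELECT SingerId, Name FROM Singers",  # hypothetical table
    )

    # ExecuteStreamingSql yields PartialResultSet messages rather than a
    # single bounded reply.
    for partial_result_set in transport.execute_streaming_sql(request):
        print(partial_result_set.values)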
This method cannot be - used to return a result set larger than 10 MiB; if the read matches more - data than that, the read fails with a ``FAILED_PRECONDITION`` error. - - Reads inside read-write transactions might return ``ABORTED``. If this - occurs, the application should restart the transaction from the - beginning. See ``Transaction`` for more details. - - Larger result sets can be yielded in streaming fashion by calling - ``StreamingRead`` instead. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].Read - - @property - def streaming_read(self): - """Return the gRPC stub for :meth:`SpannerClient.streaming_read`. - - Like ``Read``, except returns the result set as a stream. Unlike - ``Read``, there is no limit on the size of the returned result set. - However, no individual row in the result set can exceed 100 MiB, and no - column value can exceed 10 MiB. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].StreamingRead - - @property - def begin_transaction(self): - """Return the gRPC stub for :meth:`SpannerClient.begin_transaction`. - - Begins a new transaction. This step can often be skipped: ``Read``, - ``ExecuteSql`` and ``Commit`` can begin a new transaction as a - side-effect. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].BeginTransaction - - @property - def commit(self): - """Return the gRPC stub for :meth:`SpannerClient.commit`. - - Commits a transaction. The request includes the mutations to be applied - to rows in the database. - - ``Commit`` might return an ``ABORTED`` error. This can occur at any - time; commonly, the cause is conflicts with concurrent transactions. - However, it can also happen for a variety of other reasons. If - ``Commit`` returns ``ABORTED``, the caller should re-attempt the - transaction from the beginning, re-using the same session. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].Commit - - @property - def rollback(self): - """Return the gRPC stub for :meth:`SpannerClient.rollback`. - - Rolls back a transaction, releasing any locks it holds. It is a good - idea to call this for any transaction that includes one or more ``Read`` - or ``ExecuteSql`` requests and ultimately decides not to commit. - - ``Rollback`` returns ``OK`` if it successfully aborts the transaction, - the transaction was already aborted, or the transaction is not found. - ``Rollback`` never returns ``ABORTED``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].Rollback - - @property - def partition_query(self): - """Return the gRPC stub for :meth:`SpannerClient.partition_query`. - - Creates a set of partition tokens that can be used to execute a query - operation in parallel. Each of the returned partition tokens can be used - by ``ExecuteStreamingSql`` to specify a subset of the query result to - read. 
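The ``Commit`` guidance above ("re-attempt the transaction from the beginning, re-using the same session") translates into a retry loop in application code. The sketch below is illustrative: it assumes the ``Session.transaction()`` object (seen later in this diff in ``pool.py``) exposes ``insert`` and ``commit`` as in the released client, and that ``ABORTED`` surfaces as ``google.api_core.exceptions.Aborted``; the table and values are hypothetical.

.. code:: python

    from google.api_core.exceptions import Aborted

    def commit_with_retry(session, max_attempts=5):
        # Re-run the whole transaction from the beginning on ABORTED,
        # re-using the same session, as the Commit docstring advises.
        for attempt in range(max_attempts):
            transaction = session.transaction()
            transaction.begin()
            transaction.insert(
                "Singers",                      # hypothetical table
                columns=("SingerId", "Name"),
                values=[(1, "Marc")],
            )
            try:
                return transaction.commit()
            except Aborted:
                if attempt == max_attempts - 1:
                    raise

The released client also ships ``run_in_transaction`` on ``Database``, which encapsulates this retry loop.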
The same session and read-only transaction must be used by the - PartitionQueryRequest used to create the partition tokens and the - ExecuteSqlRequests that use the partition tokens. - - Partition tokens become invalid when the session used to create them is - deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the query, - and the whole operation must be restarted from the beginning. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].PartitionQuery - - @property - def partition_read(self): - """Return the gRPC stub for :meth:`SpannerClient.partition_read`. - - Creates a set of partition tokens that can be used to execute a read - operation in parallel. Each of the returned partition tokens can be used - by ``StreamingRead`` to specify a subset of the read result to read. The - same session and read-only transaction must be used by the - PartitionReadRequest used to create the partition tokens and the - ReadRequests that use the partition tokens. There are no ordering - guarantees on rows returned among the returned partition tokens, or even - within each individual StreamingRead call issued with a - partition\_token. - - Partition tokens become invalid when the session used to create them is - deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the read, - and the whole operation must be restarted from the beginning. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["spanner_stub"].PartitionRead diff --git a/spanner/google/cloud/spanner_v1/instance.py b/spanner/google/cloud/spanner_v1/instance.py deleted file mode 100644 index 83a600bd108c..000000000000 --- a/spanner/google/cloud/spanner_v1/instance.py +++ /dev/null @@ -1,379 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Cloud Spanner Instance.""" - -import re - -from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, -) -from google.protobuf.field_mask_pb2 import FieldMask - -# pylint: disable=ungrouped-imports -from google.cloud.exceptions import NotFound -from google.cloud.spanner_v1._helpers import _metadata_with_prefix -from google.cloud.spanner_v1.database import Database -from google.cloud.spanner_v1.pool import BurstyPool - -# pylint: enable=ungrouped-imports - - -_INSTANCE_NAME_RE = re.compile( - r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)$" -) - -DEFAULT_NODE_COUNT = 1 - - -class Instance(object): - """Representation of a Cloud Spanner Instance. 
- - We can use a :class:`Instance` to: - - * :meth:`reload` itself - * :meth:`create` itself - * :meth:`update` itself - * :meth:`delete` itself - - :type instance_id: str - :param instance_id: The ID of the instance. - - :type client: :class:`~google.cloud.spanner_v1.client.Client` - :param client: The client that owns the instance. Provides - authorization and a project ID. - - :type configuration_name: str - :param configuration_name: Name of the instance configuration defining - how the instance will be created. - Required for instances which do not yet exist. - - :type node_count: int - :param node_count: (Optional) Number of nodes allocated to the instance. - - :type display_name: str - :param display_name: (Optional) The display name for the instance in the - Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the instance ID. - """ - - def __init__( - self, - instance_id, - client, - configuration_name=None, - node_count=DEFAULT_NODE_COUNT, - display_name=None, - ): - self.instance_id = instance_id - self._client = client - self.configuration_name = configuration_name - self.node_count = node_count - self.display_name = display_name or instance_id - - def _update_from_pb(self, instance_pb): - """Refresh self from the server-provided protobuf. - - Helper for :meth:`from_pb` and :meth:`reload`. - """ - if not instance_pb.display_name: # Simple field (string) - raise ValueError("Instance protobuf does not contain display_name") - self.display_name = instance_pb.display_name - self.configuration_name = instance_pb.config - self.node_count = instance_pb.node_count - - @classmethod - def from_pb(cls, instance_pb, client): - """Creates an instance from a protobuf. - - :type instance_pb: - :class:`google.spanner.v2.spanner_instance_admin_pb2.Instance` - :param instance_pb: A instance protobuf object. - - :type client: :class:`~google.cloud.spanner_v1.client.Client` - :param client: The client that owns the instance. - - :rtype: :class:`Instance` - :returns: The instance parsed from the protobuf response. - :raises ValueError: - if the instance name does not match - ``projects/{project}/instances/{instance_id}`` or if the parsed - project ID does not match the project ID on the client. - """ - match = _INSTANCE_NAME_RE.match(instance_pb.name) - if match is None: - raise ValueError( - "Instance protobuf name was not in the " "expected format.", - instance_pb.name, - ) - if match.group("project") != client.project: - raise ValueError( - "Project ID on instance does not match the " "project ID on the client" - ) - instance_id = match.group("instance_id") - configuration_name = instance_pb.config - - result = cls(instance_id, client, configuration_name) - result._update_from_pb(instance_pb) - return result - - @property - def name(self): - """Instance name used in requests. - - .. note:: - - This property will not change if ``instance_id`` does not, - but the return value is not cached. - - The instance name is of the form - - ``"projects/{project}/instances/{instance_id}"`` - - :rtype: str - :returns: The instance name. - """ - return self._client.project_name + "/instances/" + self.instance_id - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - # NOTE: This does not compare the configuration values, such as - # the display_name. Instead, it only compares - # identifying values instance ID and client. 
This is - # intentional, since the same instance can be in different states - # if not synchronized. Instances with similar instance - # settings but different clients can't be used in the same way. - return other.instance_id == self.instance_id and other._client == self._client - - def __ne__(self, other): - return not self == other - - def copy(self): - """Make a copy of this instance. - - Copies the local data stored as simple types and copies the client - attached to this instance. - - :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` - :returns: A copy of the current instance. - """ - new_client = self._client.copy() - return self.__class__( - self.instance_id, - new_client, - self.configuration_name, - node_count=self.node_count, - display_name=self.display_name, - ) - - def create(self): - """Create this instance. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1#google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance - - .. note:: - - Uses the ``project`` and ``instance_id`` on the current - :class:`Instance` in addition to the ``display_name``. - To change them before creating, reset the values via - - .. code:: python - - instance.display_name = 'New display name' - instance.instance_id = 'i-changed-my-mind' - - before calling :meth:`create`. - - :rtype: :class:`google.api_core.operation.Operation` - :returns: an operation instance - :raises Conflict: if the instance already exists - """ - api = self._client.instance_admin_api - instance_pb = admin_v1_pb2.Instance( - name=self.name, - config=self.configuration_name, - display_name=self.display_name, - node_count=self.node_count, - ) - metadata = _metadata_with_prefix(self.name) - - future = api.create_instance( - parent=self._client.project_name, - instance_id=self.instance_id, - instance=instance_pb, - metadata=metadata, - ) - - return future - - def exists(self): - """Test whether this instance exists. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1#google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig - - :rtype: bool - :returns: True if the instance exists, else false - """ - api = self._client.instance_admin_api - metadata = _metadata_with_prefix(self.name) - - try: - api.get_instance(self.name, metadata=metadata) - except NotFound: - return False - - return True - - def reload(self): - """Reload the metadata for this instance. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1#google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig - - :raises NotFound: if the instance does not exist - """ - api = self._client.instance_admin_api - metadata = _metadata_with_prefix(self.name) - - instance_pb = api.get_instance(self.name, metadata=metadata) - - self._update_from_pb(instance_pb) - - def update(self): - """Update this instance. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1#google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance - - .. note:: - - Updates the ``display_name`` and ``node_count``. To change those - values before updating, set them via - - .. code:: python - - instance.display_name = 'New display name' - instance.node_count = 5 - - before calling :meth:`update`. 
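Putting the ``create`` / ``exists`` / ``reload`` / ``update`` methods above together, a typical provisioning flow looks roughly like the following. The ``Client`` construction and the instance-config path are assumptions for illustration; both ``create`` and ``update`` return ``google.api_core.operation.Operation`` futures, per their docstrings.

.. code:: python

    from google.cloud import spanner
    from google.cloud.spanner_v1.instance import Instance

    client = spanner.Client()                   # assumes configured credentials

    instance = Instance(
        "my-instance",                          # hypothetical instance ID
        client,
        configuration_name="projects/my-project/instanceConfigs/regional-us-central1",
        node_count=3,
        display_name="My instance",
    )

    if not instance.exists():
        instance.create().result()              # block on the long-running op

    instance.reload()                           # refresh display_name / node_count
    instance.node_count = 5
    instance.update().result()                  # only config/display_name/node_count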
- - :rtype: :class:`google.api_core.operation.Operation` - :returns: an operation instance - :raises NotFound: if the instance does not exist - """ - api = self._client.instance_admin_api - instance_pb = admin_v1_pb2.Instance( - name=self.name, - config=self.configuration_name, - display_name=self.display_name, - node_count=self.node_count, - ) - field_mask = FieldMask(paths=["config", "display_name", "node_count"]) - metadata = _metadata_with_prefix(self.name) - - future = api.update_instance( - instance=instance_pb, field_mask=field_mask, metadata=metadata - ) - - return future - - def delete(self): - """Mark an instance and all of its databases for permanent deletion. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1#google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance - - Immediately upon completion of the request: - - * Billing will cease for all of the instance's reserved resources. - - Soon afterward: - - * The instance and all databases within the instance will be deleteed. - All data in the databases will be permanently deleted. - """ - api = self._client.instance_admin_api - metadata = _metadata_with_prefix(self.name) - - api.delete_instance(self.name, metadata=metadata) - - def database(self, database_id, ddl_statements=(), pool=None): - """Factory to create a database within this instance. - - :type database_id: str - :param database_id: The ID of the instance. - - :type ddl_statements: list of string - :param ddl_statements: (Optional) DDL statements, excluding the - 'CREATE DATABSE' statement. - - :type pool: concrete subclass of - :class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`. - :param pool: (Optional) session pool to be used by database. - - :rtype: :class:`~google.cloud.spanner_v1.database.Database` - :returns: a database owned by this instance. - """ - return Database(database_id, self, ddl_statements=ddl_statements, pool=pool) - - def list_databases(self, page_size=None, page_token=None): - """List databases for the instance. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases - - :type page_size: int - :param page_size: - Optional. The maximum number of databases in each page of results - from this request. Non-positive values are ignored. Defaults - to a sensible value set by the API. - - :type page_token: str - :param page_token: - Optional. If present, return the next batch of databases, using - the value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing - the token. - - :rtype: :class:`~google.api._ore.page_iterator.Iterator` - :returns: - Iterator of :class:`~google.cloud.spanner_v1.database.Database` - resources within the current instance. - """ - metadata = _metadata_with_prefix(self.name) - page_iter = self._client.database_admin_api.list_databases( - self.name, page_size=page_size, metadata=metadata - ) - page_iter.next_page_token = page_token - page_iter.item_to_value = self._item_to_database - return page_iter - - def _item_to_database(self, iterator, database_pb): - """Convert a database protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. 
- - :type database_pb: :class:`~google.spanner.admin.database.v1.Database` - :param database_pb: A database returned from the API. - - :rtype: :class:`~google.cloud.spanner_v1.database.Database` - :returns: The next database in the page. - """ - return Database.from_pb(database_pb, self, pool=BurstyPool()) diff --git a/spanner/google/cloud/spanner_v1/keyset.py b/spanner/google/cloud/spanner_v1/keyset.py deleted file mode 100644 index fb45882bec21..000000000000 --- a/spanner/google/cloud/spanner_v1/keyset.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrap representation of Spanner keys / ranges.""" - -from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange as KeyRangePB -from google.cloud.spanner_v1.proto.keys_pb2 import KeySet as KeySetPB - -from google.cloud.spanner_v1._helpers import _make_list_value_pb -from google.cloud.spanner_v1._helpers import _make_list_value_pbs - - -class KeyRange(object): - """Identify range of table rows via start / end points. - - Specify either a `start_open` or `start_closed` key, or defaults to - `start_closed = []`. Specify either an `end_open` or `end_closed` key, - or defaults to `end_closed = []`. However, at least one key has to be - specified. If no keys are specified, ValueError is raised. - - :type start_open: list of scalars - :param start_open: keys identifying start of range (this key excluded) - - :type start_closed: list of scalars - :param start_closed: keys identifying start of range (this key included) - - :type end_open: list of scalars - :param end_open: keys identifying end of range (this key excluded) - - :type end_closed: list of scalars - :param end_closed: keys identifying end of range (this key included) - - :raises ValueError: if no keys are specified - """ - - def __init__( - self, start_open=None, start_closed=None, end_open=None, end_closed=None - ): - if not any([start_open, start_closed, end_open, end_closed]): - raise ValueError("Must specify at least a start or end row.") - - if start_open and start_closed: - raise ValueError("Specify one of 'start_open' / 'start_closed'.") - elif start_open is None and start_closed is None: - start_closed = [] - - if end_open and end_closed: - raise ValueError("Specify one of 'end_open' / 'end_closed'.") - elif end_open is None and end_closed is None: - end_closed = [] - - self.start_open = start_open - self.start_closed = start_closed - self.end_open = end_open - self.end_closed = end_closed - - def _to_pb(self): - """Construct a KeyRange protobuf. - - :rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeyRange` - :returns: protobuf corresponding to this instance. 
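The ``KeyRange`` constructor above (and ``KeySet``, whose removal follows just below) mirror the proto messages deleted later in this diff. A small sketch, reusing the sample keys from the ``keys.proto`` comments:

.. code:: python

    from google.cloud.spanner_v1.keyset import KeyRange, KeySet

    # All events for "Bob" during 2015, both endpoints included.
    bob_2015 = KeyRange(
        start_closed=["Bob", "2015-01-01"],
        end_closed=["Bob", "2015-12-31"],
    )

    # Everything for "Bob" strictly before the year 2000.
    bob_pre_2000 = KeyRange(
        start_closed=["Bob"],
        end_open=["Bob", "2000-01-01"],
    )

    keys = KeySet(keys=[["Alfred", "2015-06-12"]], ranges=[bob_2015, bob_pre_2000])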
- """ - kwargs = {} - - if self.start_open is not None: - kwargs["start_open"] = _make_list_value_pb(self.start_open) - - if self.start_closed is not None: - kwargs["start_closed"] = _make_list_value_pb(self.start_closed) - - if self.end_open is not None: - kwargs["end_open"] = _make_list_value_pb(self.end_open) - - if self.end_closed is not None: - kwargs["end_closed"] = _make_list_value_pb(self.end_closed) - - return KeyRangePB(**kwargs) - - def _to_dict(self): - """Return keyrange's state as a dict. - - :rtype: dict - :returns: state of this instance. - """ - mapping = {} - - if self.start_open: - mapping["start_open"] = self.start_open - - if self.start_closed: - mapping["start_closed"] = self.start_closed - - if self.end_open: - mapping["end_open"] = self.end_open - - if self.end_closed: - mapping["end_closed"] = self.end_closed - - return mapping - - def __eq__(self, other): - """Compare by serialized state.""" - if not isinstance(other, self.__class__): - return NotImplemented - return self._to_dict() == other._to_dict() - - -class KeySet(object): - """Identify table rows via keys / ranges. - - :type keys: list of list of scalars - :param keys: keys identifying individual rows within a table. - - :type ranges: list of :class:`KeyRange` - :param ranges: ranges identifying rows within a table. - - :type all_: boolean - :param all_: if True, identify all rows within a table - """ - - def __init__(self, keys=(), ranges=(), all_=False): - if all_ and (keys or ranges): - raise ValueError("'all_' is exclusive of 'keys' / 'ranges'.") - self.keys = list(keys) - self.ranges = list(ranges) - self.all_ = all_ - - def _to_pb(self): - """Construct a KeySet protobuf. - - :rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeySet` - :returns: protobuf corresponding to this instance. - """ - if self.all_: - return KeySetPB(all=True) - kwargs = {} - - if self.keys: - kwargs["keys"] = _make_list_value_pbs(self.keys) - - if self.ranges: - kwargs["ranges"] = [krange._to_pb() for krange in self.ranges] - - return KeySetPB(**kwargs) - - def _to_dict(self): - """Return keyset's state as a dict. - - The result can be used to serialize the instance and reconstitute - it later using :meth:`_from_dict`. - - :rtype: dict - :returns: state of this instance. - """ - if self.all_: - return {"all": True} - - return { - "keys": self.keys, - "ranges": [keyrange._to_dict() for keyrange in self.ranges], - } - - def __eq__(self, other): - """Compare by serialized state.""" - if not isinstance(other, self.__class__): - return NotImplemented - return self._to_dict() == other._to_dict() - - @classmethod - def _from_dict(cls, mapping): - """Create an instance from the corresponding state mapping. - - :type mapping: dict - :param mapping: the instance state. - """ - if mapping.get("all"): - return cls(all_=True) - - r_mappings = mapping.get("ranges", ()) - ranges = [KeyRange(**r_mapping) for r_mapping in r_mappings] - - return cls(keys=mapping.get("keys", ()), ranges=ranges) diff --git a/spanner/google/cloud/spanner_v1/param_types.py b/spanner/google/cloud/spanner_v1/param_types.py deleted file mode 100644 index 47442bfc4bd2..000000000000 --- a/spanner/google/cloud/spanner_v1/param_types.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Types exported from this package.""" - -from google.cloud.spanner_v1.proto import type_pb2 - - -# Scalar parameter types -STRING = type_pb2.Type(code=type_pb2.STRING) -BYTES = type_pb2.Type(code=type_pb2.BYTES) -BOOL = type_pb2.Type(code=type_pb2.BOOL) -INT64 = type_pb2.Type(code=type_pb2.INT64) -FLOAT64 = type_pb2.Type(code=type_pb2.FLOAT64) -DATE = type_pb2.Type(code=type_pb2.DATE) -TIMESTAMP = type_pb2.Type(code=type_pb2.TIMESTAMP) - - -def Array(element_type): # pylint: disable=invalid-name - """Construct an array parameter type description protobuf. - - :type element_type: :class:`type_pb2.Type` - :param element_type: the type of elements of the array - - :rtype: :class:`type_pb2.Type` - :returns: the appropriate array-type protobuf - """ - return type_pb2.Type(code=type_pb2.ARRAY, array_element_type=element_type) - - -def StructField(name, field_type): # pylint: disable=invalid-name - """Construct a field description protobuf. - - :type name: str - :param name: the name of the field - - :type field_type: :class:`type_pb2.Type` - :param field_type: the type of the field - - :rtype: :class:`type_pb2.StructType.Field` - :returns: the appropriate struct-field-type protobuf - """ - return type_pb2.StructType.Field(name=name, type=field_type) - - -def Struct(fields): # pylint: disable=invalid-name - """Construct a struct parameter type description protobuf. - - :type fields: list of :class:`type_pb2.StructType.Field` - :param fields: the fields of the struct - - :rtype: :class:`type_pb2.Type` - :returns: the appropriate struct-type protobuf - """ - return type_pb2.Type( - code=type_pb2.STRUCT, struct_type=type_pb2.StructType(fields=fields) - ) diff --git a/spanner/google/cloud/spanner_v1/pool.py b/spanner/google/cloud/spanner_v1/pool.py deleted file mode 100644 index ce7a196b6bb8..000000000000 --- a/spanner/google/cloud/spanner_v1/pool.py +++ /dev/null @@ -1,541 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Pools managing shared Session objects.""" - -import datetime - -from six.moves import queue - -from google.cloud.exceptions import NotFound -from google.cloud.spanner_v1._helpers import _metadata_with_prefix - - -_NOW = datetime.datetime.utcnow # unit tests may replace - - -class AbstractSessionPool(object): - """Specifies required API for concrete session pool implementations. - - :type labels: dict (str -> str) or None - :param labels: (Optional) user-assigned labels for sessions created - by the pool. 
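The ``param_types`` helpers above are thin factories over ``type_pb2``. A sketch of how they were typically combined; passing the resulting dicts as ``params`` / ``param_types`` to a query is behavior of the released client and is not shown in this diff.

.. code:: python

    from google.cloud.spanner_v1 import param_types

    params = {"name": "Bob", "scores": [90, 85]}
    types = {
        "name": param_types.STRING,
        "scores": param_types.Array(param_types.INT64),
    }

    # STRUCT types are composed from named fields.
    address_type = param_types.Struct(
        [
            param_types.StructField("city", param_types.STRING),
            param_types.StructField("zip_code", param_types.STRING),
        ]
    )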
- """ - - _database = None - - def __init__(self, labels=None): - if labels is None: - labels = {} - self._labels = labels - - @property - def labels(self): - """User-assigned labels for sesions created by the pool. - - :rtype: dict (str -> str) - :returns: labels assigned by the user - """ - return self._labels - - def bind(self, database): - """Associate the pool with a database. - - :type database: :class:`~google.cloud.spanner_v1.database.Database` - :param database: database used by the pool: used to create sessions - when needed. - - Concrete implementations of this method may pre-fill the pool - using the database. - - :raises NotImplementedError: abstract method - """ - raise NotImplementedError() - - def get(self): - """Check a session out from the pool. - - Concrete implementations of this method are allowed to raise an - error to signal that the pool is exhausted, or to block until a - session is available. - - :raises NotImplementedError: abstract method - """ - raise NotImplementedError() - - def put(self, session): - """Return a session to the pool. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session being returned. - - Concrete implementations of this method are allowed to raise an - error to signal that the pool is full, or to block until it is - not full. - - :raises NotImplementedError: abstract method - """ - raise NotImplementedError() - - def clear(self): - """Delete all sessions in the pool. - - Concrete implementations of this method are allowed to raise an - error to signal that the pool is full, or to block until it is - not full. - - :raises NotImplementedError: abstract method - """ - raise NotImplementedError() - - def _new_session(self): - """Helper for concrete methods creating session instances. - - :rtype: :class:`~google.cloud.spanner_v1.session.Session` - :returns: new session instance. - """ - if self.labels: - return self._database.session(labels=self.labels) - return self._database.session() - - def session(self, **kwargs): - """Check out a session from the pool. - - :param kwargs: (optional) keyword arguments, passed through to - the returned checkout. - - :rtype: :class:`~google.cloud.spanner_v1.session.SessionCheckout` - :returns: a checkout instance, to be used as a context manager for - accessing the session and returning it to the pool. - """ - return SessionCheckout(self, **kwargs) - - -class FixedSizePool(AbstractSessionPool): - """Concrete session pool implementation: - - - Pre-allocates / creates a fixed number of sessions. - - - "Pings" existing sessions via :meth:`session.exists` before returning - them, and replaces expired sessions. - - - Blocks, with a timeout, when :meth:`get` is called on an empty pool. - Raises after timing out. - - - Raises when :meth:`put` is called on a full pool. That error is - never expected in normal practice, as users should be calling - :meth:`get` followed by :meth:`put` whenever in need of a session. - - :type size: int - :param size: fixed pool size - - :type default_timeout: int - :param default_timeout: default timeout, in seconds, to wait for - a returned session. - - :type labels: dict (str -> str) or None - :param labels: (Optional) user-assigned labels for sessions created - by the pool. 
- """ - - DEFAULT_SIZE = 10 - DEFAULT_TIMEOUT = 10 - - def __init__(self, size=DEFAULT_SIZE, default_timeout=DEFAULT_TIMEOUT, labels=None): - super(FixedSizePool, self).__init__(labels=labels) - self.size = size - self.default_timeout = default_timeout - self._sessions = queue.LifoQueue(size) - - def bind(self, database): - """Associate the pool with a database. - - :type database: :class:`~google.cloud.spanner_v1.database.Database` - :param database: database used by the pool: used to create sessions - when needed. - """ - self._database = database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - - while not self._sessions.full(): - resp = api.batch_create_sessions( - database.name, - self.size - self._sessions.qsize(), - timeout=self.default_timeout, - metadata=metadata, - ) - for session_pb in resp.session: - session = self._new_session() - session._session_id = session_pb.name.split("/")[-1] - self._sessions.put(session) - - def get(self, timeout=None): # pylint: disable=arguments-differ - """Check a session out from the pool. - - :type timeout: int - :param timeout: seconds to block waiting for an available session - - :rtype: :class:`~google.cloud.spanner_v1.session.Session` - :returns: an existing session from the pool, or a newly-created - session. - :raises: :exc:`six.moves.queue.Empty` if the queue is empty. - """ - if timeout is None: - timeout = self.default_timeout - - session = self._sessions.get(block=True, timeout=timeout) - - if not session.exists(): - session = self._database.session() - session.create() - - return session - - def put(self, session): - """Return a session to the pool. - - Never blocks: if the pool is full, raises. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session being returned. - - :raises: :exc:`six.moves.queue.Full` if the queue is full. - """ - self._sessions.put_nowait(session) - - def clear(self): - """Delete all sessions in the pool.""" - - while True: - try: - session = self._sessions.get(block=False) - except queue.Empty: - break - else: - session.delete() - - -class BurstyPool(AbstractSessionPool): - """Concrete session pool implementation: - - - "Pings" existing sessions via :meth:`session.exists` before returning - them. - - - Creates a new session, rather than blocking, when :meth:`get` is called - on an empty pool. - - - Discards the returned session, rather than blocking, when :meth:`put` - is called on a full pool. - - :type target_size: int - :param target_size: max pool size - - :type labels: dict (str -> str) or None - :param labels: (Optional) user-assigned labels for sessions created - by the pool. - """ - - def __init__(self, target_size=10, labels=None): - super(BurstyPool, self).__init__(labels=labels) - self.target_size = target_size - self._database = None - self._sessions = queue.LifoQueue(target_size) - - def bind(self, database): - """Associate the pool with a database. - - :type database: :class:`~google.cloud.spanner_v1.database.Database` - :param database: database used by the pool: used to create sessions - when needed. - """ - self._database = database - - def get(self): - """Check a session out from the pool. - - :rtype: :class:`~google.cloud.spanner_v1.session.Session` - :returns: an existing session from the pool, or a newly-created - session. 
- """ - try: - session = self._sessions.get_nowait() - except queue.Empty: - session = self._new_session() - session.create() - else: - if not session.exists(): - session = self._new_session() - session.create() - return session - - def put(self, session): - """Return a session to the pool. - - Never blocks: if the pool is full, the returned session is - discarded. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session being returned. - """ - try: - self._sessions.put_nowait(session) - except queue.Full: - try: - session.delete() - except NotFound: - pass - - def clear(self): - """Delete all sessions in the pool.""" - - while True: - try: - session = self._sessions.get(block=False) - except queue.Empty: - break - else: - session.delete() - - -class PingingPool(AbstractSessionPool): - """Concrete session pool implementation: - - - Pre-allocates / creates a fixed number of sessions. - - - Sessions are used in "round-robin" order (LRU first). - - - "Pings" existing sessions in the background after a specified interval - via an API call (``session.exists()``). - - - Blocks, with a timeout, when :meth:`get` is called on an empty pool. - Raises after timing out. - - - Raises when :meth:`put` is called on a full pool. That error is - never expected in normal practice, as users should be calling - :meth:`get` followed by :meth:`put` whenever in need of a session. - - The application is responsible for calling :meth:`ping` at appropriate - times, e.g. from a background thread. - - :type size: int - :param size: fixed pool size - - :type default_timeout: int - :param default_timeout: default timeout, in seconds, to wait for - a returned session. - - :type ping_interval: int - :param ping_interval: interval at which to ping sessions. - - :type labels: dict (str -> str) or None - :param labels: (Optional) user-assigned labels for sessions created - by the pool. - """ - - def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None): - super(PingingPool, self).__init__(labels=labels) - self.size = size - self.default_timeout = default_timeout - self._delta = datetime.timedelta(seconds=ping_interval) - self._sessions = queue.PriorityQueue(size) - - def bind(self, database): - """Associate the pool with a database. - - :type database: :class:`~google.cloud.spanner_v1.database.Database` - :param database: database used by the pool: used to create sessions - when needed. - """ - self._database = database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - created_session_count = 0 - - while created_session_count < self.size: - resp = api.batch_create_sessions( - database.name, - self.size - created_session_count, - timeout=self.default_timeout, - metadata=metadata, - ) - for session_pb in resp.session: - session = self._new_session() - session._session_id = session_pb.name.split("/")[-1] - self.put(session) - created_session_count += len(resp.session) - - def get(self, timeout=None): # pylint: disable=arguments-differ - """Check a session out from the pool. - - :type timeout: int - :param timeout: seconds to block waiting for an available session - - :rtype: :class:`~google.cloud.spanner_v1.session.Session` - :returns: an existing session from the pool, or a newly-created - session. - :raises: :exc:`six.moves.queue.Empty` if the queue is empty. 
- """ - if timeout is None: - timeout = self.default_timeout - - ping_after, session = self._sessions.get(block=True, timeout=timeout) - - if _NOW() > ping_after: - if not session.exists(): - session = self._new_session() - session.create() - - return session - - def put(self, session): - """Return a session to the pool. - - Never blocks: if the pool is full, raises. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session being returned. - - :raises: :exc:`six.moves.queue.Full` if the queue is full. - """ - self._sessions.put_nowait((_NOW() + self._delta, session)) - - def clear(self): - """Delete all sessions in the pool.""" - while True: - try: - _, session = self._sessions.get(block=False) - except queue.Empty: - break - else: - session.delete() - - def ping(self): - """Refresh maybe-expired sessions in the pool. - - This method is designed to be called from a background thread, - or during the "idle" phase of an event loop. - """ - while True: - try: - ping_after, session = self._sessions.get(block=False) - except queue.Empty: # all sessions in use - break - if ping_after > _NOW(): # oldest session is fresh - # Re-add to queue with existing expiration - self._sessions.put((ping_after, session)) - break - if not session.exists(): # stale - session = self._new_session() - session.create() - # Re-add to queue with new expiration - self.put(session) - - -class TransactionPingingPool(PingingPool): - """Concrete session pool implementation: - - In addition to the features of :class:`PingingPool`, this class - creates and begins a transaction for each of its sessions at startup. - - When a session is returned to the pool, if its transaction has been - committed or rolled back, the pool creates a new transaction for the - session and pushes the transaction onto a separate queue of "transactions - to begin." The application is responsible for flushing this queue - as appropriate via the pool's :meth:`begin_pending_transactions` method. - - :type size: int - :param size: fixed pool size - - :type default_timeout: int - :param default_timeout: default timeout, in seconds, to wait for - a returned session. - - :type ping_interval: int - :param ping_interval: interval at which to ping sessions. - - :type labels: dict (str -> str) or None - :param labels: (Optional) user-assigned labels for sessions created - by the pool. - """ - - def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None): - self._pending_sessions = queue.Queue() - - super(TransactionPingingPool, self).__init__( - size, default_timeout, ping_interval, labels=labels - ) - - self.begin_pending_transactions() - - def bind(self, database): - """Associate the pool with a database. - - :type database: :class:`~google.cloud.spanner_v1.database.Database` - :param database: database used by the pool: used to create sessions - when needed. - """ - super(TransactionPingingPool, self).bind(database) - self.begin_pending_transactions() - - def put(self, session): - """Return a session to the pool. - - Never blocks: if the pool is full, raises. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session being returned. - - :raises: :exc:`six.moves.queue.Full` if the queue is full. 
- """ - if self._sessions.full(): - raise queue.Full - - txn = session._transaction - if txn is None or txn.committed or txn._rolled_back: - session.transaction() - self._pending_sessions.put(session) - else: - super(TransactionPingingPool, self).put(session) - - def begin_pending_transactions(self): - """Begin all transactions for sessions added to the pool.""" - while not self._pending_sessions.empty(): - session = self._pending_sessions.get() - session._transaction.begin() - super(TransactionPingingPool, self).put(session) - - -class SessionCheckout(object): - """Context manager: hold session checked out from a pool. - - :type pool: concrete subclass of - :class:`~google.cloud.spanner_v1.session.AbstractSessionPool` - :param pool: Pool from which to check out a session. - - :param kwargs: extra keyword arguments to be passed to :meth:`pool.get`. - """ - - _session = None # Not checked out until '__enter__'. - - def __init__(self, pool, **kwargs): - self._pool = pool - self._kwargs = kwargs.copy() - - def __enter__(self): - self._session = self._pool.get(**self._kwargs) - return self._session - - def __exit__(self, *ignored): - self._pool.put(self._session) diff --git a/spanner/google/cloud/spanner_v1/proto/__init__.py b/spanner/google/cloud/spanner_v1/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/google/cloud/spanner_v1/proto/keys.proto b/spanner/google/cloud/spanner_v1/proto/keys.proto deleted file mode 100644 index de5307aaaf93..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/keys.proto +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.spanner.v1; - -import "google/protobuf/struct.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner"; -option java_multiple_files = true; -option java_outer_classname = "KeysProto"; -option java_package = "com.google.spanner.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\V1"; - -// KeyRange represents a range of rows in a table or index. -// -// A range has a start key and an end key. These keys can be open or -// closed, indicating if the range includes rows with that key. -// -// Keys are represented by lists, where the ith value in the list -// corresponds to the ith component of the table or index primary key. -// Individual values are encoded as described -// [here][google.spanner.v1.TypeCode]. 
-// -// For example, consider the following table definition: -// -// CREATE TABLE UserEvents ( -// UserName STRING(MAX), -// EventDate STRING(10) -// ) PRIMARY KEY(UserName, EventDate); -// -// The following keys name rows in this table: -// -// ["Bob", "2014-09-23"] -// ["Alfred", "2015-06-12"] -// -// Since the `UserEvents` table's `PRIMARY KEY` clause names two -// columns, each `UserEvents` key has two elements; the first is the -// `UserName`, and the second is the `EventDate`. -// -// Key ranges with multiple components are interpreted -// lexicographically by component using the table or index key's declared -// sort order. For example, the following range returns all events for -// user `"Bob"` that occurred in the year 2015: -// -// "start_closed": ["Bob", "2015-01-01"] -// "end_closed": ["Bob", "2015-12-31"] -// -// Start and end keys can omit trailing key components. This affects the -// inclusion and exclusion of rows that exactly match the provided key -// components: if the key is closed, then rows that exactly match the -// provided components are included; if the key is open, then rows -// that exactly match are not included. -// -// For example, the following range includes all events for `"Bob"` that -// occurred during and after the year 2000: -// -// "start_closed": ["Bob", "2000-01-01"] -// "end_closed": ["Bob"] -// -// The next example retrieves all events for `"Bob"`: -// -// "start_closed": ["Bob"] -// "end_closed": ["Bob"] -// -// To retrieve events before the year 2000: -// -// "start_closed": ["Bob"] -// "end_open": ["Bob", "2000-01-01"] -// -// The following range includes all rows in the table: -// -// "start_closed": [] -// "end_closed": [] -// -// This range returns all users whose `UserName` begins with any -// character from A to C: -// -// "start_closed": ["A"] -// "end_open": ["D"] -// -// This range returns all users whose `UserName` begins with B: -// -// "start_closed": ["B"] -// "end_open": ["C"] -// -// Key ranges honor column sort order. For example, suppose a table is -// defined as follows: -// -// CREATE TABLE DescendingSortedTable { -// Key INT64, -// ... -// ) PRIMARY KEY(Key DESC); -// -// The following range retrieves all rows with key values between 1 -// and 100 inclusive: -// -// "start_closed": ["100"] -// "end_closed": ["1"] -// -// Note that 100 is passed as the start, and 1 is passed as the end, -// because `Key` is a descending column in the schema. -message KeyRange { - // The start key must be provided. It can be either closed or open. - oneof start_key_type { - // If the start is closed, then the range includes all rows whose - // first `len(start_closed)` key columns exactly match `start_closed`. - google.protobuf.ListValue start_closed = 1; - - // If the start is open, then the range excludes rows whose first - // `len(start_open)` key columns exactly match `start_open`. - google.protobuf.ListValue start_open = 2; - } - - // The end key must be provided. It can be either closed or open. - oneof end_key_type { - // If the end is closed, then the range includes all rows whose - // first `len(end_closed)` key columns exactly match `end_closed`. - google.protobuf.ListValue end_closed = 3; - - // If the end is open, then the range excludes rows whose first - // `len(end_open)` key columns exactly match `end_open`. - google.protobuf.ListValue end_open = 4; - } -} - -// `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All -// the keys are expected to be in the same table or index. 
The keys need -// not be sorted in any particular way. -// -// If the same key is specified multiple times in the set (for example -// if two ranges, two keys, or a key and a range overlap), Cloud Spanner -// behaves as if the key were only specified once. -message KeySet { - // A list of specific keys. Entries in `keys` should have exactly as - // many elements as there are columns in the primary or index key - // with which this `KeySet` is used. Individual key values are - // encoded as described [here][google.spanner.v1.TypeCode]. - repeated google.protobuf.ListValue keys = 1; - - // A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more information about - // key range specifications. - repeated KeyRange ranges = 2; - - // For convenience `all` can be set to `true` to indicate that this - // `KeySet` matches all keys in the table or index. Note that any keys - // specified in `keys` or `ranges` are only yielded once. - bool all = 3; -} diff --git a/spanner/google/cloud/spanner_v1/proto/keys_pb2.py b/spanner/google/cloud/spanner_v1/proto/keys_pb2.py deleted file mode 100644 index 9d38124847ba..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/keys_pb2.py +++ /dev/null @@ -1,448 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/spanner_v1/proto/keys.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/keys.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=_b( - "\n\025com.google.spanner.v1B\tKeysProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1" - ), - serialized_pb=_b( - '\n(google/cloud/spanner_v1/proto/keys.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto"\xf4\x01\n\x08KeyRange\x12\x32\n\x0cstart_closed\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12\x30\n\nstart_open\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12\x30\n\nend_closed\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x01\x12.\n\x08\x65nd_open\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x01\x42\x10\n\x0estart_key_typeB\x0e\n\x0c\x65nd_key_type"l\n\x06KeySet\x12(\n\x04keys\x18\x01 \x03(\x0b\x32\x1a.google.protobuf.ListValue\x12+\n\x06ranges\x18\x02 \x03(\x0b\x32\x1b.google.spanner.v1.KeyRange\x12\x0b\n\x03\x61ll\x18\x03 \x01(\x08\x42\x92\x01\n\x15\x63om.google.spanner.v1B\tKeysProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1b\x06proto3' - ), - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_KEYRANGE = _descriptor.Descriptor( - name="KeyRange", - full_name="google.spanner.v1.KeyRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name="start_closed", - full_name="google.spanner.v1.KeyRange.start_closed", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="start_open", - full_name="google.spanner.v1.KeyRange.start_open", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_closed", - full_name="google.spanner.v1.KeyRange.end_closed", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="end_open", - full_name="google.spanner.v1.KeyRange.end_open", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_key_type", - full_name="google.spanner.v1.KeyRange.start_key_type", - index=0, - containing_type=None, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_key_type", - full_name="google.spanner.v1.KeyRange.end_key_type", - index=1, - containing_type=None, - fields=[], - ), - ], - serialized_start=124, - serialized_end=368, -) - - -_KEYSET = _descriptor.Descriptor( - name="KeySet", - full_name="google.spanner.v1.KeySet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="keys", - full_name="google.spanner.v1.KeySet.keys", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="ranges", - full_name="google.spanner.v1.KeySet.ranges", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="all", - full_name="google.spanner.v1.KeySet.all", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=370, - serialized_end=478, -) - -_KEYRANGE.fields_by_name[ - "start_closed" -].message_type 
= google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYRANGE.fields_by_name[ - "start_open" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYRANGE.fields_by_name[ - "end_closed" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYRANGE.fields_by_name[ - "end_open" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYRANGE.oneofs_by_name["start_key_type"].fields.append( - _KEYRANGE.fields_by_name["start_closed"] -) -_KEYRANGE.fields_by_name["start_closed"].containing_oneof = _KEYRANGE.oneofs_by_name[ - "start_key_type" -] -_KEYRANGE.oneofs_by_name["start_key_type"].fields.append( - _KEYRANGE.fields_by_name["start_open"] -) -_KEYRANGE.fields_by_name["start_open"].containing_oneof = _KEYRANGE.oneofs_by_name[ - "start_key_type" -] -_KEYRANGE.oneofs_by_name["end_key_type"].fields.append( - _KEYRANGE.fields_by_name["end_closed"] -) -_KEYRANGE.fields_by_name["end_closed"].containing_oneof = _KEYRANGE.oneofs_by_name[ - "end_key_type" -] -_KEYRANGE.oneofs_by_name["end_key_type"].fields.append( - _KEYRANGE.fields_by_name["end_open"] -) -_KEYRANGE.fields_by_name["end_open"].containing_oneof = _KEYRANGE.oneofs_by_name[ - "end_key_type" -] -_KEYSET.fields_by_name[ - "keys" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_KEYSET.fields_by_name["ranges"].message_type = _KEYRANGE -DESCRIPTOR.message_types_by_name["KeyRange"] = _KEYRANGE -DESCRIPTOR.message_types_by_name["KeySet"] = _KEYSET -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -KeyRange = _reflection.GeneratedProtocolMessageType( - "KeyRange", - (_message.Message,), - dict( - DESCRIPTOR=_KEYRANGE, - __module__="google.cloud.spanner_v1.proto.keys_pb2", - __doc__="""KeyRange represents a range of rows in a table or index. - - A range has a start key and an end key. These keys can be open or - closed, indicating if the range includes rows with that key. - - Keys are represented by lists, where the ith value in the list - corresponds to the ith component of the table or index primary key. - Individual values are encoded as described - [here][google.spanner.v1.TypeCode]. - - For example, consider the following table definition: - - :: - - CREATE TABLE UserEvents ( - UserName STRING(MAX), - EventDate STRING(10) - ) PRIMARY KEY(UserName, EventDate); - - The following keys name rows in this table: - - :: - - ["Bob", "2014-09-23"] - ["Alfred", "2015-06-12"] - - Since the ``UserEvents`` table's ``PRIMARY KEY`` clause names two - columns, each ``UserEvents`` key has two elements; the first is the - ``UserName``, and the second is the ``EventDate``. - - Key ranges with multiple components are interpreted lexicographically by - component using the table or index key's declared sort order. For - example, the following range returns all events for user ``"Bob"`` that - occurred in the year 2015: - - :: - - "start_closed": ["Bob", "2015-01-01"] - "end_closed": ["Bob", "2015-12-31"] - - Start and end keys can omit trailing key components. This affects the - inclusion and exclusion of rows that exactly match the provided key - components: if the key is closed, then rows that exactly match the - provided components are included; if the key is open, then rows that - exactly match are not included. 
- - For example, the following range includes all events for ``"Bob"`` that - occurred during and after the year 2000: - - :: - - "start_closed": ["Bob", "2000-01-01"] - "end_closed": ["Bob"] - - The next example retrieves all events for ``"Bob"``: - - :: - - "start_closed": ["Bob"] - "end_closed": ["Bob"] - - To retrieve events before the year 2000: - - :: - - "start_closed": ["Bob"] - "end_open": ["Bob", "2000-01-01"] - - The following range includes all rows in the table: - - :: - - "start_closed": [] - "end_closed": [] - - This range returns all users whose ``UserName`` begins with any - character from A to C: - - :: - - "start_closed": ["A"] - "end_open": ["D"] - - This range returns all users whose ``UserName`` begins with B: - - :: - - "start_closed": ["B"] - "end_open": ["C"] - - Key ranges honor column sort order. For example, suppose a table is - defined as follows: - - :: - - CREATE TABLE DescendingSortedTable { - Key INT64, - ... - ) PRIMARY KEY(Key DESC); - - The following range retrieves all rows with key values between 1 and 100 - inclusive: - - :: - - "start_closed": ["100"] - "end_closed": ["1"] - - Note that 100 is passed as the start, and 1 is passed as the end, - because ``Key`` is a descending column in the schema. - - - Attributes: - start_key_type: - The start key must be provided. It can be either closed or - open. - start_closed: - If the start is closed, then the range includes all rows whose - first ``len(start_closed)`` key columns exactly match - ``start_closed``. - start_open: - If the start is open, then the range excludes rows whose first - ``len(start_open)`` key columns exactly match ``start_open``. - end_key_type: - The end key must be provided. It can be either closed or open. - end_closed: - If the end is closed, then the range includes all rows whose - first ``len(end_closed)`` key columns exactly match - ``end_closed``. - end_open: - If the end is open, then the range excludes rows whose first - ``len(end_open)`` key columns exactly match ``end_open``. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.KeyRange) - ), -) -_sym_db.RegisterMessage(KeyRange) - -KeySet = _reflection.GeneratedProtocolMessageType( - "KeySet", - (_message.Message,), - dict( - DESCRIPTOR=_KEYSET, - __module__="google.cloud.spanner_v1.proto.keys_pb2", - __doc__="""\ ``KeySet`` defines a collection of Cloud Spanner keys - and/or key ranges. All the keys are expected to be in the same table or - index. The keys need not be sorted in any particular way. - - If the same key is specified multiple times in the set (for example if - two ranges, two keys, or a key and a range overlap), Cloud Spanner - behaves as if the key were only specified once. - - - Attributes: - keys: - A list of specific keys. Entries in ``keys`` should have - exactly as many elements as there are columns in the primary - or index key with which this ``KeySet`` is used. Individual - key values are encoded as described - [here][google.spanner.v1.TypeCode]. - ranges: - A list of key ranges. See - [KeyRange][google.spanner.v1.KeyRange] for more information - about key range specifications. - all: - For convenience ``all`` can be set to ``true`` to indicate - that this ``KeySet`` matches all keys in the table or index. - Note that any keys specified in ``keys`` or ``ranges`` are - only yielded once. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.KeySet) - ), -) -_sym_db.RegisterMessage(KeySet) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/spanner/google/cloud/spanner_v1/proto/keys_pb2_grpc.py b/spanner/google/cloud/spanner_v1/proto/keys_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/keys_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/spanner/google/cloud/spanner_v1/proto/mutation.proto b/spanner/google/cloud/spanner_v1/proto/mutation.proto deleted file mode 100644 index 7df99c0ee6f2..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/mutation.proto +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.spanner.v1; - -import "google/protobuf/struct.proto"; -import "google/spanner/v1/keys.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner"; -option java_multiple_files = true; -option java_outer_classname = "MutationProto"; -option java_package = "com.google.spanner.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\V1"; - -// A modification to one or more Cloud Spanner rows. Mutations can be -// applied to a Cloud Spanner database by sending them in a -// [Commit][google.spanner.v1.Spanner.Commit] call. -message Mutation { - // Arguments to [insert][google.spanner.v1.Mutation.insert], [update][google.spanner.v1.Mutation.update], [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and - // [replace][google.spanner.v1.Mutation.replace] operations. - message Write { - // Required. The table whose rows will be written. - string table = 1; - - // The names of the columns in [table][google.spanner.v1.Mutation.Write.table] to be written. - // - // The list of columns must contain enough columns to allow - // Cloud Spanner to derive values for all primary key columns in the - // row(s) to be modified. - repeated string columns = 2; - - // The values to be written. `values` can contain more than one - // list of values. If it does, then multiple rows are written, one - // for each entry in `values`. Each list in `values` must have - // exactly as many entries as there are entries in [columns][google.spanner.v1.Mutation.Write.columns] - // above. Sending multiple lists is equivalent to sending multiple - // `Mutation`s, each containing one `values` entry and repeating - // [table][google.spanner.v1.Mutation.Write.table] and [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in each list are - // encoded as described [here][google.spanner.v1.TypeCode]. - repeated google.protobuf.ListValue values = 3; - } - - // Arguments to [delete][google.spanner.v1.Mutation.delete] operations. 
- message Delete { - // Required. The table whose rows will be deleted. - string table = 1; - - // Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. - // Delete is idempotent. The transaction will succeed even if some or all - // rows do not exist. - KeySet key_set = 2; - } - - // Required. The operation to perform. - oneof operation { - // Insert new rows in a table. If any of the rows already exist, - // the write or transaction fails with error `ALREADY_EXISTS`. - Write insert = 1; - - // Update existing rows in a table. If any of the rows does not - // already exist, the transaction fails with error `NOT_FOUND`. - Write update = 2; - - // Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, then - // its column values are overwritten with the ones provided. Any - // column values not explicitly written are preserved. - Write insert_or_update = 3; - - // Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, it is - // deleted, and the column values provided are inserted - // instead. Unlike [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this means any values not - // explicitly written become `NULL`. - // - // In an interleaved table, if you create the child table with the - // `ON DELETE CASCADE` annotation, then replacing a parent row - // also deletes the child rows. Otherwise, you must delete the - // child rows before you replace the parent row. - Write replace = 4; - - // Delete rows from a table. Succeeds whether or not the named - // rows were present. - Delete delete = 5; - } -} diff --git a/spanner/google/cloud/spanner_v1/proto/mutation_pb2.py b/spanner/google/cloud/spanner_v1/proto/mutation_pb2.py deleted file mode 100644 index db5a781f6993..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/mutation_pb2.py +++ /dev/null @@ -1,436 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
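In practice the `Mutation` operations defined above are rarely built by hand; they are queued through the hand-written batch API and sent in a single `Commit` call. A rough sketch, assuming a `database` handle obtained from the `google.cloud.spanner` client (the handle itself is not shown in this diff):

    from google.cloud.spanner_v1.keyset import KeySet

    # Everything queued on the batch is committed together.
    with database.batch() as batch:
        # insert: fails with ALREADY_EXISTS if any row is already present.
        batch.insert(
            table="UserEvents",
            columns=("UserName", "EventDate"),
            values=[("Bob", "2019-06-01"), ("Alfred", "2019-06-02")],
        )
        # insert_or_update: columns not listed here keep their current values.
        batch.insert_or_update(
            table="UserEvents",
            columns=("UserName", "EventDate"),
            values=[("Bob", "2019-06-01")],
        )
        # delete: idempotent, succeeds even if some of the keys do not exist.
        batch.delete("UserEvents", KeySet(keys=[["Bob", "2019-06-01"]]))

Each list passed to `values` must line up one-to-one with `columns`, mirroring the `Mutation.Write` contract above.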
-# source: google/cloud/spanner_v1/proto/mutation.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.cloud.spanner_v1.proto import ( - keys_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/mutation.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=_b( - "\n\025com.google.spanner.v1B\rMutationProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1" - ), - serialized_pb=_b( - '\n,google/cloud/spanner_v1/proto/mutation.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a(google/cloud/spanner_v1/proto/keys.proto\x1a\x1cgoogle/api/annotations.proto"\xc6\x03\n\x08Mutation\x12\x33\n\x06insert\x18\x01 \x01(\x0b\x32!.google.spanner.v1.Mutation.WriteH\x00\x12\x33\n\x06update\x18\x02 \x01(\x0b\x32!.google.spanner.v1.Mutation.WriteH\x00\x12=\n\x10insert_or_update\x18\x03 \x01(\x0b\x32!.google.spanner.v1.Mutation.WriteH\x00\x12\x34\n\x07replace\x18\x04 \x01(\x0b\x32!.google.spanner.v1.Mutation.WriteH\x00\x12\x34\n\x06\x64\x65lete\x18\x05 \x01(\x0b\x32".google.spanner.v1.Mutation.DeleteH\x00\x1aS\n\x05Write\x12\r\n\x05table\x18\x01 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x02 \x03(\t\x12*\n\x06values\x18\x03 \x03(\x0b\x32\x1a.google.protobuf.ListValue\x1a\x43\n\x06\x44\x65lete\x12\r\n\x05table\x18\x01 \x01(\t\x12*\n\x07key_set\x18\x02 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x0b\n\toperationB\x96\x01\n\x15\x63om.google.spanner.v1B\rMutationProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1b\x06proto3' - ), - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_MUTATION_WRITE = _descriptor.Descriptor( - name="Write", - full_name="google.spanner.v1.Mutation.Write", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="table", - full_name="google.spanner.v1.Mutation.Write.table", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="columns", - full_name="google.spanner.v1.Mutation.Write.columns", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="values", - full_name="google.spanner.v1.Mutation.Write.values", - index=2, - number=3, - type=11, - 
cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=459, - serialized_end=542, -) - -_MUTATION_DELETE = _descriptor.Descriptor( - name="Delete", - full_name="google.spanner.v1.Mutation.Delete", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="table", - full_name="google.spanner.v1.Mutation.Delete.table", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="key_set", - full_name="google.spanner.v1.Mutation.Delete.key_set", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=544, - serialized_end=611, -) - -_MUTATION = _descriptor.Descriptor( - name="Mutation", - full_name="google.spanner.v1.Mutation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="insert", - full_name="google.spanner.v1.Mutation.insert", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="google.spanner.v1.Mutation.update", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="insert_or_update", - full_name="google.spanner.v1.Mutation.insert_or_update", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="replace", - full_name="google.spanner.v1.Mutation.replace", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="delete", - full_name="google.spanner.v1.Mutation.delete", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_MUTATION_WRITE, _MUTATION_DELETE], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="operation", - full_name="google.spanner.v1.Mutation.operation", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=170, - serialized_end=624, -) - -_MUTATION_WRITE.fields_by_name[ - "values" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE -_MUTATION_WRITE.containing_type = _MUTATION -_MUTATION_DELETE.fields_by_name[ - "key_set" -].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2._KEYSET -_MUTATION_DELETE.containing_type = _MUTATION -_MUTATION.fields_by_name["insert"].message_type = _MUTATION_WRITE -_MUTATION.fields_by_name["update"].message_type = _MUTATION_WRITE -_MUTATION.fields_by_name["insert_or_update"].message_type = _MUTATION_WRITE -_MUTATION.fields_by_name["replace"].message_type = _MUTATION_WRITE -_MUTATION.fields_by_name["delete"].message_type = _MUTATION_DELETE -_MUTATION.oneofs_by_name["operation"].fields.append(_MUTATION.fields_by_name["insert"]) -_MUTATION.fields_by_name["insert"].containing_oneof = _MUTATION.oneofs_by_name[ - "operation" -] -_MUTATION.oneofs_by_name["operation"].fields.append(_MUTATION.fields_by_name["update"]) -_MUTATION.fields_by_name["update"].containing_oneof = _MUTATION.oneofs_by_name[ - "operation" -] -_MUTATION.oneofs_by_name["operation"].fields.append( - _MUTATION.fields_by_name["insert_or_update"] -) -_MUTATION.fields_by_name[ - "insert_or_update" -].containing_oneof = _MUTATION.oneofs_by_name["operation"] -_MUTATION.oneofs_by_name["operation"].fields.append(_MUTATION.fields_by_name["replace"]) -_MUTATION.fields_by_name["replace"].containing_oneof = _MUTATION.oneofs_by_name[ - "operation" -] -_MUTATION.oneofs_by_name["operation"].fields.append(_MUTATION.fields_by_name["delete"]) -_MUTATION.fields_by_name["delete"].containing_oneof = _MUTATION.oneofs_by_name[ - "operation" -] -DESCRIPTOR.message_types_by_name["Mutation"] = _MUTATION -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Mutation = _reflection.GeneratedProtocolMessageType( - "Mutation", - (_message.Message,), - dict( - Write=_reflection.GeneratedProtocolMessageType( - "Write", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_WRITE, - __module__="google.cloud.spanner_v1.proto.mutation_pb2", - __doc__="""Arguments to [insert][google.spanner.v1.Mutation.insert], - [update][google.spanner.v1.Mutation.update], - [insert\_or\_update][google.spanner.v1.Mutation.insert\_or\_update], and - [replace][google.spanner.v1.Mutation.replace] operations. - - - Attributes: - table: - Required. The table whose rows will be written. - columns: - The names of the columns in - [table][google.spanner.v1.Mutation.Write.table] to be written. - The list of columns must contain enough columns to allow Cloud - Spanner to derive values for all primary key columns in the - row(s) to be modified. - values: - The values to be written. ``values`` can contain more than one - list of values. If it does, then multiple rows are written, - one for each entry in ``values``. Each list in ``values`` must - have exactly as many entries as there are entries in - [columns][google.spanner.v1.Mutation.Write.columns] above. 
- Sending multiple lists is equivalent to sending multiple - ``Mutation``\ s, each containing one ``values`` entry and - repeating [table][google.spanner.v1.Mutation.Write.table] and - [columns][google.spanner.v1.Mutation.Write.columns]. - Individual values in each list are encoded as described - [here][google.spanner.v1.TypeCode]. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Write) - ), - ), - Delete=_reflection.GeneratedProtocolMessageType( - "Delete", - (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_DELETE, - __module__="google.cloud.spanner_v1.proto.mutation_pb2", - __doc__="""Arguments to [delete][google.spanner.v1.Mutation.delete] - operations. - - - Attributes: - table: - Required. The table whose rows will be deleted. - key_set: - Required. The primary keys of the rows within - [table][google.spanner.v1.Mutation.Delete.table] to delete. - Delete is idempotent. The transaction will succeed even if - some or all rows do not exist. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Delete) - ), - ), - DESCRIPTOR=_MUTATION, - __module__="google.cloud.spanner_v1.proto.mutation_pb2", - __doc__="""A modification to one or more Cloud Spanner rows. - Mutations can be applied to a Cloud Spanner database by sending them in - a [Commit][google.spanner.v1.Spanner.Commit] call. - - - Attributes: - operation: - Required. The operation to perform. - insert: - Insert new rows in a table. If any of the rows already exist, - the write or transaction fails with error ``ALREADY_EXISTS``. - update: - Update existing rows in a table. If any of the rows does not - already exist, the transaction fails with error ``NOT_FOUND``. - insert_or_update: - Like [insert][google.spanner.v1.Mutation.insert], except that - if the row already exists, then its column values are - overwritten with the ones provided. Any column values not - explicitly written are preserved. - replace: - Like [insert][google.spanner.v1.Mutation.insert], except that - if the row already exists, it is deleted, and the column - values provided are inserted instead. Unlike [insert\_or\_upda - te][google.spanner.v1.Mutation.insert\_or\_update], this means - any values not explicitly written become ``NULL``. In an - interleaved table, if you create the child table with the ``ON - DELETE CASCADE`` annotation, then replacing a parent row also - deletes the child rows. Otherwise, you must delete the child - rows before you replace the parent row. - delete: - Delete rows from a table. Succeeds whether or not the named - rows were present. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation) - ), -) -_sym_db.RegisterMessage(Mutation) -_sym_db.RegisterMessage(Mutation.Write) -_sym_db.RegisterMessage(Mutation.Delete) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/spanner/google/cloud/spanner_v1/proto/mutation_pb2_grpc.py b/spanner/google/cloud/spanner_v1/proto/mutation_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/mutation_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/spanner/google/cloud/spanner_v1/proto/query_plan.proto b/spanner/google/cloud/spanner_v1/proto/query_plan.proto deleted file mode 100644 index 2d6be2e2bd31..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/query_plan.proto +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2019 Google LLC. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.spanner.v1; - -import "google/protobuf/struct.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner"; -option java_multiple_files = true; -option java_outer_classname = "QueryPlanProto"; -option java_package = "com.google.spanner.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\V1"; - -// Node information for nodes appearing in a [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes]. -message PlanNode { - // Metadata associated with a parent-child relationship appearing in a - // [PlanNode][google.spanner.v1.PlanNode]. - message ChildLink { - // The node to which the link points. - int32 child_index = 1; - - // The type of the link. For example, in Hash Joins this could be used to - // distinguish between the build child and the probe child, or in the case - // of the child being an output variable, to represent the tag associated - // with the output variable. - string type = 2; - - // Only present if the child node is [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds - // to an output variable of the parent node. The field carries the name of - // the output variable. - // For example, a `TableScan` operator that reads rows from a table will - // have child links to the `SCALAR` nodes representing the output variables - // created for each column that is read by the operator. The corresponding - // `variable` fields will be set to the variable names assigned to the - // columns. - string variable = 3; - } - - // Condensed representation of a node and its subtree. Only present for - // `SCALAR` [PlanNode(s)][google.spanner.v1.PlanNode]. - message ShortRepresentation { - // A string representation of the expression subtree rooted at this node. - string description = 1; - - // A mapping of (subquery variable name) -> (subquery node id) for cases - // where the `description` string of this node references a `SCALAR` - // subquery contained in the expression subtree rooted at this node. The - // referenced `SCALAR` subquery may not necessarily be a direct child of - // this node. - map subqueries = 2; - } - - // The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between the two different kinds of - // nodes that can appear in a query plan. - enum Kind { - // Not specified. - KIND_UNSPECIFIED = 0; - - // Denotes a Relational operator node in the expression tree. Relational - // operators represent iterative processing of rows during query execution. - // For example, a `TableScan` operation that reads rows from a table. - RELATIONAL = 1; - - // Denotes a Scalar node in the expression tree. Scalar nodes represent - // non-iterable entities in the query plan. For example, constants or - // arithmetic operators appearing inside predicate expressions or references - // to column names. 
- SCALAR = 2; - } - - // The `PlanNode`'s index in [node list][google.spanner.v1.QueryPlan.plan_nodes]. - int32 index = 1; - - // Used to determine the type of node. May be needed for visualizing - // different kinds of nodes differently. For example, If the node is a - // [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a condensed representation - // which can be used to directly embed a description of the node in its - // parent. - Kind kind = 2; - - // The display name for the node. - string display_name = 3; - - // List of child node `index`es and their relationship to this parent. - repeated ChildLink child_links = 4; - - // Condensed representation for [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes. - ShortRepresentation short_representation = 5; - - // Attributes relevant to the node contained in a group of key-value pairs. - // For example, a Parameter Reference node could have the following - // information in its metadata: - // - // { - // "parameter_reference": "param1", - // "parameter_type": "array" - // } - google.protobuf.Struct metadata = 6; - - // The execution statistics associated with the node, contained in a group of - // key-value pairs. Only present if the plan was returned as a result of a - // profile query. For example, number of executions, number of rows/time per - // execution etc. - google.protobuf.Struct execution_stats = 7; -} - -// Contains an ordered list of nodes appearing in the query plan. -message QueryPlan { - // The nodes in the query plan. Plan nodes are returned in pre-order starting - // with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id` corresponds to its index in - // `plan_nodes`. - repeated PlanNode plan_nodes = 1; -} diff --git a/spanner/google/cloud/spanner_v1/proto/query_plan_pb2.py b/spanner/google/cloud/spanner_v1/proto/query_plan_pb2.py deleted file mode 100644 index bc715b454992..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/query_plan_pb2.py +++ /dev/null @@ -1,602 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
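The `QueryPlan`/`PlanNode` messages above are only populated when the statement is executed in PLAN or PROFILE mode. A hedged sketch of requesting and walking a plan, assuming a `database` handle from the hand-written client and the generated `spanner_pb2` module from this same package (the exact enum spelling is an assumption):

    from google.cloud.spanner_v1.proto import spanner_pb2

    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT UserId, UserName FROM Users",
            query_mode=spanner_pb2.ExecuteSqlRequest.PROFILE,
        )
        rows = list(results)  # stats arrive with the last partial result set
        plan = results.stats.query_plan
        for node in plan.plan_nodes:  # pre-order; node.index is its position
            print(node.index, node.display_name, node.kind)
        print(results.stats.query_stats)  # e.g. rows_returned, cpu_time (PROFILE only)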
-# source: google/cloud/spanner_v1/proto/query_plan.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/query_plan.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=_b( - "\n\025com.google.spanner.v1B\016QueryPlanProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1" - ), - serialized_pb=_b( - '\n.google/cloud/spanner_v1/proto/query_plan.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto"\xf8\x04\n\x08PlanNode\x12\r\n\x05index\x18\x01 \x01(\x05\x12.\n\x04kind\x18\x02 \x01(\x0e\x32 .google.spanner.v1.PlanNode.Kind\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\x0b\x63hild_links\x18\x04 \x03(\x0b\x32%.google.spanner.v1.PlanNode.ChildLink\x12M\n\x14short_representation\x18\x05 \x01(\x0b\x32/.google.spanner.v1.PlanNode.ShortRepresentation\x12)\n\x08metadata\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x30\n\x0f\x65xecution_stats\x18\x07 \x01(\x0b\x32\x17.google.protobuf.Struct\x1a@\n\tChildLink\x12\x13\n\x0b\x63hild_index\x18\x01 \x01(\x05\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x10\n\x08variable\x18\x03 \x01(\t\x1a\xb2\x01\n\x13ShortRepresentation\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12S\n\nsubqueries\x18\x02 \x03(\x0b\x32?.google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry\x1a\x31\n\x0fSubqueriesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01"8\n\x04Kind\x12\x14\n\x10KIND_UNSPECIFIED\x10\x00\x12\x0e\n\nRELATIONAL\x10\x01\x12\n\n\x06SCALAR\x10\x02"<\n\tQueryPlan\x12/\n\nplan_nodes\x18\x01 \x03(\x0b\x32\x1b.google.spanner.v1.PlanNodeB\x97\x01\n\x15\x63om.google.spanner.v1B\x0eQueryPlanProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1b\x06proto3' - ), - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_PLANNODE_KIND = _descriptor.EnumDescriptor( - name="Kind", - full_name="google.spanner.v1.PlanNode.Kind", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="KIND_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="RELATIONAL", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="SCALAR", index=2, number=2, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=706, - serialized_end=762, -) -_sym_db.RegisterEnumDescriptor(_PLANNODE_KIND) - - -_PLANNODE_CHILDLINK = _descriptor.Descriptor( - name="ChildLink", - full_name="google.spanner.v1.PlanNode.ChildLink", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="child_index", - 
full_name="google.spanner.v1.PlanNode.ChildLink.child_index", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.spanner.v1.PlanNode.ChildLink.type", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="variable", - full_name="google.spanner.v1.PlanNode.ChildLink.variable", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=459, - serialized_end=523, -) - -_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY = _descriptor.Descriptor( - name="SubqueriesEntry", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry.value", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=_b("8\001"), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=655, - serialized_end=704, -) - -_PLANNODE_SHORTREPRESENTATION = _descriptor.Descriptor( - name="ShortRepresentation", - full_name="google.spanner.v1.PlanNode.ShortRepresentation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="description", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.description", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="subqueries", - full_name="google.spanner.v1.PlanNode.ShortRepresentation.subqueries", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=526, - serialized_end=704, -) - -_PLANNODE = _descriptor.Descriptor( - name="PlanNode", - full_name="google.spanner.v1.PlanNode", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="index", - full_name="google.spanner.v1.PlanNode.index", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="kind", - full_name="google.spanner.v1.PlanNode.kind", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.spanner.v1.PlanNode.display_name", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="child_links", - full_name="google.spanner.v1.PlanNode.child_links", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="short_representation", - full_name="google.spanner.v1.PlanNode.short_representation", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.spanner.v1.PlanNode.metadata", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="execution_stats", - full_name="google.spanner.v1.PlanNode.execution_stats", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_PLANNODE_CHILDLINK, _PLANNODE_SHORTREPRESENTATION], - enum_types=[_PLANNODE_KIND], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=130, - serialized_end=762, -) - - -_QUERYPLAN = _descriptor.Descriptor( - name="QueryPlan", - full_name="google.spanner.v1.QueryPlan", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="plan_nodes", - full_name="google.spanner.v1.QueryPlan.plan_nodes", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=764, - serialized_end=824, -) - -_PLANNODE_CHILDLINK.containing_type = _PLANNODE -_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY.containing_type = ( - _PLANNODE_SHORTREPRESENTATION -) -_PLANNODE_SHORTREPRESENTATION.fields_by_name[ - "subqueries" -].message_type = _PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY -_PLANNODE_SHORTREPRESENTATION.containing_type = _PLANNODE -_PLANNODE.fields_by_name["kind"].enum_type = _PLANNODE_KIND -_PLANNODE.fields_by_name["child_links"].message_type = _PLANNODE_CHILDLINK -_PLANNODE.fields_by_name[ - "short_representation" -].message_type = _PLANNODE_SHORTREPRESENTATION -_PLANNODE.fields_by_name[ - "metadata" -].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT -_PLANNODE.fields_by_name[ - "execution_stats" -].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT -_PLANNODE_KIND.containing_type = _PLANNODE -_QUERYPLAN.fields_by_name["plan_nodes"].message_type = _PLANNODE -DESCRIPTOR.message_types_by_name["PlanNode"] = _PLANNODE -DESCRIPTOR.message_types_by_name["QueryPlan"] = _QUERYPLAN -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -PlanNode = _reflection.GeneratedProtocolMessageType( - "PlanNode", - (_message.Message,), - dict( - ChildLink=_reflection.GeneratedProtocolMessageType( - "ChildLink", - (_message.Message,), - dict( - DESCRIPTOR=_PLANNODE_CHILDLINK, - __module__="google.cloud.spanner_v1.proto.query_plan_pb2", - __doc__="""Metadata associated with a parent-child relationship - appearing in a [PlanNode][google.spanner.v1.PlanNode]. - - - Attributes: - child_index: - The node to which the link points. - type: - The type of the link. For example, in Hash Joins this could be - used to distinguish between the build child and the probe - child, or in the case of the child being an output variable, - to represent the tag associated with the output variable. - variable: - Only present if the child node is - [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and - corresponds to an output variable of the parent node. The - field carries the name of the output variable. For example, a - ``TableScan`` operator that reads rows from a table will have - child links to the ``SCALAR`` nodes representing the output - variables created for each column that is read by the - operator. The corresponding ``variable`` fields will be set to - the variable names assigned to the columns. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ChildLink) - ), - ), - ShortRepresentation=_reflection.GeneratedProtocolMessageType( - "ShortRepresentation", - (_message.Message,), - dict( - SubqueriesEntry=_reflection.GeneratedProtocolMessageType( - "SubqueriesEntry", - (_message.Message,), - dict( - DESCRIPTOR=_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY, - __module__="google.cloud.spanner_v1.proto.query_plan_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry) - ), - ), - DESCRIPTOR=_PLANNODE_SHORTREPRESENTATION, - __module__="google.cloud.spanner_v1.proto.query_plan_pb2", - __doc__="""Condensed representation of a node and its subtree. Only present for - ``SCALAR`` [PlanNode(s)][google.spanner.v1.PlanNode]. - - - Attributes: - description: - A string representation of the expression subtree rooted at - this node. - subqueries: - A mapping of (subquery variable name) -> (subquery node id) - for cases where the ``description`` string of this node - references a ``SCALAR`` subquery contained in the expression - subtree rooted at this node. The referenced ``SCALAR`` - subquery may not necessarily be a direct child of this node. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode.ShortRepresentation) - ), - ), - DESCRIPTOR=_PLANNODE, - __module__="google.cloud.spanner_v1.proto.query_plan_pb2", - __doc__="""Node information for nodes appearing in a - [QueryPlan.plan\_nodes][google.spanner.v1.QueryPlan.plan\_nodes]. - - - Attributes: - index: - The ``PlanNode``'s index in [node - list][google.spanner.v1.QueryPlan.plan\_nodes]. - kind: - Used to determine the type of node. May be needed for - visualizing different kinds of nodes differently. For example, - If the node is a - [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will - have a condensed representation which can be used to directly - embed a description of the node in its parent. - display_name: - The display name for the node. - child_links: - List of child node ``index``\ es and their relationship to - this parent. - short_representation: - Condensed representation for - [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes. - metadata: - Attributes relevant to the node contained in a group of key- - value pairs. For example, a Parameter Reference node could - have the following information in its metadata: :: { - "parameter_reference": "param1", "parameter_type": - "array" } - execution_stats: - The execution statistics associated with the node, contained - in a group of key-value pairs. Only present if the plan was - returned as a result of a profile query. For example, number - of executions, number of rows/time per execution etc. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PlanNode) - ), -) -_sym_db.RegisterMessage(PlanNode) -_sym_db.RegisterMessage(PlanNode.ChildLink) -_sym_db.RegisterMessage(PlanNode.ShortRepresentation) -_sym_db.RegisterMessage(PlanNode.ShortRepresentation.SubqueriesEntry) - -QueryPlan = _reflection.GeneratedProtocolMessageType( - "QueryPlan", - (_message.Message,), - dict( - DESCRIPTOR=_QUERYPLAN, - __module__="google.cloud.spanner_v1.proto.query_plan_pb2", - __doc__="""Contains an ordered list of nodes appearing in the query - plan. - - - Attributes: - plan_nodes: - The nodes in the query plan. Plan nodes are returned in pre- - order starting with the plan root. Each - [PlanNode][google.spanner.v1.PlanNode]'s ``id`` corresponds to - its index in ``plan_nodes``. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.QueryPlan) - ), -) -_sym_db.RegisterMessage(QueryPlan) - - -DESCRIPTOR._options = None -_PLANNODE_SHORTREPRESENTATION_SUBQUERIESENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/spanner/google/cloud/spanner_v1/proto/query_plan_pb2_grpc.py b/spanner/google/cloud/spanner_v1/proto/query_plan_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/query_plan_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/spanner/google/cloud/spanner_v1/proto/result_set.proto b/spanner/google/cloud/spanner_v1/proto/result_set.proto deleted file mode 100644 index a4b785283cdf..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/result_set.proto +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.spanner.v1; - -import "google/protobuf/struct.proto"; -import "google/spanner/v1/query_plan.proto"; -import "google/spanner/v1/transaction.proto"; -import "google/spanner/v1/type.proto"; -import "google/api/annotations.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.Spanner.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner"; -option java_multiple_files = true; -option java_outer_classname = "ResultSetProto"; -option java_package = "com.google.spanner.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\V1"; - -// Results from [Read][google.spanner.v1.Spanner.Read] or -// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. -message ResultSet { - // Metadata about the result set, such as row type information. - ResultSetMetadata metadata = 1; - - // Each element in `rows` is a row whose format is defined by - // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith element - // in each row matches the ith field in - // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements are - // encoded based on type as described - // [here][google.spanner.v1.TypeCode]. - repeated google.protobuf.ListValue rows = 2; - - // Query plan and execution statistics for the SQL statement that - // produced this result set. These can be requested by setting - // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. - // DML statements always produce stats containing the number of rows - // modified, unless executed using the - // [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN] [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. - // Other fields may or may not be populated, based on the - // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]. - ResultSetStats stats = 3; -} - -// Partial results from a streaming read or SQL query. 
Streaming reads and -// SQL queries better tolerate large result sets, large rows, and large -// values, but are a little trickier to consume. -message PartialResultSet { - // Metadata about the result set, such as row type information. - // Only present in the first response. - ResultSetMetadata metadata = 1; - - // A streamed result set consists of a stream of values, which might - // be split into many `PartialResultSet` messages to accommodate - // large rows and/or large values. Every N complete values defines a - // row, where N is equal to the number of entries in - // [metadata.row_type.fields][google.spanner.v1.StructType.fields]. - // - // Most values are encoded based on type as described - // [here][google.spanner.v1.TypeCode]. - // - // It is possible that the last value in values is "chunked", - // meaning that the rest of the value is sent in subsequent - // `PartialResultSet`(s). This is denoted by the [chunked_value][google.spanner.v1.PartialResultSet.chunked_value] - // field. Two or more chunked values can be merged to form a - // complete value as follows: - // - // * `bool/number/null`: cannot be chunked - // * `string`: concatenate the strings - // * `list`: concatenate the lists. If the last element in a list is a - // `string`, `list`, or `object`, merge it with the first element in - // the next list by applying these rules recursively. - // * `object`: concatenate the (field name, field value) pairs. If a - // field name is duplicated, then apply these rules recursively - // to merge the field values. - // - // Some examples of merging: - // - // # Strings are concatenated. - // "foo", "bar" => "foobar" - // - // # Lists of non-strings are concatenated. - // [2, 3], [4] => [2, 3, 4] - // - // # Lists are concatenated, but the last and first elements are merged - // # because they are strings. - // ["a", "b"], ["c", "d"] => ["a", "bc", "d"] - // - // # Lists are concatenated, but the last and first elements are merged - // # because they are lists. Recursively, the last and first elements - // # of the inner lists are merged because they are strings. - // ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"] - // - // # Non-overlapping object fields are combined. - // {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"} - // - // # Overlapping object fields are merged. - // {"a": "1"}, {"a": "2"} => {"a": "12"} - // - // # Examples of merging objects containing lists of strings. - // {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]} - // - // For a more complete example, suppose a streaming SQL query is - // yielding a result set whose rows contain a single string - // field. The following `PartialResultSet`s might be yielded: - // - // { - // "metadata": { ... } - // "values": ["Hello", "W"] - // "chunked_value": true - // "resume_token": "Af65..." - // } - // { - // "values": ["orl"] - // "chunked_value": true - // "resume_token": "Bqp2..." - // } - // { - // "values": ["d"] - // "resume_token": "Zx1B..." - // } - // - // This sequence of `PartialResultSet`s encodes two rows, one - // containing the field value `"Hello"`, and a second containing the - // field value `"World" = "W" + "orl" + "d"`. - repeated google.protobuf.Value values = 2; - - // If true, then the final value in [values][google.spanner.v1.PartialResultSet.values] is chunked, and must - // be combined with more values from subsequent `PartialResultSet`s - // to obtain a complete field value. 
- bool chunked_value = 3; - - // Streaming calls might be interrupted for a variety of reasons, such - // as TCP connection loss. If this occurs, the stream of results can - // be resumed by re-sending the original request and including - // `resume_token`. Note that executing any other transaction in the - // same session invalidates the token. - bytes resume_token = 4; - - // Query plan and execution statistics for the statement that produced this - // streaming result set. These can be requested by setting - // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] and are sent - // only once with the last response in the stream. - // This field will also be present in the last response for DML - // statements. - ResultSetStats stats = 5; -} - -// Metadata about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet]. -message ResultSetMetadata { - // Indicates the field names and types for the rows in the result - // set. For example, a SQL query like `"SELECT UserId, UserName FROM - // Users"` could return a `row_type` value like: - // - // "fields": [ - // { "name": "UserId", "type": { "code": "INT64" } }, - // { "name": "UserName", "type": { "code": "STRING" } }, - // ] - StructType row_type = 1; - - // If the read or SQL query began a transaction as a side-effect, the - // information about the new transaction is yielded here. - Transaction transaction = 2; -} - -// Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet]. -message ResultSetStats { - // [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this result. - QueryPlan query_plan = 1; - - // Aggregated statistics from the execution of the query. Only present when - // the query is profiled. For example, a query could return the statistics as - // follows: - // - // { - // "rows_returned": "3", - // "elapsed_time": "1.22 secs", - // "cpu_time": "1.19 secs" - // } - google.protobuf.Struct query_stats = 2; - - // The number of rows modified by the DML statement. - oneof row_count { - // Standard DML returns an exact count of rows that were modified. - int64 row_count_exact = 3; - - // Partitioned DML does not offer exactly-once semantics, so it - // returns a lower bound of the rows modified. - int64 row_count_lower_bound = 4; - } -} diff --git a/spanner/google/cloud/spanner_v1/proto/result_set_pb2.py b/spanner/google/cloud/spanner_v1/proto/result_set_pb2.py deleted file mode 100644 index 3740450e6b24..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/result_set_pb2.py +++ /dev/null @@ -1,625 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/spanner_v1/proto/result_set.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.cloud.spanner_v1.proto import ( - query_plan_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_query__plan__pb2, -) -from google.cloud.spanner_v1.proto import ( - transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2, -) -from google.cloud.spanner_v1.proto import ( - type_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2, -) -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/result_set.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=_b( - "\n\025com.google.spanner.v1B\016ResultSetProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\370\001\001\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1" - ), - serialized_pb=_b( - '\n.google/cloud/spanner_v1/proto/result_set.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a.google/cloud/spanner_v1/proto/query_plan.proto\x1a/google/cloud/spanner_v1/proto/transaction.proto\x1a(google/cloud/spanner_v1/proto/type.proto\x1a\x1cgoogle/api/annotations.proto"\x9f\x01\n\tResultSet\x12\x36\n\x08metadata\x18\x01 \x01(\x0b\x32$.google.spanner.v1.ResultSetMetadata\x12(\n\x04rows\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.ListValue\x12\x30\n\x05stats\x18\x03 \x01(\x0b\x32!.google.spanner.v1.ResultSetStats"\xd1\x01\n\x10PartialResultSet\x12\x36\n\x08metadata\x18\x01 \x01(\x0b\x32$.google.spanner.v1.ResultSetMetadata\x12&\n\x06values\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Value\x12\x15\n\rchunked_value\x18\x03 \x01(\x08\x12\x14\n\x0cresume_token\x18\x04 \x01(\x0c\x12\x30\n\x05stats\x18\x05 \x01(\x0b\x32!.google.spanner.v1.ResultSetStats"y\n\x11ResultSetMetadata\x12/\n\x08row_type\x18\x01 \x01(\x0b\x32\x1d.google.spanner.v1.StructType\x12\x33\n\x0btransaction\x18\x02 \x01(\x0b\x32\x1e.google.spanner.v1.Transaction"\xb9\x01\n\x0eResultSetStats\x12\x30\n\nquery_plan\x18\x01 \x01(\x0b\x32\x1c.google.spanner.v1.QueryPlan\x12,\n\x0bquery_stats\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x19\n\x0frow_count_exact\x18\x03 \x01(\x03H\x00\x12\x1f\n\x15row_count_lower_bound\x18\x04 \x01(\x03H\x00\x42\x0b\n\trow_countB\x9a\x01\n\x15\x63om.google.spanner.v1B\x0eResultSetProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xf8\x01\x01\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1b\x06proto3' - ), - dependencies=[ - google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_query__plan__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.DESCRIPTOR, - google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_RESULTSET = _descriptor.Descriptor( - name="ResultSet", - full_name="google.spanner.v1.ResultSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name="metadata", - full_name="google.spanner.v1.ResultSet.metadata", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="rows", - full_name="google.spanner.v1.ResultSet.rows", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="stats", - full_name="google.spanner.v1.ResultSet.stats", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=269, - serialized_end=428, -) - - -_PARTIALRESULTSET = _descriptor.Descriptor( - name="PartialResultSet", - full_name="google.spanner.v1.PartialResultSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.spanner.v1.PartialResultSet.metadata", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="values", - full_name="google.spanner.v1.PartialResultSet.values", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="chunked_value", - full_name="google.spanner.v1.PartialResultSet.chunked_value", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="resume_token", - full_name="google.spanner.v1.PartialResultSet.resume_token", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="stats", - full_name="google.spanner.v1.PartialResultSet.stats", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - 
oneofs=[], - serialized_start=431, - serialized_end=640, -) - - -_RESULTSETMETADATA = _descriptor.Descriptor( - name="ResultSetMetadata", - full_name="google.spanner.v1.ResultSetMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="row_type", - full_name="google.spanner.v1.ResultSetMetadata.row_type", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="transaction", - full_name="google.spanner.v1.ResultSetMetadata.transaction", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=642, - serialized_end=763, -) - - -_RESULTSETSTATS = _descriptor.Descriptor( - name="ResultSetStats", - full_name="google.spanner.v1.ResultSetStats", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="query_plan", - full_name="google.spanner.v1.ResultSetStats.query_plan", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="query_stats", - full_name="google.spanner.v1.ResultSetStats.query_stats", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_count_exact", - full_name="google.spanner.v1.ResultSetStats.row_count_exact", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="row_count_lower_bound", - full_name="google.spanner.v1.ResultSetStats.row_count_lower_bound", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="row_count", - full_name="google.spanner.v1.ResultSetStats.row_count", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=766, - serialized_end=951, -) - -_RESULTSET.fields_by_name["metadata"].message_type = _RESULTSETMETADATA -_RESULTSET.fields_by_name[ - "rows" -].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE 
-_RESULTSET.fields_by_name["stats"].message_type = _RESULTSETSTATS -_PARTIALRESULTSET.fields_by_name["metadata"].message_type = _RESULTSETMETADATA -_PARTIALRESULTSET.fields_by_name[ - "values" -].message_type = google_dot_protobuf_dot_struct__pb2._VALUE -_PARTIALRESULTSET.fields_by_name["stats"].message_type = _RESULTSETSTATS -_RESULTSETMETADATA.fields_by_name[ - "row_type" -].message_type = google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2._STRUCTTYPE -_RESULTSETMETADATA.fields_by_name[ - "transaction" -].message_type = ( - google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTION -) -_RESULTSETSTATS.fields_by_name[ - "query_plan" -].message_type = ( - google_dot_cloud_dot_spanner__v1_dot_proto_dot_query__plan__pb2._QUERYPLAN -) -_RESULTSETSTATS.fields_by_name[ - "query_stats" -].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT -_RESULTSETSTATS.oneofs_by_name["row_count"].fields.append( - _RESULTSETSTATS.fields_by_name["row_count_exact"] -) -_RESULTSETSTATS.fields_by_name[ - "row_count_exact" -].containing_oneof = _RESULTSETSTATS.oneofs_by_name["row_count"] -_RESULTSETSTATS.oneofs_by_name["row_count"].fields.append( - _RESULTSETSTATS.fields_by_name["row_count_lower_bound"] -) -_RESULTSETSTATS.fields_by_name[ - "row_count_lower_bound" -].containing_oneof = _RESULTSETSTATS.oneofs_by_name["row_count"] -DESCRIPTOR.message_types_by_name["ResultSet"] = _RESULTSET -DESCRIPTOR.message_types_by_name["PartialResultSet"] = _PARTIALRESULTSET -DESCRIPTOR.message_types_by_name["ResultSetMetadata"] = _RESULTSETMETADATA -DESCRIPTOR.message_types_by_name["ResultSetStats"] = _RESULTSETSTATS -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ResultSet = _reflection.GeneratedProtocolMessageType( - "ResultSet", - (_message.Message,), - dict( - DESCRIPTOR=_RESULTSET, - __module__="google.cloud.spanner_v1.proto.result_set_pb2", - __doc__="""Results from [Read][google.spanner.v1.Spanner.Read] or - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. - - - Attributes: - metadata: - Metadata about the result set, such as row type information. - rows: - Each element in ``rows`` is a row whose format is defined by [ - metadata.row\_type][google.spanner.v1.ResultSetMetadata.row\_t - ype]. The ith element in each row matches the ith field in [me - tadata.row\_type][google.spanner.v1.ResultSetMetadata.row\_typ - e]. Elements are encoded based on type as described - [here][google.spanner.v1.TypeCode]. - stats: - Query plan and execution statistics for the SQL statement that - produced this result set. These can be requested by setting [E - xecuteSqlRequest.query\_mode][google.spanner.v1.ExecuteSqlRequ - est.query\_mode]. DML statements always produce stats - containing the number of rows modified, unless executed using - the [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.Execu - teSqlRequest.QueryMode.PLAN] [ExecuteSqlRequest.query\_mode][g - oogle.spanner.v1.ExecuteSqlRequest.query\_mode]. Other fields - may or may not be populated, based on the [ExecuteSqlRequest.q - uery\_mode][google.spanner.v1.ExecuteSqlRequest.query\_mode]. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSet) - ), -) -_sym_db.RegisterMessage(ResultSet) - -PartialResultSet = _reflection.GeneratedProtocolMessageType( - "PartialResultSet", - (_message.Message,), - dict( - DESCRIPTOR=_PARTIALRESULTSET, - __module__="google.cloud.spanner_v1.proto.result_set_pb2", - __doc__="""Partial results from a streaming read or SQL query. 
- Streaming reads and SQL queries better tolerate large result sets, large - rows, and large values, but are a little trickier to consume. - - - Attributes: - metadata: - Metadata about the result set, such as row type information. - Only present in the first response. - values: - A streamed result set consists of a stream of values, which - might be split into many ``PartialResultSet`` messages to - accommodate large rows and/or large values. Every N complete - values defines a row, where N is equal to the number of - entries in [metadata.row\_type.fields][google.spanner.v1.Struc - tType.fields]. Most values are encoded based on type as - described [here][google.spanner.v1.TypeCode]. It is possible - that the last value in values is "chunked", meaning that the - rest of the value is sent in subsequent ``PartialResultSet``\ - (s). This is denoted by the [chunked\_value][google.spanner.v1 - .PartialResultSet.chunked\_value] field. Two or more chunked - values can be merged to form a complete value as follows: - - ``bool/number/null``: cannot be chunked - ``string``: - concatenate the strings - ``list``: concatenate the lists. If - the last element in a list is a ``string``, ``list``, or - ``object``, merge it with the first element in the next - list by applying these rules recursively. - ``object``: - concatenate the (field name, field value) pairs. If a field - name is duplicated, then apply these rules recursively to - merge the field values. Some examples of merging: :: - # Strings are concatenated. "foo", "bar" => "foobar" - # Lists of non-strings are concatenated. [2, 3], [4] => - [2, 3, 4] # Lists are concatenated, but the last and - first elements are merged # because they are strings. - ["a", "b"], ["c", "d"] => ["a", "bc", "d"] # Lists are - concatenated, but the last and first elements are merged # - because they are lists. Recursively, the last and first - elements # of the inner lists are merged because they are - strings. ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", - "cd"], "e"] # Non-overlapping object fields are combined. - {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"} # - Overlapping object fields are merged. {"a": "1"}, {"a": - "2"} => {"a": "12"} # Examples of merging objects - containing lists of strings. {"a": ["1"]}, {"a": ["2"]} => - {"a": ["12"]} For a more complete example, suppose a - streaming SQL query is yielding a result set whose rows - contain a single string field. The following - ``PartialResultSet``\ s might be yielded: :: { - "metadata": { ... } "values": ["Hello", "W"] - "chunked_value": true "resume_token": "Af65..." } - { "values": ["orl"] "chunked_value": true - "resume_token": "Bqp2..." } { "values": ["d"] - "resume_token": "Zx1B..." } This sequence of - ``PartialResultSet``\ s encodes two rows, one containing the - field value ``"Hello"``, and a second containing the field - value ``"World" = "W" + "orl" + "d"``. - chunked_value: - If true, then the final value in - [values][google.spanner.v1.PartialResultSet.values] is - chunked, and must be combined with more values from subsequent - ``PartialResultSet``\ s to obtain a complete field value. - resume_token: - Streaming calls might be interrupted for a variety of reasons, - such as TCP connection loss. If this occurs, the stream of - results can be resumed by re-sending the original request and - including ``resume_token``. Note that executing any other - transaction in the same session invalidates the token. 
- stats: - Query plan and execution statistics for the statement that - produced this streaming result set. These can be requested by - setting [ExecuteSqlRequest.query\_mode][google.spanner.v1.Exec - uteSqlRequest.query\_mode] and are sent only once with the - last response in the stream. This field will also be present - in the last response for DML statements. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartialResultSet) - ), -) -_sym_db.RegisterMessage(PartialResultSet) - -ResultSetMetadata = _reflection.GeneratedProtocolMessageType( - "ResultSetMetadata", - (_message.Message,), - dict( - DESCRIPTOR=_RESULTSETMETADATA, - __module__="google.cloud.spanner_v1.proto.result_set_pb2", - __doc__="""Metadata about a [ResultSet][google.spanner.v1.ResultSet] - or [PartialResultSet][google.spanner.v1.PartialResultSet]. - - - Attributes: - row_type: - Indicates the field names and types for the rows in the result - set. For example, a SQL query like ``"SELECT UserId, UserName - FROM Users"`` could return a ``row_type`` value like: :: - "fields": [ { "name": "UserId", "type": { "code": - "INT64" } }, { "name": "UserName", "type": { "code": - "STRING" } }, ] - transaction: - If the read or SQL query began a transaction as a side-effect, - the information about the new transaction is yielded here. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSetMetadata) - ), -) -_sym_db.RegisterMessage(ResultSetMetadata) - -ResultSetStats = _reflection.GeneratedProtocolMessageType( - "ResultSetStats", - (_message.Message,), - dict( - DESCRIPTOR=_RESULTSETSTATS, - __module__="google.cloud.spanner_v1.proto.result_set_pb2", - __doc__="""Additional statistics about a - [ResultSet][google.spanner.v1.ResultSet] or - [PartialResultSet][google.spanner.v1.PartialResultSet]. - - - Attributes: - query_plan: - [QueryPlan][google.spanner.v1.QueryPlan] for the query - associated with this result. - query_stats: - Aggregated statistics from the execution of the query. Only - present when the query is profiled. For example, a query could - return the statistics as follows: :: { - "rows_returned": "3", "elapsed_time": "1.22 secs", - "cpu_time": "1.19 secs" } - row_count: - The number of rows modified by the DML statement. - row_count_exact: - Standard DML returns an exact count of rows that were - modified. - row_count_lower_bound: - Partitioned DML does not offer exactly-once semantics, so it - returns a lower bound of the rows modified. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ResultSetStats) - ), -) -_sym_db.RegisterMessage(ResultSetStats) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/spanner/google/cloud/spanner_v1/proto/result_set_pb2_grpc.py b/spanner/google/cloud/spanner_v1/proto/result_set_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/result_set_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/spanner/google/cloud/spanner_v1/proto/spanner.proto b/spanner/google/cloud/spanner_v1/proto/spanner.proto deleted file mode 100644 index 2ff4c8db8908..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/spanner.proto +++ /dev/null @@ -1,913 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.spanner.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; -import "google/spanner/v1/keys.proto"; -import "google/spanner/v1/mutation.proto"; -import "google/spanner/v1/result_set.proto"; -import "google/spanner/v1/transaction.proto"; -import "google/spanner/v1/type.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner"; -option java_multiple_files = true; -option java_outer_classname = "SpannerProto"; -option java_package = "com.google.spanner.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\V1"; - -// The Database resource is defined in `google.spanner.admin.database.v1`. -// Because this is a separate, independent API (technically), we redefine -// the resource name pattern here. -option (google.api.resource_definition) = { - type: "spanner.googleapis.com/Database" - pattern: "projects/{project}/instances/{instance}/databases/{database}" -}; - -// Cloud Spanner API -// -// The Cloud Spanner API can be used to manage sessions and execute -// transactions on data stored in Cloud Spanner databases. -service Spanner { - option (google.api.default_host) = "spanner.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/spanner.data"; - - // Creates a new session. A session can be used to perform - // transactions that read and/or modify data in a Cloud Spanner database. - // Sessions are meant to be reused for many consecutive - // transactions. - // - // Sessions can only execute one transaction at a time. To execute - // multiple concurrent read-write/write-only transactions, create - // multiple sessions. Note that standalone reads and queries use a - // transaction internally, and count toward the one transaction - // limit. - // - // Active sessions use additional server resources, so it is a good idea to - // delete idle and unneeded sessions. - // Aside from explicit deletes, Cloud Spanner can delete sessions for which no - // operations are sent for more than an hour. If a session is deleted, - // requests to it return `NOT_FOUND`. - // - // Idle sessions can be kept alive by sending a trivial SQL query - // periodically, e.g., `"SELECT 1"`. - rpc CreateSession(CreateSessionRequest) returns (Session) { - option (google.api.http) = { - post: "/v1/{database=projects/*/instances/*/databases/*}/sessions" - body: "*" - }; - option (google.api.method_signature) = "database"; - } - - // Creates multiple new sessions. - // - // This API can be used to initialize a session cache on the clients. - // See https://goo.gl/TgSFN2 for best practices on session cache management. 
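In the Python client library these session RPCs are normally hidden behind a session pool, so a typical application does not call CreateSession or BatchCreateSessions directly. A minimal, hedged sketch (instance and database IDs are placeholders; project and credentials come from the environment):

    from google.cloud import spanner

    client = spanner.Client()                      # project/credentials from the environment
    instance = client.instance("my-instance")      # placeholder instance ID
    database = instance.database("my-database")    # placeholder database ID

    # The pooled session behind this snapshot is reused for later requests;
    # a trivial statement such as "SELECT 1" also works as a keep-alive probe.
    with database.snapshot() as snapshot:
        for row in snapshot.execute_sql("SELECT 1"):
            print(row)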
- rpc BatchCreateSessions(BatchCreateSessionsRequest) - returns (BatchCreateSessionsResponse) { - option (google.api.http) = { - post: "/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate" - body: "*" - }; - option (google.api.method_signature) = "database,session_count"; - } - - // Gets a session. Returns `NOT_FOUND` if the session does not exist. - // This is mainly useful for determining whether a session is still - // alive. - rpc GetSession(GetSessionRequest) returns (Session) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instances/*/databases/*/sessions/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists all sessions in a given database. - rpc ListSessions(ListSessionsRequest) returns (ListSessionsResponse) { - option (google.api.http) = { - get: "/v1/{database=projects/*/instances/*/databases/*}/sessions" - }; - option (google.api.method_signature) = "database"; - } - - // Ends a session, releasing server resources associated with it. This will - // asynchronously trigger cancellation of any operations that are running with - // this session. - rpc DeleteSession(DeleteSessionRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/instances/*/databases/*/sessions/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Executes an SQL statement, returning all results in a single reply. This - // method cannot be used to return a result set larger than 10 MiB; - // if the query yields more data than that, the query fails with - // a `FAILED_PRECONDITION` error. - // - // Operations inside read-write transactions might return `ABORTED`. If - // this occurs, the application should restart the transaction from - // the beginning. See [Transaction][google.spanner.v1.Transaction] for more - // details. - // - // Larger result sets can be fetched in streaming fashion by calling - // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] - // instead. - rpc ExecuteSql(ExecuteSqlRequest) returns (ResultSet) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql" - body: "*" - }; - } - - // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the - // result set as a stream. Unlike - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on - // the size of the returned result set. However, no individual row in the - // result set can exceed 100 MiB, and no column value can exceed 10 MiB. - rpc ExecuteStreamingSql(ExecuteSqlRequest) returns (stream PartialResultSet) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql" - body: "*" - }; - } - - // Executes a batch of SQL DML statements. This method allows many statements - // to be run with lower latency than submitting them sequentially with - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. - // - // Statements are executed in sequential order. A request can succeed even if - // a statement fails. The - // [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] - // field in the response provides information about the statement that failed. - // Clients must inspect this field to determine whether an error occurred. - // - // Execution stops after the first failed statement; the remaining statements - // are not executed. 
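A hedged sketch of the same semantics through the Python client, where Transaction.batch_update wraps ExecuteBatchDml and returns the overall status plus one row count per statement that actually ran (table and column names are placeholders):

    from google.cloud import spanner

    def run_batch_dml(transaction):
        status, row_counts = transaction.batch_update([
            "UPDATE Singers SET MarketingBudget = 100000 WHERE SingerId = 1",
            "DELETE FROM Albums WHERE AlbumId = 42",
        ])
        if status.code != 0:  # 0 is google.rpc.Code.OK
            # Execution stopped at the first failure: only the first
            # len(row_counts) statements were executed.
            raise RuntimeError(status.message)
        return row_counts

    database = spanner.Client().instance("my-instance").database("my-database")
    print(database.run_in_transaction(run_batch_dml))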
- rpc ExecuteBatchDml(ExecuteBatchDmlRequest) - returns (ExecuteBatchDmlResponse) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml" - body: "*" - }; - } - - // Reads rows from the database using key lookups and scans, as a - // simple key/value style alternative to - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be - // used to return a result set larger than 10 MiB; if the read matches more - // data than that, the read fails with a `FAILED_PRECONDITION` - // error. - // - // Reads inside read-write transactions might return `ABORTED`. If - // this occurs, the application should restart the transaction from - // the beginning. See [Transaction][google.spanner.v1.Transaction] for more - // details. - // - // Larger result sets can be yielded in streaming fashion by calling - // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. - rpc Read(ReadRequest) returns (ResultSet) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read" - body: "*" - }; - } - - // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set - // as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no - // limit on the size of the returned result set. However, no individual row in - // the result set can exceed 100 MiB, and no column value can exceed - // 10 MiB. - rpc StreamingRead(ReadRequest) returns (stream PartialResultSet) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead" - body: "*" - }; - } - - // Begins a new transaction. This step can often be skipped: - // [Read][google.spanner.v1.Spanner.Read], - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and - // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a - // side-effect. - rpc BeginTransaction(BeginTransactionRequest) returns (Transaction) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction" - body: "*" - }; - option (google.api.method_signature) = "session,options"; - } - - // Commits a transaction. The request includes the mutations to be - // applied to rows in the database. - // - // `Commit` might return an `ABORTED` error. This can occur at any time; - // commonly, the cause is conflicts with concurrent - // transactions. However, it can also happen for a variety of other - // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt - // the transaction from the beginning, re-using the same session. - rpc Commit(CommitRequest) returns (CommitResponse) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit" - body: "*" - }; - option (google.api.method_signature) = "session,transaction_id,mutations"; - option (google.api.method_signature) = - "session,single_use_transaction,mutations"; - } - - // Rolls back a transaction, releasing any locks it holds. It is a good - // idea to call this for any transaction that includes one or more - // [Read][google.spanner.v1.Spanner.Read] or - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately - // decides not to commit. - // - // `Rollback` returns `OK` if it successfully aborts the transaction, the - // transaction was already aborted, or the transaction is not - // found. `Rollback` never returns `ABORTED`. 
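The Python client library packages this commit/rollback protocol behind Database.run_in_transaction: the unit of work is retried from the beginning when Commit returns ABORTED, and the transaction is not committed if the function raises. A hedged sketch (IDs, table, and columns are placeholders):

    from google.cloud import spanner

    def add_to_budget(transaction):
        # Runs inside a read-write transaction; run_in_transaction commits when
        # this function returns and re-runs it if the commit is ABORTED.
        transaction.execute_update(
            "UPDATE Albums SET MarketingBudget = MarketingBudget + 1000 "
            "WHERE SingerId = 1 AND AlbumId = 1")

    database = spanner.Client().instance("my-instance").database("my-database")
    database.run_in_transaction(add_to_budget)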
- rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback" - body: "*" - }; - option (google.api.method_signature) = "session,transaction_id"; - } - - // Creates a set of partition tokens that can be used to execute a query - // operation in parallel. Each of the returned partition tokens can be used - // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to - // specify a subset of the query result to read. The same session and - // read-only transaction must be used by the PartitionQueryRequest used to - // create the partition tokens and the ExecuteSqlRequests that use the - // partition tokens. - // - // Partition tokens become invalid when the session used to create them - // is deleted, is idle for too long, begins a new transaction, or becomes too - // old. When any of these happen, it is not possible to resume the query, and - // the whole operation must be restarted from the beginning. - rpc PartitionQuery(PartitionQueryRequest) returns (PartitionResponse) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery" - body: "*" - }; - } - - // Creates a set of partition tokens that can be used to execute a read - // operation in parallel. Each of the returned partition tokens can be used - // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a - // subset of the read result to read. The same session and read-only - // transaction must be used by the PartitionReadRequest used to create the - // partition tokens and the ReadRequests that use the partition tokens. There - // are no ordering guarantees on rows returned among the returned partition - // tokens, or even within each individual StreamingRead call issued with a - // partition_token. - // - // Partition tokens become invalid when the session used to create them - // is deleted, is idle for too long, begins a new transaction, or becomes too - // old. When any of these happen, it is not possible to resume the read, and - // the whole operation must be restarted from the beginning. - rpc PartitionRead(PartitionReadRequest) returns (PartitionResponse) { - option (google.api.http) = { - post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead" - body: "*" - }; - } -} - -// The request for [CreateSession][google.spanner.v1.Spanner.CreateSession]. -message CreateSessionRequest { - // Required. The database in which the new session is created. - string database = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - } - ]; - - // The session to create. - Session session = 2; -} - -// The request for -// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. -message BatchCreateSessionsRequest { - // Required. The database in which the new sessions are created. - string database = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - } - ]; - - // Parameters to be applied to each created session. - Session session_template = 2; - - // Required. The number of sessions to be created in this batch call. - // The API may return fewer than the requested number of sessions. 
If a - // specific number of sessions are desired, the client can make additional - // calls to BatchCreateSessions (adjusting - // [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] - // as necessary). - int32 session_count = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// The response for -// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. -message BatchCreateSessionsResponse { - // The freshly created sessions. - repeated Session session = 1; -} - -// A session in the Cloud Spanner API. -message Session { - option (google.api.resource) = { - type: "spanner.googleapis.com/Session" - pattern: "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}" - }; - - // The name of the session. This is always system-assigned; values provided - // when creating a session are ignored. - string name = 1; - - // The labels for the session. - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. - // * Label values must be between 0 and 63 characters long and must conform - // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. - // * No more than 64 labels can be associated with a given session. - // - // See https://goo.gl/xmQnxf for more information on and examples of labels. - map labels = 2; - - // Output only. The timestamp when the session is created. - google.protobuf.Timestamp create_time = 3; - - // Output only. The approximate timestamp when the session is last used. It is - // typically earlier than the actual last use time. - google.protobuf.Timestamp approximate_last_use_time = 4; -} - -// The request for [GetSession][google.spanner.v1.Spanner.GetSession]. -message GetSessionRequest { - // Required. The name of the session to retrieve. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; -} - -// The request for [ListSessions][google.spanner.v1.Spanner.ListSessions]. -message ListSessionsRequest { - // Required. The database in which to list sessions. - string database = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - } - ]; - - // Number of sessions to be returned in the response. If 0 or less, defaults - // to the server's maximum allowed page size. - int32 page_size = 2; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] - // from a previous - // [ListSessionsResponse][google.spanner.v1.ListSessionsResponse]. - string page_token = 3; - - // An expression for filtering the results of the request. Filter rules are - // case insensitive. The fields eligible for filtering are: - // - // * `labels.key` where key is the name of a label - // - // Some examples of using filters are: - // - // * `labels.env:*` --> The session has the label "env". - // * `labels.env:dev` --> The session has the label "env" and the value of - // the label contains the string "dev". - string filter = 4; -} - -// The response for [ListSessions][google.spanner.v1.Spanner.ListSessions]. -message ListSessionsResponse { - // The list of requested sessions. - repeated Session sessions = 1; - - // `next_page_token` can be sent in a subsequent - // [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more - // of the matching sessions. 
- string next_page_token = 2; -} - -// The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. -message DeleteSessionRequest { - // Required. The name of the session to delete. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; -} - -// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and -// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. -message ExecuteSqlRequest { - // Mode in which the statement must be processed. - enum QueryMode { - // The default mode. Only the statement results are returned. - NORMAL = 0; - - // This mode returns only the query plan, without any results or - // execution statistics information. - PLAN = 1; - - // This mode returns both the query plan and the execution statistics along - // with the results. - PROFILE = 2; - } - - // Required. The session in which the SQL query should be performed. - string session = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; - - // The transaction to use. - // - // For queries, if none is provided, the default is a temporary read-only - // transaction with strong concurrency. - // - // Standard DML statements require a read-write transaction. To protect - // against replays, single-use transactions are not supported. The caller - // must either supply an existing transaction ID or begin a new transaction. - // - // Partitioned DML requires an existing Partitioned DML transaction ID. - TransactionSelector transaction = 2; - - // Required. The SQL string. - string sql = 3 [(google.api.field_behavior) = REQUIRED]; - - // Parameter names and values that bind to placeholders in the SQL string. - // - // A parameter placeholder consists of the `@` character followed by the - // parameter name (for example, `@firstName`). Parameter names can contain - // letters, numbers, and underscores. - // - // Parameters can appear anywhere that a literal value is expected. The same - // parameter name can be used more than once, for example: - // - // `"WHERE id > @msg_id AND id < @msg_id + 100"` - // - // It is an error to execute a SQL statement with unbound parameters. - google.protobuf.Struct params = 4; - - // It is not always possible for Cloud Spanner to infer the right SQL type - // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in - // [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. - // - // In these cases, `param_types` can be used to specify the exact - // SQL type for some or all of the SQL statement parameters. See the - // definition of [Type][google.spanner.v1.Type] for more information - // about SQL types. - map param_types = 5; - - // If this request is resuming a previously interrupted SQL statement - // execution, `resume_token` should be copied from the last - // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the - // interruption. Doing this enables the new SQL statement execution to resume - // where the last one left off. The rest of the request parameters must - // exactly match the request that yielded this token. - bytes resume_token = 6; - - // Used to control the amount of debugging information returned in - // [ResultSetStats][google.spanner.v1.ResultSetStats]. 
If - // [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is - // set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only - // be set to - // [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. - QueryMode query_mode = 7; - - // If present, results will be restricted to the specified partition - // previously created using PartitionQuery(). There must be an exact - // match for the values of fields common to this message and the - // PartitionQueryRequest message used to create this partition_token. - bytes partition_token = 8; - - // A per-transaction sequence number used to identify this request. This field - // makes each request idempotent such that if the request is received multiple - // times, at most one will succeed. - // - // The sequence number must be monotonically increasing within the - // transaction. If a request arrives for the first time with an out-of-order - // sequence number, the transaction may be aborted. Replays of previously - // handled requests will yield the same response as the first execution. - // - // Required for DML statements. Ignored for queries. - int64 seqno = 9; -} - -// The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. -message ExecuteBatchDmlRequest { - // A single DML statement. - message Statement { - // Required. The DML string. - string sql = 1; - - // Parameter names and values that bind to placeholders in the DML string. - // - // A parameter placeholder consists of the `@` character followed by the - // parameter name (for example, `@firstName`). Parameter names can contain - // letters, numbers, and underscores. - // - // Parameters can appear anywhere that a literal value is expected. The - // same parameter name can be used more than once, for example: - // - // `"WHERE id > @msg_id AND id < @msg_id + 100"` - // - // It is an error to execute a SQL statement with unbound parameters. - google.protobuf.Struct params = 2; - - // It is not always possible for Cloud Spanner to infer the right SQL type - // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in - // [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as - // JSON strings. - // - // In these cases, `param_types` can be used to specify the exact - // SQL type for some or all of the SQL statement parameters. See the - // definition of [Type][google.spanner.v1.Type] for more information - // about SQL types. - map param_types = 3; - } - - // Required. The session in which the DML statements should be performed. - string session = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; - - // Required. The transaction to use. Must be a read-write transaction. - // - // To protect against replays, single-use transactions are not supported. The - // caller must either supply an existing transaction ID or begin a new - // transaction. - TransactionSelector transaction = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The list of statements to execute in this batch. Statements are - // executed serially, such that the effects of statement `i` are visible to - // statement `i+1`. Each statement must be a DML statement. Execution stops at - // the first failed statement; the remaining statements are not executed. - // - // Callers must provide at least one statement. 
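Both ExecuteSqlRequest and each batch DML Statement bind parameters the same way; in the Python client this surfaces as the params and param_types arguments. A hedged sketch with placeholder table and column names:

    from google.cloud import spanner
    from google.cloud.spanner_v1 import param_types

    database = spanner.Client().instance("my-instance").database("my-database")

    with database.snapshot() as snapshot:
        results = snapshot.execute_sql(
            "SELECT SingerId, FirstName FROM Singers WHERE FirstName = @firstName",
            params={"firstName": "Marc"},
            # BYTES and STRING both arrive as JSON strings, so pin the type here.
            param_types={"firstName": param_types.STRING},
        )
        for row in results:
            print(row)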
- repeated Statement statements = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. A per-transaction sequence number used to identify this request. - // This field makes each request idempotent such that if the request is - // received multiple times, at most one will succeed. - // - // The sequence number must be monotonically increasing within the - // transaction. If a request arrives for the first time with an out-of-order - // sequence number, the transaction may be aborted. Replays of previously - // handled requests will yield the same response as the first execution. - int64 seqno = 4 [(google.api.field_behavior) = REQUIRED]; -} - -// The response for -// [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list -// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML -// statement that has successfully executed, in the same order as the statements -// in the request. If a statement fails, the status in the response body -// identifies the cause of the failure. -// -// To check for DML statements that failed, use the following approach: -// -// 1. Check the status in the response message. The -// [google.rpc.Code][google.rpc.Code] enum -// value `OK` indicates that all statements were executed successfully. -// 2. If the status was not `OK`, check the number of result sets in the -// response. If the response contains `N` -// [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in -// the request failed. -// -// Example 1: -// -// * Request: 5 DML statements, all executed successfully. -// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the -// status `OK`. -// -// Example 2: -// -// * Request: 5 DML statements. The third statement has a syntax error. -// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax -// error (`INVALID_ARGUMENT`) -// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages -// indicates that the third statement failed, and the fourth and fifth -// statements were not executed. -message ExecuteBatchDmlResponse { - // One [ResultSet][google.spanner.v1.ResultSet] for each statement in the - // request that ran successfully, in the same order as the statements in the - // request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any - // rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each - // [ResultSet][google.spanner.v1.ResultSet] contain the number of rows - // modified by the statement. - // - // Only the first [ResultSet][google.spanner.v1.ResultSet] in the response - // contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. - repeated ResultSet result_sets = 1; - - // If all DML statements are executed successfully, the status is `OK`. - // Otherwise, the error status of the first failed statement. - google.rpc.Status status = 2; -} - -// Options for a PartitionQueryRequest and -// PartitionReadRequest. -message PartitionOptions { - // **Note:** This hint is currently ignored by PartitionQuery and - // PartitionRead requests. - // - // The desired data size for each partition generated. The default for this - // option is currently 1 GiB. This is only a hint. The actual size of each - // partition may be smaller or larger than this size request. - int64 partition_size_bytes = 1; - - // **Note:** This hint is currently ignored by PartitionQuery and - // PartitionRead requests. - // - // The desired maximum number of partitions to return. 
For example, this may - // be set to the number of workers available. The default for this option - // is currently 10,000. The maximum value is currently 200,000. This is only - // a hint. The actual number of partitions returned may be smaller or larger - // than this maximum count request. - int64 max_partitions = 2; -} - -// The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] -message PartitionQueryRequest { - // Required. The session used to create the partitions. - string session = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; - - // Read only snapshot transactions are supported, read/write and single use - // transactions are not. - TransactionSelector transaction = 2; - - // Required. The query request to generate partitions for. The request will - // fail if the query is not root partitionable. The query plan of a root - // partitionable query has a single distributed union operator. A distributed - // union operator conceptually divides one or more tables into multiple - // splits, remotely evaluates a subquery independently on each split, and - // then unions all results. - // - // This must not contain DML commands, such as INSERT, UPDATE, or - // DELETE. Use - // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a - // PartitionedDml transaction for large, partition-friendly DML operations. - string sql = 3 [(google.api.field_behavior) = REQUIRED]; - - // Parameter names and values that bind to placeholders in the SQL string. - // - // A parameter placeholder consists of the `@` character followed by the - // parameter name (for example, `@firstName`). Parameter names can contain - // letters, numbers, and underscores. - // - // Parameters can appear anywhere that a literal value is expected. The same - // parameter name can be used more than once, for example: - // - // `"WHERE id > @msg_id AND id < @msg_id + 100"` - // - // It is an error to execute a SQL statement with unbound parameters. - google.protobuf.Struct params = 4; - - // It is not always possible for Cloud Spanner to infer the right SQL type - // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in - // [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings. - // - // In these cases, `param_types` can be used to specify the exact - // SQL type for some or all of the SQL query parameters. See the - // definition of [Type][google.spanner.v1.Type] for more information - // about SQL types. - map param_types = 5; - - // Additional options that affect how many partitions are created. - PartitionOptions partition_options = 6; -} - -// The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead] -message PartitionReadRequest { - // Required. The session used to create the partitions. - string session = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; - - // Read only snapshot transactions are supported, read/write and single use - // transactions are not. - TransactionSelector transaction = 2; - - // Required. The name of the table in the database to be read. - string table = 3 [(google.api.field_behavior) = REQUIRED]; - - // If non-empty, the name of an index on - // [table][google.spanner.v1.PartitionReadRequest.table]. 
This index is used - // instead of the table primary key when interpreting - // [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting - // result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] - // for further information. - string index = 4; - - // The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be - // returned for each row matching this request. - repeated string columns = 5; - - // Required. `key_set` identifies the rows to be yielded. `key_set` names the - // primary keys of the rows in - // [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless - // [index][google.spanner.v1.PartitionReadRequest.index] is present. If - // [index][google.spanner.v1.PartitionReadRequest.index] is present, then - // [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names - // index keys in [index][google.spanner.v1.PartitionReadRequest.index]. - // - // It is not an error for the `key_set` to name rows that do not - // exist in the database. Read yields nothing for nonexistent rows. - KeySet key_set = 6 [(google.api.field_behavior) = REQUIRED]; - - // Additional options that affect how many partitions are created. - PartitionOptions partition_options = 9; -} - -// Information returned for each partition returned in a -// PartitionResponse. -message Partition { - // This token can be passed to Read, StreamingRead, ExecuteSql, or - // ExecuteStreamingSql requests to restrict the results to those identified by - // this partition token. - bytes partition_token = 1; -} - -// The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] -// or [PartitionRead][google.spanner.v1.Spanner.PartitionRead] -message PartitionResponse { - // Partitions created by this request. - repeated Partition partitions = 1; - - // Transaction created by this request. - Transaction transaction = 2; -} - -// The request for [Read][google.spanner.v1.Spanner.Read] and -// [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. -message ReadRequest { - // Required. The session in which the read should be performed. - string session = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; - - // The transaction to use. If none is provided, the default is a - // temporary read-only transaction with strong concurrency. - TransactionSelector transaction = 2; - - // Required. The name of the table in the database to be read. - string table = 3 [(google.api.field_behavior) = REQUIRED]; - - // If non-empty, the name of an index on - // [table][google.spanner.v1.ReadRequest.table]. This index is used instead of - // the table primary key when interpreting - // [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows. - // See [key_set][google.spanner.v1.ReadRequest.key_set] for further - // information. - string index = 4; - - // Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be - // returned for each row matching this request. - repeated string columns = 5 [(google.api.field_behavior) = REQUIRED]; - - // Required. `key_set` identifies the rows to be yielded. `key_set` names the - // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to - // be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present. 
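In the Python client these partition RPCs are exposed through Database.batch_snapshot. The sketch below is an assumption-laden outline in which every batch is processed in the same process; a real deployment would hand each batch to a separate worker:

    from google.cloud import spanner

    database = spanner.Client().instance("my-instance").database("my-database")

    # generate_query_batches issues PartitionQuery under the hood and yields
    # one batch (partition token plus the original request) per partition.
    batch_snapshot = database.batch_snapshot()
    batches = list(batch_snapshot.generate_query_batches(
        "SELECT SingerId, FirstName FROM Singers"))

    for batch in batches:
        for row in batch_snapshot.process_query_batch(batch):
            print(row)

    batch_snapshot.close()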
- // If [index][google.spanner.v1.ReadRequest.index] is present, then - // [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys - // in [index][google.spanner.v1.ReadRequest.index]. - // - // If the [partition_token][google.spanner.v1.ReadRequest.partition_token] - // field is empty, rows are yielded in table primary key order (if - // [index][google.spanner.v1.ReadRequest.index] is empty) or index key order - // (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the - // [partition_token][google.spanner.v1.ReadRequest.partition_token] field is - // not empty, rows will be yielded in an unspecified order. - // - // It is not an error for the `key_set` to name rows that do not - // exist in the database. Read yields nothing for nonexistent rows. - KeySet key_set = 6 [(google.api.field_behavior) = REQUIRED]; - - // If greater than zero, only the first `limit` rows are yielded. If `limit` - // is zero, the default is no limit. A limit cannot be specified if - // `partition_token` is set. - int64 limit = 8; - - // If this request is resuming a previously interrupted read, - // `resume_token` should be copied from the last - // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the - // interruption. Doing this enables the new read to resume where the last read - // left off. The rest of the request parameters must exactly match the request - // that yielded this token. - bytes resume_token = 9; - - // If present, results will be restricted to the specified partition - // previously created using PartitionRead(). There must be an exact - // match for the values of fields common to this message and the - // PartitionReadRequest message used to create this partition_token. - bytes partition_token = 10; -} - -// The request for -// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. -message BeginTransactionRequest { - // Required. The session in which the transaction runs. - string session = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; - - // Required. Options for the new transaction. - TransactionOptions options = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [Commit][google.spanner.v1.Spanner.Commit]. -message CommitRequest { - // Required. The session in which the transaction to be committed is running. - string session = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; - - // Required. The transaction in which to commit. - oneof transaction { - // Commit a previously-started transaction. - bytes transaction_id = 2; - - // Execute mutations in a temporary transaction. Note that unlike - // commit of a previously-started transaction, commit with a - // temporary transaction is non-idempotent. That is, if the - // `CommitRequest` is sent to Cloud Spanner more than once (for - // instance, due to retries in the application, or in the - // transport library), it is possible that the mutations are - // executed more than once. If this is undesirable, use - // [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and - // [Commit][google.spanner.v1.Spanner.Commit] instead. - TransactionOptions single_use_transaction = 3; - } - - // The mutations to be executed when this transaction commits. All - // mutations are applied atomically, in the order they appear in - // this list. 
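For the `ReadRequest` message deleted above, the client-library equivalent is `Snapshot.read`. A minimal sketch under the same hypothetical names; without a partition token, rows come back in primary-key order, and keys that do not exist simply yield nothing:

```python
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-db")

with database.snapshot() as snapshot:
    rows = snapshot.read(
        table="Singers",
        columns=("SingerId", "FirstName"),
        keyset=spanner.KeySet(keys=[[1], [2], [99]]),  # key 99 may not exist
        limit=10,  # only the first 10 matching rows are yielded
    )
    for row in rows:
        print(row)
```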
- repeated Mutation mutations = 4; -} - -// The response for [Commit][google.spanner.v1.Spanner.Commit]. -message CommitResponse { - // The Cloud Spanner timestamp at which the transaction committed. - google.protobuf.Timestamp commit_timestamp = 1; -} - -// The request for [Rollback][google.spanner.v1.Spanner.Rollback]. -message RollbackRequest { - // Required. The session in which the transaction to roll back is running. - string session = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } - ]; - - // Required. The transaction to roll back. - bytes transaction_id = 2 [(google.api.field_behavior) = REQUIRED]; -} diff --git a/spanner/google/cloud/spanner_v1/proto/spanner_database_admin.proto b/spanner/google/cloud/spanner_v1/proto/spanner_database_admin.proto deleted file mode 100644 index 56dbff19e17b..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/spanner_database_admin.proto +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.spanner.admin.database.v1; - -import "google/api/annotations.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database"; -option java_multiple_files = true; -option java_outer_classname = "SpannerDatabaseAdminProto"; -option java_package = "com.google.spanner.admin.database.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; - - -// Cloud Spanner Database Admin API -// -// The Cloud Spanner Database Admin API can be used to create, drop, and -// list databases. It also enables updating the schema of pre-existing -// databases. -service DatabaseAdmin { - // Lists Cloud Spanner databases. - rpc ListDatabases(ListDatabasesRequest) returns (ListDatabasesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*/instances/*}/databases" - }; - } - - // Creates a new Cloud Spanner database and starts to prepare it for serving. - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track preparation of the database. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The - // [response][google.longrunning.Operation.response] field type is - // [Database][google.spanner.admin.database.v1.Database], if successful. 
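The `CommitRequest`/`RollbackRequest` messages deleted above surface in the Python client as `Database.batch()` (Commit with a `single_use_transaction`) and `Database.run_in_transaction()` (BeginTransaction, Commit, and Rollback on error). A minimal sketch with hypothetical table data:

```python
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("my-db")

# Blind write: Commit with a single-use transaction. Note the proto comment
# above: this path is not idempotent if the CommitRequest is retried.
with database.batch() as batch:
    batch.insert(
        table="Singers",
        columns=("SingerId", "FirstName"),
        values=[(1, "Marc"), (2, "Catalina")],
    )

# run_in_transaction commits the callable's mutations atomically and rolls
# back (RollbackRequest) if the callable raises.
def rename_singer(transaction):
    transaction.update(
        table="Singers",
        columns=("SingerId", "FirstName"),
        values=[(1, "Marcus")],
    )

database.run_in_transaction(rename_singer)
```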
- rpc CreateDatabase(CreateDatabaseRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*/instances/*}/databases" - body: "*" - }; - } - - // Gets the state of a Cloud Spanner database. - rpc GetDatabase(GetDatabaseRequest) returns (Database) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instances/*/databases/*}" - }; - } - - // Updates the schema of a Cloud Spanner database by - // creating/altering/dropping tables, columns, indexes, etc. The returned - // [long-running operation][google.longrunning.Operation] will have a name of - // the format `/operations/` and can be used to - // track execution of the schema change(s). The - // [metadata][google.longrunning.Operation.metadata] field type is - // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. - rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl" - body: "*" - }; - } - - // Drops (aka deletes) a Cloud Spanner database. - rpc DropDatabase(DropDatabaseRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{database=projects/*/instances/*/databases/*}" - }; - } - - // Returns the schema of a Cloud Spanner database as a list of formatted - // DDL statements. This method does not show pending schema updates, those may - // be queried using the [Operations][google.longrunning.Operations] API. - rpc GetDatabaseDdl(GetDatabaseDdlRequest) returns (GetDatabaseDdlResponse) { - option (google.api.http) = { - get: "/v1/{database=projects/*/instances/*/databases/*}/ddl" - }; - } - - // Sets the access control policy on a database resource. Replaces any - // existing policy. - // - // Authorization requires `spanner.databases.setIamPolicy` permission on - // [resource][google.iam.v1.SetIamPolicyRequest.resource]. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy" - body: "*" - }; - } - - // Gets the access control policy for a database resource. Returns an empty - // policy if a database exists but does not have a policy set. - // - // Authorization requires `spanner.databases.getIamPolicy` permission on - // [resource][google.iam.v1.GetIamPolicyRequest.resource]. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified database resource. - // - // Attempting this RPC on a non-existent Cloud Spanner database will result in - // a NOT_FOUND error if the user has `spanner.databases.list` permission on - // the containing Cloud Spanner instance. Otherwise returns an empty set of - // permissions. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions" - body: "*" - }; - } -} - -// A Cloud Spanner database. -message Database { - // Indicates the current state of the database. - enum State { - // Not specified. - STATE_UNSPECIFIED = 0; - - // The database is still being created. 
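The `DatabaseAdmin` service deleted above is wrapped by the handwritten `Instance`/`Database` objects in the Python client. A minimal lifecycle sketch, assuming a hypothetical instance `my-instance` and database `example-db`; `CreateDatabase` is a long-running operation, so `result()` blocks until the database leaves the `CREATING` state:

```python
from google.cloud import spanner

client = spanner.Client()
instance = client.instance("my-instance")

database = instance.database(
    "example-db",
    ddl_statements=[
        "CREATE TABLE Singers ("
        "  SingerId INT64 NOT NULL,"
        "  FirstName STRING(1024)"
        ") PRIMARY KEY (SingerId)",
    ],
)
operation = database.create()        # CreateDatabase (long-running)
operation.result(timeout=120)

print([db.name for db in instance.list_databases()])  # ListDatabases
database.drop()                                        # DropDatabase
```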
Operations on the database may fail - // with `FAILED_PRECONDITION` in this state. - CREATING = 1; - - // The database is fully created and ready for use. - READY = 2; - } - - // Required. The name of the database. Values are of the form - // `projects//instances//databases/`, - // where `` is as specified in the `CREATE DATABASE` - // statement. This name can be passed to other API methods to - // identify the database. - string name = 1; - - // Output only. The current database state. - State state = 2; -} - -// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. -message ListDatabasesRequest { - // Required. The instance whose databases should be listed. - // Values are of the form `projects//instances/`. - string parent = 1; - - // Number of databases to be returned in the response. If 0 or less, - // defaults to the server's maximum allowed page size. - int32 page_size = 3; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a - // previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. - string page_token = 4; -} - -// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. -message ListDatabasesResponse { - // Databases that matched the request. - repeated Database databases = 1; - - // `next_page_token` can be sent in a subsequent - // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more - // of the matching databases. - string next_page_token = 2; -} - -// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. -message CreateDatabaseRequest { - // Required. The name of the instance that will serve the new database. - // Values are of the form `projects//instances/`. - string parent = 1; - - // Required. A `CREATE DATABASE` statement, which specifies the ID of the - // new database. The database ID must conform to the regular expression - // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length. - // If the database ID is a reserved word or if it contains a hyphen, the - // database ID must be enclosed in backticks (`` ` ``). - string create_statement = 2; - - // An optional list of DDL statements to run inside the newly created - // database. Statements can create tables, indexes, etc. These - // statements execute atomically with the creation of the database: - // if there is an error in any statement, the database is not created. - repeated string extra_statements = 3; -} - -// Metadata type for the operation returned by -// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. -message CreateDatabaseMetadata { - // The database being created. - string database = 1; -} - -// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. -message GetDatabaseRequest { - // Required. The name of the requested database. Values are of the form - // `projects//instances//databases/`. - string name = 1; -} - -// Enqueues the given DDL statements to be applied, in order but not -// necessarily all at once, to the database schema at some point (or -// points) in the future. The server checks that the statements -// are executable (syntactically valid, name tables that exist, etc.) 
-// before enqueueing them, but they may still fail upon -// later execution (e.g., if a statement from another batch of -// statements is applied first and it conflicts in some way, or if -// there is some data-related problem like a `NULL` value in a column to -// which `NOT NULL` would be added). If a statement fails, all -// subsequent statements in the batch are automatically cancelled. -// -// Each batch of statements is assigned a name which can be used with -// the [Operations][google.longrunning.Operations] API to monitor -// progress. See the -// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more -// details. -message UpdateDatabaseDdlRequest { - // Required. The database to update. - string database = 1; - - // DDL statements to be applied to the database. - repeated string statements = 2; - - // If empty, the new update request is assigned an - // automatically-generated operation ID. Otherwise, `operation_id` - // is used to construct the name of the resulting - // [Operation][google.longrunning.Operation]. - // - // Specifying an explicit operation ID simplifies determining - // whether the statements were executed in the event that the - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, - // or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and - // `operation_id` fields can be combined to form the - // [name][google.longrunning.Operation.name] of the resulting - // [longrunning.Operation][google.longrunning.Operation]: `/operations/`. - // - // `operation_id` should be unique within the database, and must be - // a valid identifier: `[a-z][a-z0-9_]*`. Note that - // automatically-generated operation IDs always begin with an - // underscore. If the named operation already exists, - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns - // `ALREADY_EXISTS`. - string operation_id = 3; -} - -// Metadata type for the operation returned by -// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]. -message UpdateDatabaseDdlMetadata { - // The database being modified. - string database = 1; - - // For an update this list contains all the statements. For an - // individual statement, this list contains only that statement. - repeated string statements = 2; - - // Reports the commit timestamps of all statements that have - // succeeded so far, where `commit_timestamps[i]` is the commit - // timestamp for the statement `statements[i]`. - repeated google.protobuf.Timestamp commit_timestamps = 3; -} - -// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. -message DropDatabaseRequest { - // Required. The database to be dropped. - string database = 1; -} - -// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. -message GetDatabaseDdlRequest { - // Required. The database whose schema we wish to get. - string database = 1; -} - -// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. -message GetDatabaseDdlResponse { - // A list of formatted DDL statements defining the schema of the database - // specified in the request. 
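For the `UpdateDatabaseDdl` and `GetDatabaseDdl` messages deleted above, a minimal client-side sketch; the explicit `operation_id` (a hypothetical value, which must match `[a-z][a-z0-9_]*`) makes the schema change safe to replay, and `reload()` is assumed to refresh the locally cached `ddl_statements`:

```python
from google.cloud import spanner

client = spanner.Client()
database = client.instance("my-instance").database("example-db")

# UpdateDatabaseDdl: statements are enqueued and applied in order, but not
# necessarily all at once; the returned operation tracks progress.
operation = database.update_ddl(
    ["ALTER TABLE Singers ADD COLUMN BirthDate DATE"],
    operation_id="add_birthdate_column",
)
operation.result(timeout=300)

# GetDatabaseDdl: fetch the current schema as formatted DDL statements.
database.reload()
print(database.ddl_statements)
```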
- repeated string statements = 1; -} diff --git a/spanner/google/cloud/spanner_v1/proto/spanner_instance_admin.proto b/spanner/google/cloud/spanner_v1/proto/spanner_instance_admin.proto deleted file mode 100644 index e960e5428e3a..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/spanner_instance_admin.proto +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.spanner.admin.instance.v1; - -import "google/api/annotations.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.Admin.Instance.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/admin/instance/v1;instance"; -option java_multiple_files = true; -option java_outer_classname = "SpannerInstanceAdminProto"; -option java_package = "com.google.spanner.admin.instance.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Instance\\V1"; - - -// Cloud Spanner Instance Admin API -// -// The Cloud Spanner Instance Admin API can be used to create, delete, -// modify and list instances. Instances are dedicated Cloud Spanner serving -// and storage resources to be used by Cloud Spanner databases. -// -// Each instance has a "configuration", which dictates where the -// serving resources for the Cloud Spanner instance are located (e.g., -// US-central, Europe). Configurations are created by Google based on -// resource availability. -// -// Cloud Spanner billing is based on the instances that exist and their -// sizes. After an instance exists, there are no additional -// per-database or per-operation charges for use of the instance -// (though there may be additional network bandwidth charges). -// Instances offer isolation: problems with databases in one instance -// will not affect other instances. However, within an instance -// databases can affect each other. For example, if one database in an -// instance receives a lot of requests and consumes most of the -// instance resources, fewer resources are available for other -// databases in that instance, and their performance may suffer. -service InstanceAdmin { - // Lists the supported instance configurations for a given project. - rpc ListInstanceConfigs(ListInstanceConfigsRequest) returns (ListInstanceConfigsResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*}/instanceConfigs" - }; - } - - // Gets information about a particular instance configuration. - rpc GetInstanceConfig(GetInstanceConfigRequest) returns (InstanceConfig) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instanceConfigs/*}" - }; - } - - // Lists all instances in the given project. 
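The `InstanceAdmin` service deleted above starts with instance configurations. Listing them from the Python client is a one-liner; a minimal sketch:

```python
from google.cloud import spanner

client = spanner.Client()

# ListInstanceConfigs: configurations are created by Google and determine
# where an instance's serving resources are placed.
for config in client.list_instance_configs():
    print(config.name, config.display_name)
```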
- rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*}/instances" - }; - } - - // Gets information about a particular instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { - get: "/v1/{name=projects/*/instances/*}" - }; - } - - // Creates an instance and begins preparing it to begin serving. The - // returned [long-running operation][google.longrunning.Operation] - // can be used to track the progress of preparing the new - // instance. The instance name is assigned by the caller. If the - // named instance already exists, `CreateInstance` returns - // `ALREADY_EXISTS`. - // - // Immediately upon completion of this request: - // - // * The instance is readable via the API, with all requested attributes - // but no allocated resources. Its state is `CREATING`. - // - // Until completion of the returned operation: - // - // * Cancelling the operation renders the instance immediately unreadable - // via the API. - // * The instance can be deleted. - // * All other attempts to modify the instance are rejected. - // - // Upon completion of the returned operation: - // - // * Billing for all successfully-allocated resources begins (some types - // may have lower than the requested levels). - // * Databases can be created in the instance. - // * The instance's allocated resource levels are readable via the API. - // * The instance's state becomes `READY`. - // - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track creation of the instance. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. - // The [response][google.longrunning.Operation.response] field type is - // [Instance][google.spanner.admin.instance.v1.Instance], if successful. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/{parent=projects/*}/instances" - body: "*" - }; - } - - // Updates an instance, and begins allocating or releasing resources - // as requested. The returned [long-running - // operation][google.longrunning.Operation] can be used to track the - // progress of updating the instance. If the named instance does not - // exist, returns `NOT_FOUND`. - // - // Immediately upon completion of this request: - // - // * For resource types for which a decrease in the instance's allocation - // has been requested, billing is based on the newly-requested level. - // - // Until completion of the returned operation: - // - // * Cancelling the operation sets its metadata's - // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins - // restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, - // after which point it terminates with a `CANCELLED` status. - // * All other attempts to modify the instance are rejected. - // * Reading the instance via the API continues to give the pre-request - // resource levels. - // - // Upon completion of the returned operation: - // - // * Billing begins for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources are available for serving the instance's - // tables. 
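For the `CreateInstance` RPC documented above, a minimal client-side sketch with hypothetical project, instance, and configuration names; the instance is readable in state `CREATING` immediately, but databases can only be created once the long-running operation completes and the state becomes `READY`:

```python
from google.cloud import spanner

client = spanner.Client()

instance = client.instance(
    "my-instance",  # hypothetical instance ID
    configuration_name="projects/my-project/instanceConfigs/regional-us-central1",
    display_name="My Instance",
    node_count=1,
)
operation = instance.create()    # CreateInstance (long-running)
operation.result(timeout=300)    # wait for state READY
```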
- // * The instance's new resource levels are readable via the API. - // - // The returned [long-running operation][google.longrunning.Operation] will - // have a name of the format `/operations/` and - // can be used to track the instance modification. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - // The [response][google.longrunning.Operation.response] field type is - // [Instance][google.spanner.admin.instance.v1.Instance], if successful. - // - // Authorization requires `spanner.instances.update` permission on - // resource [name][google.spanner.admin.instance.v1.Instance.name]. - rpc UpdateInstance(UpdateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v1/{instance.name=projects/*/instances/*}" - body: "*" - }; - } - - // Deletes an instance. - // - // Immediately upon completion of the request: - // - // * Billing ceases for all of the instance's reserved resources. - // - // Soon afterward: - // - // * The instance and *all of its databases* immediately and - // irrevocably disappear from the API. All data in the databases - // is permanently deleted. - rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/instances/*}" - }; - } - - // Sets the access control policy on an instance resource. Replaces any - // existing policy. - // - // Authorization requires `spanner.instances.setIamPolicy` on - // [resource][google.iam.v1.SetIamPolicyRequest.resource]. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:setIamPolicy" - body: "*" - }; - } - - // Gets the access control policy for an instance resource. Returns an empty - // policy if an instance exists but does not have a policy set. - // - // Authorization requires `spanner.instances.getIamPolicy` on - // [resource][google.iam.v1.GetIamPolicyRequest.resource]. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:getIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified instance resource. - // - // Attempting this RPC on a non-existent Cloud Spanner instance resource will - // result in a NOT_FOUND error if the user has `spanner.instances.list` - // permission on the containing Google Cloud Project. Otherwise returns an - // empty set of permissions. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v1/{resource=projects/*/instances/*}:testIamPermissions" - body: "*" - }; - } -} - -// A possible configuration for a Cloud Spanner instance. Configurations -// define the geographic placement of nodes and their replication. -message InstanceConfig { - // A unique identifier for the instance configuration. Values - // are of the form - // `projects//instanceConfigs/[a-z][-a-z0-9]*` - string name = 1; - - // The name of this instance configuration as it appears in UIs. - string display_name = 2; -} - -// An isolated set of Cloud Spanner resources on which databases can be hosted. -message Instance { - // Indicates the current state of the instance. - enum State { - // Not specified. 
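For the `UpdateInstance` and `DeleteInstance` RPCs deleted above, a minimal sketch; the client builds the field mask for the mutated fields, and deletion irrevocably removes the instance and all of its databases:

```python
from google.cloud import spanner

client = spanner.Client()
instance = client.instance("my-instance")
instance.reload()  # GetInstance: fetch current config, display_name, node_count

# UpdateInstance: resize the instance; the returned operation tracks the
# resource re-allocation.
instance.node_count = 2
operation = instance.update()
operation.result(timeout=300)

# DeleteInstance: billing stops and all databases disappear permanently.
instance.delete()
```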
- STATE_UNSPECIFIED = 0; - - // The instance is still being created. Resources may not be - // available yet, and operations such as database creation may not - // work. - CREATING = 1; - - // The instance is fully created and ready to do work such as - // creating databases. - READY = 2; - } - - // Required. A unique identifier for the instance, which cannot be changed - // after the instance is created. Values are of the form - // `projects//instances/[a-z][-a-z0-9]*[a-z0-9]`. The final - // segment of the name must be between 6 and 30 characters in length. - string name = 1; - - // Required. The name of the instance's configuration. Values are of the form - // `projects//instanceConfigs/`. See - // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and - // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. - string config = 2; - - // Required. The descriptive name for this instance as it appears in UIs. - // Must be unique per project and between 4 and 30 characters in length. - string display_name = 3; - - // Required. The number of nodes allocated to this instance. This may be zero - // in API responses for instances that are not yet in state `READY`. - // - // See [the documentation](https://cloud.google.com/spanner/docs/instances#node_count) - // for more information about nodes. - int32 node_count = 5; - - // Output only. The current instance state. For - // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], the state must be - // either omitted or set to `CREATING`. For - // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance], the state must be - // either omitted or set to `READY`. - State state = 6; - - // Cloud Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. Cloud Labels can be used to filter collections of - // resources. They can be used to control how resource metrics are aggregated. - // And they can be used as arguments to policy management rules (e.g. route, - // firewall, load balancing, etc.). - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. - // * Label values must be between 0 and 63 characters long and must conform - // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. - // * No more than 64 labels can be associated with a given resource. - // - // See https://goo.gl/xmQnxf for more information on and examples of labels. - // - // If you plan to use labels in your own code, please note that additional - // characters may be allowed in the future. And so you are advised to use an - // internal label representation, such as JSON, which doesn't rely upon - // specific characters being disallowed. For example, representing labels - // as the string: name + "_" + value would prove problematic if we were to - // allow "_" in a future release. - map labels = 7; -} - -// The request for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. -message ListInstanceConfigsRequest { - // Required. The name of the project for which a list of supported instance - // configurations is requested. Values are of the form - // `projects/`. - string parent = 1; - - // Number of instance configurations to be returned in the response. 
If 0 or - // less, defaults to the server's maximum allowed page size. - int32 page_size = 2; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token] - // from a previous [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse]. - string page_token = 3; -} - -// The response for [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. -message ListInstanceConfigsResponse { - // The list of requested instance configurations. - repeated InstanceConfig instance_configs = 1; - - // `next_page_token` can be sent in a subsequent - // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs] call to - // fetch more of the matching instance configurations. - string next_page_token = 2; -} - -// The request for -// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig]. -message GetInstanceConfigRequest { - // Required. The name of the requested instance configuration. Values are of - // the form `projects//instanceConfigs/`. - string name = 1; -} - -// The request for [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance]. -message GetInstanceRequest { - // Required. The name of the requested instance. Values are of the form - // `projects//instances/`. - string name = 1; -} - -// The request for [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. -message CreateInstanceRequest { - // Required. The name of the project in which to create the instance. Values - // are of the form `projects/`. - string parent = 1; - - // Required. The ID of the instance to create. Valid identifiers are of the - // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 6 and 30 characters in - // length. - string instance_id = 2; - - // Required. The instance to create. The name may be omitted, but if - // specified must be `/instances/`. - Instance instance = 3; -} - -// The request for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. -message ListInstancesRequest { - // Required. The name of the project for which a list of instances is - // requested. Values are of the form `projects/`. - string parent = 1; - - // Number of instances to be returned in the response. If 0 or less, defaults - // to the server's maximum allowed page size. - int32 page_size = 2; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token] from a - // previous [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse]. - string page_token = 3; - - // An expression for filtering the results of the request. Filter rules are - // case insensitive. The fields eligible for filtering are: - // - // * `name` - // * `display_name` - // * `labels.key` where key is the name of a label - // - // Some examples of using filters are: - // - // * `name:*` --> The instance has a name. - // * `name:Howl` --> The instance's name contains the string "howl". - // * `name:HOWL` --> Equivalent to above. - // * `NAME:howl` --> Equivalent to above. - // * `labels.env:*` --> The instance has the label "env". - // * `labels.env:dev` --> The instance has the label "env" and the value of - // the label contains the string "dev". 
- // * `name:howl labels.env:dev` --> The instance's name contains "howl" and - // it has the label "env" with its value - // containing "dev". - string filter = 4; -} - -// The response for [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]. -message ListInstancesResponse { - // The list of requested instances. - repeated Instance instances = 1; - - // `next_page_token` can be sent in a subsequent - // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances] call to fetch more - // of the matching instances. - string next_page_token = 2; -} - -// The request for [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. -message UpdateInstanceRequest { - // Required. The instance to update, which must always include the instance - // name. Otherwise, only fields mentioned in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included. - Instance instance = 1; - - // Required. A mask specifying which fields in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated. - // The field mask must always be specified; this prevents any future fields in - // [][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know - // about them. - google.protobuf.FieldMask field_mask = 2; -} - -// The request for [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance]. -message DeleteInstanceRequest { - // Required. The name of the instance to be deleted. Values are of the form - // `projects//instances/` - string name = 1; -} - -// Metadata type for the operation returned by -// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]. -message CreateInstanceMetadata { - // The instance being created. - Instance instance = 1; - - // The time at which the - // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance] request was - // received. - google.protobuf.Timestamp start_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp end_time = 4; -} - -// Metadata type for the operation returned by -// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. -message UpdateInstanceMetadata { - // The desired end state of the update. - Instance instance = 1; - - // The time at which [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance] - // request was received. - google.protobuf.Timestamp start_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp end_time = 4; -} diff --git a/spanner/google/cloud/spanner_v1/proto/spanner_pb2.py b/spanner/google/cloud/spanner_v1/proto/spanner_pb2.py deleted file mode 100644 index 3415264909ef..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/spanner_pb2.py +++ /dev/null @@ -1,3307 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
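The `ListInstancesRequest.filter` field documented above can be exercised from the Python client; a minimal sketch, assuming the client passes its `filter_` keyword through verbatim as the request's filter expression:

```python
from google.cloud import spanner

client = spanner.Client()

# ListInstances restricted to instances labelled env=dev (server-side filter;
# filter rules are case insensitive).
for instance in client.list_instances(filter_="labels.env:dev"):
    print(instance.name, instance.display_name)
```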
-# source: google/cloud/spanner_v1/proto/spanner.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 -from google.cloud.spanner_v1.proto import ( - keys_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_keys__pb2, -) -from google.cloud.spanner_v1.proto import ( - mutation_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_mutation__pb2, -) -from google.cloud.spanner_v1.proto import ( - result_set_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2, -) -from google.cloud.spanner_v1.proto import ( - transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2, -) -from google.cloud.spanner_v1.proto import ( - type_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_type__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/spanner.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=_b( - "\n\025com.google.spanner.v1B\014SpannerProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352A_\n\037spanner.googleapis.com/Database\022\n\x11partition_options\x18\x06 \x01(\x0b\x32#.google.spanner.v1.PartitionOptions\x1aJ\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type:\x02\x38\x01"\xb1\x02\n\x14PartitionReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x05 \x03(\t\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12>\n\x11partition_options\x18\t \x01(\x0b\x32#.google.spanner.v1.PartitionOptions"$\n\tPartition\x12\x17\n\x0fpartition_token\x18\x01 \x01(\x0c"z\n\x11PartitionResponse\x12\x30\n\npartitions\x18\x01 \x03(\x0b\x32\x1c.google.spanner.v1.Partition\x12\x33\n\x0btransaction\x18\x02 \x01(\x0b\x32\x1e.google.spanner.v1.Transaction"\xab\x02\n\x0bReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x14\n\x07\x63olumns\x18\x05 \x03(\tB\x03\xe0\x41\x02\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12\r\n\x05limit\x18\x08 
\x01(\x03\x12\x14\n\x0cresume_token\x18\t \x01(\x0c\x12\x17\n\x0fpartition_token\x18\n \x01(\x0c"\x8f\x01\n\x17\x42\x65ginTransactionRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x07options\x18\x02 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsB\x03\xe0\x41\x02"\xea\x01\n\rCommitRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x18\n\x0etransaction_id\x18\x02 \x01(\x0cH\x00\x12G\n\x16single_use_transaction\x18\x03 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x12.\n\tmutations\x18\x04 \x03(\x0b\x32\x1b.google.spanner.v1.MutationB\r\n\x0btransaction"F\n\x0e\x43ommitResponse\x12\x34\n\x10\x63ommit_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"g\n\x0fRollbackRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x1b\n\x0etransaction_id\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x32\xc0\x16\n\x07Spanner\x12\xa6\x01\n\rCreateSession\x12\'.google.spanner.v1.CreateSessionRequest\x1a\x1a.google.spanner.v1.Session"P\x82\xd3\xe4\x93\x02?":/v1/{database=projects/*/instances/*/databases/*}/sessions:\x01*\xda\x41\x08\x64\x61tabase\x12\xe0\x01\n\x13\x42\x61tchCreateSessions\x12-.google.spanner.v1.BatchCreateSessionsRequest\x1a..google.spanner.v1.BatchCreateSessionsResponse"j\x82\xd3\xe4\x93\x02K"F/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate:\x01*\xda\x41\x16\x64\x61tabase,session_count\x12\x97\x01\n\nGetSession\x12$.google.spanner.v1.GetSessionRequest\x1a\x1a.google.spanner.v1.Session"G\x82\xd3\xe4\x93\x02:\x12\x38/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xae\x01\n\x0cListSessions\x12&.google.spanner.v1.ListSessionsRequest\x1a\'.google.spanner.v1.ListSessionsResponse"M\x82\xd3\xe4\x93\x02<\x12:/v1/{database=projects/*/instances/*/databases/*}/sessions\xda\x41\x08\x64\x61tabase\x12\x99\x01\n\rDeleteSession\x12\'.google.spanner.v1.DeleteSessionRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xa3\x01\n\nExecuteSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a\x1c.google.spanner.v1.ResultSet"Q\x82\xd3\xe4\x93\x02K"F/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql:\x01*\x12\xbe\x01\n\x13\x45xecuteStreamingSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a#.google.spanner.v1.PartialResultSet"Z\x82\xd3\xe4\x93\x02T"O/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql:\x01*0\x01\x12\xc0\x01\n\x0f\x45xecuteBatchDml\x12).google.spanner.v1.ExecuteBatchDmlRequest\x1a*.google.spanner.v1.ExecuteBatchDmlResponse"V\x82\xd3\xe4\x93\x02P"K/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml:\x01*\x12\x91\x01\n\x04Read\x12\x1e.google.spanner.v1.ReadRequest\x1a\x1c.google.spanner.v1.ResultSet"K\x82\xd3\xe4\x93\x02\x45"@/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read:\x01*\x12\xac\x01\n\rStreamingRead\x12\x1e.google.spanner.v1.ReadRequest\x1a#.google.spanner.v1.PartialResultSet"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead:\x01*0\x01\x12\xc9\x01\n\x10\x42\x65ginTransaction\x12*.google.spanner.v1.BeginTransactionRequest\x1a\x1e.google.spanner.v1.Transaction"i\x82\xd3\xe4\x93\x02Q"L/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction:\x01*\xda\x41\x0fsession,options\x12\xeb\x01\n\x06\x43ommit\x12 
.google.spanner.v1.CommitRequest\x1a!.google.spanner.v1.CommitResponse"\x9b\x01\x82\xd3\xe4\x93\x02G"B/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit:\x01*\xda\x41 session,transaction_id,mutations\xda\x41(session,single_use_transaction,mutations\x12\xb0\x01\n\x08Rollback\x12".google.spanner.v1.RollbackRequest\x1a\x16.google.protobuf.Empty"h\x82\xd3\xe4\x93\x02I"D/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback:\x01*\xda\x41\x16session,transaction_id\x12\xb7\x01\n\x0ePartitionQuery\x12(.google.spanner.v1.PartitionQueryRequest\x1a$.google.spanner.v1.PartitionResponse"U\x82\xd3\xe4\x93\x02O"J/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery:\x01*\x12\xb4\x01\n\rPartitionRead\x12\'.google.spanner.v1.PartitionReadRequest\x1a$.google.spanner.v1.PartitionResponse"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead:\x01*\x1aw\xca\x41\x16spanner.googleapis.com\xd2\x41[https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.dataB\xf7\x01\n\x15\x63om.google.spanner.v1B\x0cSpannerProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x41_\n\x1fspanner.googleapis.com/Database\x12 The - session has the label "env". - ``labels.env:dev`` --> The - session has the label "env" and the value of the label - contains the string "dev". - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ListSessionsRequest) - ), -) -_sym_db.RegisterMessage(ListSessionsRequest) - -ListSessionsResponse = _reflection.GeneratedProtocolMessageType( - "ListSessionsResponse", - (_message.Message,), - dict( - DESCRIPTOR=_LISTSESSIONSRESPONSE, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The response for - [ListSessions][google.spanner.v1.Spanner.ListSessions]. - - - Attributes: - sessions: - The list of requested sessions. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent - [ListSessions][google.spanner.v1.Spanner.ListSessions] call to - fetch more of the matching sessions. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ListSessionsResponse) - ), -) -_sym_db.RegisterMessage(ListSessionsResponse) - -DeleteSessionRequest = _reflection.GeneratedProtocolMessageType( - "DeleteSessionRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DELETESESSIONREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for - [DeleteSession][google.spanner.v1.Spanner.DeleteSession]. - - - Attributes: - name: - Required. The name of the session to delete. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.DeleteSessionRequest) - ), -) -_sym_db.RegisterMessage(DeleteSessionRequest) - -ExecuteSqlRequest = _reflection.GeneratedProtocolMessageType( - "ExecuteSqlRequest", - (_message.Message,), - dict( - ParamTypesEntry=_reflection.GeneratedProtocolMessageType( - "ParamTypesEntry", - (_message.Message,), - dict( - DESCRIPTOR=_EXECUTESQLREQUEST_PARAMTYPESENTRY, - __module__="google.cloud.spanner_v1.proto.spanner_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry) - ), - ), - DESCRIPTOR=_EXECUTESQLREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and - [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. - - - Attributes: - session: - Required. 
The session in which the SQL query should be - performed. - transaction: - The transaction to use. For queries, if none is provided, the - default is a temporary read-only transaction with strong - concurrency. Standard DML statements require a read-write - transaction. To protect against replays, single-use - transactions are not supported. The caller must either supply - an existing transaction ID or begin a new transaction. - Partitioned DML requires an existing Partitioned DML - transaction ID. - sql: - Required. The SQL string. - params: - Parameter names and values that bind to placeholders in the - SQL string. A parameter placeholder consists of the ``@`` - character followed by the parameter name (for example, - ``@firstName``). Parameter names can contain letters, numbers, - and underscores. Parameters can appear anywhere that a - literal value is expected. The same parameter name can be used - more than once, for example: ``"WHERE id > @msg_id AND id < - @msg_id + 100"`` It is an error to execute a SQL statement - with unbound parameters. - param_types: - It is not always possible for Cloud Spanner to infer the right - SQL type from a JSON value. For example, values of type - ``BYTES`` and values of type ``STRING`` both appear in - [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON - strings. In these cases, ``param_types`` can be used to - specify the exact SQL type for some or all of the SQL - statement parameters. See the definition of - [Type][google.spanner.v1.Type] for more information about SQL - types. - resume_token: - If this request is resuming a previously interrupted SQL - statement execution, ``resume_token`` should be copied from - the last - [PartialResultSet][google.spanner.v1.PartialResultSet] yielded - before the interruption. Doing this enables the new SQL - statement execution to resume where the last one left off. The - rest of the request parameters must exactly match the request - that yielded this token. - query_mode: - Used to control the amount of debugging information returned - in [ResultSetStats][google.spanner.v1.ResultSetStats]. If [par - tition\_token][google.spanner.v1.ExecuteSqlRequest.partition\_ - token] is set, - [query\_mode][google.spanner.v1.ExecuteSqlRequest.query\_mode] - can only be set to [QueryMode.NORMAL][google.spanner.v1.Execut - eSqlRequest.QueryMode.NORMAL]. - partition_token: - If present, results will be restricted to the specified - partition previously created using PartitionQuery(). There - must be an exact match for the values of fields common to this - message and the PartitionQueryRequest message used to create - this partition\_token. - seqno: - A per-transaction sequence number used to identify this - request. This field makes each request idempotent such that if - the request is received multiple times, at most one will - succeed. The sequence number must be monotonically increasing - within the transaction. If a request arrives for the first - time with an out-of-order sequence number, the transaction may - be aborted. Replays of previously handled requests will yield - the same response as the first execution. Required for DML - statements. Ignored for queries. 
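The `ExecuteSqlRequest` attributes described in the generated docstring above map directly onto keyword arguments of the Python client. A minimal sketch with hypothetical names; note that DML must run inside a read-write transaction, and the client assigns the required per-transaction `seqno` automatically:

```python
from google.cloud import spanner
from google.cloud.spanner_v1 import param_types

client = spanner.Client()
database = client.instance("my-instance").database("my-db")

# Query: params/param_types correspond to ExecuteSqlRequest.params/.param_types.
with database.snapshot() as snapshot:
    rows = snapshot.execute_sql(
        "SELECT SingerId FROM Singers WHERE FirstName = @first_name",
        params={"first_name": "Marc"},
        param_types={"first_name": param_types.STRING},
    )
    for row in rows:
        print(row)

# DML: ExecuteSql inside a read-write transaction; execute_update returns
# the number of rows modified.
def delete_singer(transaction):
    row_count = transaction.execute_update(
        "DELETE FROM Singers WHERE SingerId = @id",
        params={"id": 99},
        param_types={"id": param_types.INT64},
    )
    print(row_count, "row(s) deleted")

database.run_in_transaction(delete_singer)
```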
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteSqlRequest) - ), -) -_sym_db.RegisterMessage(ExecuteSqlRequest) -_sym_db.RegisterMessage(ExecuteSqlRequest.ParamTypesEntry) - -ExecuteBatchDmlRequest = _reflection.GeneratedProtocolMessageType( - "ExecuteBatchDmlRequest", - (_message.Message,), - dict( - Statement=_reflection.GeneratedProtocolMessageType( - "Statement", - (_message.Message,), - dict( - ParamTypesEntry=_reflection.GeneratedProtocolMessageType( - "ParamTypesEntry", - (_message.Message,), - dict( - DESCRIPTOR=_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY, - __module__="google.cloud.spanner_v1.proto.spanner_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry) - ), - ), - DESCRIPTOR=_EXECUTEBATCHDMLREQUEST_STATEMENT, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""A single DML statement. - - - Attributes: - sql: - Required. The DML string. - params: - Parameter names and values that bind to placeholders in the - DML string. A parameter placeholder consists of the ``@`` - character followed by the parameter name (for example, - ``@firstName``). Parameter names can contain letters, numbers, - and underscores. Parameters can appear anywhere that a - literal value is expected. The same parameter name can be used - more than once, for example: ``"WHERE id > @msg_id AND id < - @msg_id + 100"`` It is an error to execute a SQL statement - with unbound parameters. - param_types: - It is not always possible for Cloud Spanner to infer the right - SQL type from a JSON value. For example, values of type - ``BYTES`` and values of type ``STRING`` both appear in [params - ][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] - as JSON strings. In these cases, ``param_types`` can be used - to specify the exact SQL type for some or all of the SQL - statement parameters. See the definition of - [Type][google.spanner.v1.Type] for more information about SQL - types. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest.Statement) - ), - ), - DESCRIPTOR=_EXECUTEBATCHDMLREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for - [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. - - - Attributes: - session: - Required. The session in which the DML statements should be - performed. - transaction: - Required. The transaction to use. Must be a read-write - transaction. To protect against replays, single-use - transactions are not supported. The caller must either supply - an existing transaction ID or begin a new transaction. - statements: - Required. The list of statements to execute in this batch. - Statements are executed serially, such that the effects of - statement ``i`` are visible to statement ``i+1``. Each - statement must be a DML statement. Execution stops at the - first failed statement; the remaining statements are not - executed. Callers must provide at least one statement. - seqno: - Required. A per-transaction sequence number used to identify - this request. This field makes each request idempotent such - that if the request is received multiple times, at most one - will succeed. The sequence number must be monotonically - increasing within the transaction. If a request arrives for - the first time with an out-of-order sequence number, the - transaction may be aborted. Replays of previously handled - requests will yield the same response as the first execution. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlRequest) - ), -) -_sym_db.RegisterMessage(ExecuteBatchDmlRequest) -_sym_db.RegisterMessage(ExecuteBatchDmlRequest.Statement) -_sym_db.RegisterMessage(ExecuteBatchDmlRequest.Statement.ParamTypesEntry) - -ExecuteBatchDmlResponse = _reflection.GeneratedProtocolMessageType( - "ExecuteBatchDmlResponse", - (_message.Message,), - dict( - DESCRIPTOR=_EXECUTEBATCHDMLRESPONSE, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The response for - [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a - list of [ResultSet][google.spanner.v1.ResultSet] messages, one for each - DML statement that has successfully executed, in the same order as the - statements in the request. If a statement fails, the status in the - response body identifies the cause of the failure. - - To check for DML statements that failed, use the following approach: - - 1. Check the status in the response message. The - [google.rpc.Code][google.rpc.Code] enum value ``OK`` indicates that - all statements were executed successfully. - 2. If the status was not ``OK``, check the number of result sets in the - response. If the response contains ``N`` - [ResultSet][google.spanner.v1.ResultSet] messages, then statement - ``N+1`` in the request failed. - - Example 1: - - - Request: 5 DML statements, all executed successfully. - - Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with - the status ``OK``. - - Example 2: - - - Request: 5 DML statements. The third statement has a syntax error. - - Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a - syntax error (``INVALID_ARGUMENT``) status. The number of - [ResultSet][google.spanner.v1.ResultSet] messages indicates that the - third statement failed, and the fourth and fifth statements were not - executed. - - - Attributes: - result_sets: - One [ResultSet][google.spanner.v1.ResultSet] for each - statement in the request that ran successfully, in the same - order as the statements in the request. Each - [ResultSet][google.spanner.v1.ResultSet] does not contain any - rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] - in each [ResultSet][google.spanner.v1.ResultSet] contain the - number of rows modified by the statement. Only the first - [ResultSet][google.spanner.v1.ResultSet] in the response - contains valid - [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. - status: - If all DML statements are executed successfully, the status is - ``OK``. Otherwise, the error status of the first failed - statement. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ExecuteBatchDmlResponse) - ), -) -_sym_db.RegisterMessage(ExecuteBatchDmlResponse) - -PartitionOptions = _reflection.GeneratedProtocolMessageType( - "PartitionOptions", - (_message.Message,), - dict( - DESCRIPTOR=_PARTITIONOPTIONS, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""Options for a PartitionQueryRequest and - PartitionReadRequest. - - - Attributes: - partition_size_bytes: - \ **Note:** This hint is currently ignored by PartitionQuery - and PartitionRead requests. The desired data size for each - partition generated. The default for this option is currently - 1 GiB. This is only a hint. The actual size of each partition - may be smaller or larger than this size request. - max_partitions: - \ **Note:** This hint is currently ignored by PartitionQuery - and PartitionRead requests. 
The desired maximum number of - partitions to return. For example, this may be set to the - number of workers available. The default for this option is - currently 10,000. The maximum value is currently 200,000. This - is only a hint. The actual number of partitions returned may - be smaller or larger than this maximum count request. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionOptions) - ), -) -_sym_db.RegisterMessage(PartitionOptions) - -PartitionQueryRequest = _reflection.GeneratedProtocolMessageType( - "PartitionQueryRequest", - (_message.Message,), - dict( - ParamTypesEntry=_reflection.GeneratedProtocolMessageType( - "ParamTypesEntry", - (_message.Message,), - dict( - DESCRIPTOR=_PARTITIONQUERYREQUEST_PARAMTYPESENTRY, - __module__="google.cloud.spanner_v1.proto.spanner_pb2" - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionQueryRequest.ParamTypesEntry) - ), - ), - DESCRIPTOR=_PARTITIONQUERYREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for - [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] - - - Attributes: - session: - Required. The session used to create the partitions. - transaction: - Read only snapshot transactions are supported, read/write and - single use transactions are not. - sql: - Required. The query request to generate partitions for. The - request will fail if the query is not root partitionable. The - query plan of a root partitionable query has a single - distributed union operator. A distributed union operator - conceptually divides one or more tables into multiple splits, - remotely evaluates a subquery independently on each split, and - then unions all results. This must not contain DML commands, - such as INSERT, UPDATE, or DELETE. Use [ExecuteStreamingSql][g - oogle.spanner.v1.Spanner.ExecuteStreamingSql] with a - PartitionedDml transaction for large, partition-friendly DML - operations. - params: - Parameter names and values that bind to placeholders in the - SQL string. A parameter placeholder consists of the ``@`` - character followed by the parameter name (for example, - ``@firstName``). Parameter names can contain letters, numbers, - and underscores. Parameters can appear anywhere that a - literal value is expected. The same parameter name can be used - more than once, for example: ``"WHERE id > @msg_id AND id < - @msg_id + 100"`` It is an error to execute a SQL statement - with unbound parameters. - param_types: - It is not always possible for Cloud Spanner to infer the right - SQL type from a JSON value. For example, values of type - ``BYTES`` and values of type ``STRING`` both appear in - [params][google.spanner.v1.PartitionQueryRequest.params] as - JSON strings. In these cases, ``param_types`` can be used to - specify the exact SQL type for some or all of the SQL query - parameters. See the definition of - [Type][google.spanner.v1.Type] for more information about SQL - types. - partition_options: - Additional options that affect how many partitions are - created. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionQueryRequest) - ), -) -_sym_db.RegisterMessage(PartitionQueryRequest) -_sym_db.RegisterMessage(PartitionQueryRequest.ParamTypesEntry) - -PartitionReadRequest = _reflection.GeneratedProtocolMessageType( - "PartitionReadRequest", - (_message.Message,), - dict( - DESCRIPTOR=_PARTITIONREADREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for - [PartitionRead][google.spanner.v1.Spanner.PartitionRead] - - - Attributes: - session: - Required. The session used to create the partitions. - transaction: - Read only snapshot transactions are supported, read/write and - single use transactions are not. - table: - Required. The name of the table in the database to be read. - index: - If non-empty, the name of an index on - [table][google.spanner.v1.PartitionReadRequest.table]. This - index is used instead of the table primary key when - interpreting - [key\_set][google.spanner.v1.PartitionReadRequest.key\_set] - and sorting result rows. See - [key\_set][google.spanner.v1.PartitionReadRequest.key\_set] - for further information. - columns: - The columns of - [table][google.spanner.v1.PartitionReadRequest.table] to be - returned for each row matching this request. - key_set: - Required. ``key_set`` identifies the rows to be yielded. - ``key_set`` names the primary keys of the rows in - [table][google.spanner.v1.PartitionReadRequest.table] to be - yielded, unless - [index][google.spanner.v1.PartitionReadRequest.index] is - present. If - [index][google.spanner.v1.PartitionReadRequest.index] is - present, then - [key\_set][google.spanner.v1.PartitionReadRequest.key\_set] - instead names index keys in - [index][google.spanner.v1.PartitionReadRequest.index]. It is - not an error for the ``key_set`` to name rows that do not - exist in the database. Read yields nothing for nonexistent - rows. - partition_options: - Additional options that affect how many partitions are - created. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionReadRequest) - ), -) -_sym_db.RegisterMessage(PartitionReadRequest) - -Partition = _reflection.GeneratedProtocolMessageType( - "Partition", - (_message.Message,), - dict( - DESCRIPTOR=_PARTITION, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""Information returned for each partition returned in a - PartitionResponse. - - - Attributes: - partition_token: - This token can be passed to Read, StreamingRead, ExecuteSql, - or ExecuteStreamingSql requests to restrict the results to - those identified by this partition token. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Partition) - ), -) -_sym_db.RegisterMessage(Partition) - -PartitionResponse = _reflection.GeneratedProtocolMessageType( - "PartitionResponse", - (_message.Message,), - dict( - DESCRIPTOR=_PARTITIONRESPONSE, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The response for - [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] or - [PartitionRead][google.spanner.v1.Spanner.PartitionRead] - - - Attributes: - partitions: - Partitions created by this request. - transaction: - Transaction created by this request. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.PartitionResponse) - ), -) -_sym_db.RegisterMessage(PartitionResponse) - -ReadRequest = _reflection.GeneratedProtocolMessageType( - "ReadRequest", - (_message.Message,), - dict( - DESCRIPTOR=_READREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for [Read][google.spanner.v1.Spanner.Read] and - [StreamingRead][google.spanner.v1.Spanner.StreamingRead]. - - - Attributes: - session: - Required. The session in which the read should be performed. - transaction: - The transaction to use. If none is provided, the default is a - temporary read-only transaction with strong concurrency. - table: - Required. The name of the table in the database to be read. - index: - If non-empty, the name of an index on - [table][google.spanner.v1.ReadRequest.table]. This index is - used instead of the table primary key when interpreting - [key\_set][google.spanner.v1.ReadRequest.key\_set] and sorting - result rows. See - [key\_set][google.spanner.v1.ReadRequest.key\_set] for further - information. - columns: - Required. The columns of - [table][google.spanner.v1.ReadRequest.table] to be returned - for each row matching this request. - key_set: - Required. ``key_set`` identifies the rows to be yielded. - ``key_set`` names the primary keys of the rows in - [table][google.spanner.v1.ReadRequest.table] to be yielded, - unless [index][google.spanner.v1.ReadRequest.index] is - present. If [index][google.spanner.v1.ReadRequest.index] is - present, then - [key\_set][google.spanner.v1.ReadRequest.key\_set] instead - names index keys in - [index][google.spanner.v1.ReadRequest.index]. If the [partiti - on\_token][google.spanner.v1.ReadRequest.partition\_token] - field is empty, rows are yielded in table primary key order - (if [index][google.spanner.v1.ReadRequest.index] is empty) or - index key order (if - [index][google.spanner.v1.ReadRequest.index] is non-empty). If - the [partition\_token][google.spanner.v1.ReadRequest.partition - \_token] field is not empty, rows will be yielded in an - unspecified order. It is not an error for the ``key_set`` to - name rows that do not exist in the database. Read yields - nothing for nonexistent rows. - limit: - If greater than zero, only the first ``limit`` rows are - yielded. If ``limit`` is zero, the default is no limit. A - limit cannot be specified if ``partition_token`` is set. - resume_token: - If this request is resuming a previously interrupted read, - ``resume_token`` should be copied from the last - [PartialResultSet][google.spanner.v1.PartialResultSet] yielded - before the interruption. Doing this enables the new read to - resume where the last read left off. The rest of the request - parameters must exactly match the request that yielded this - token. - partition_token: - If present, results will be restricted to the specified - partition previously created using PartitionRead(). There must - be an exact match for the values of fields common to this - message and the PartitionReadRequest message used to create - this partition\_token. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.ReadRequest) - ), -) -_sym_db.RegisterMessage(ReadRequest) - -BeginTransactionRequest = _reflection.GeneratedProtocolMessageType( - "BeginTransactionRequest", - (_message.Message,), - dict( - DESCRIPTOR=_BEGINTRANSACTIONREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for - [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. - - - Attributes: - session: - Required. The session in which the transaction runs. - options: - Required. Options for the new transaction. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.BeginTransactionRequest) - ), -) -_sym_db.RegisterMessage(BeginTransactionRequest) - -CommitRequest = _reflection.GeneratedProtocolMessageType( - "CommitRequest", - (_message.Message,), - dict( - DESCRIPTOR=_COMMITREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for - [Commit][google.spanner.v1.Spanner.Commit]. - - - Attributes: - session: - Required. The session in which the transaction to be committed - is running. - transaction: - Required. The transaction in which to commit. - transaction_id: - Commit a previously-started transaction. - single_use_transaction: - Execute mutations in a temporary transaction. Note that unlike - commit of a previously-started transaction, commit with a - temporary transaction is non-idempotent. That is, if the - ``CommitRequest`` is sent to Cloud Spanner more than once (for - instance, due to retries in the application, or in the - transport library), it is possible that the mutations are - executed more than once. If this is undesirable, use - [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] - and [Commit][google.spanner.v1.Spanner.Commit] instead. - mutations: - The mutations to be executed when this transaction commits. - All mutations are applied atomically, in the order they appear - in this list. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.CommitRequest) - ), -) -_sym_db.RegisterMessage(CommitRequest) - -CommitResponse = _reflection.GeneratedProtocolMessageType( - "CommitResponse", - (_message.Message,), - dict( - DESCRIPTOR=_COMMITRESPONSE, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The response for - [Commit][google.spanner.v1.Spanner.Commit]. - - - Attributes: - commit_timestamp: - The Cloud Spanner timestamp at which the transaction - committed. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.CommitResponse) - ), -) -_sym_db.RegisterMessage(CommitResponse) - -RollbackRequest = _reflection.GeneratedProtocolMessageType( - "RollbackRequest", - (_message.Message,), - dict( - DESCRIPTOR=_ROLLBACKREQUEST, - __module__="google.cloud.spanner_v1.proto.spanner_pb2", - __doc__="""The request for - [Rollback][google.spanner.v1.Spanner.Rollback]. - - - Attributes: - session: - Required. The session in which the transaction to roll back is - running. - transaction_id: - Required. The transaction to roll back. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.RollbackRequest) - ), -) -_sym_db.RegisterMessage(RollbackRequest) - - -DESCRIPTOR._options = None -_CREATESESSIONREQUEST.fields_by_name["database"]._options = None -_BATCHCREATESESSIONSREQUEST.fields_by_name["database"]._options = None -_BATCHCREATESESSIONSREQUEST.fields_by_name["session_count"]._options = None -_SESSION_LABELSENTRY._options = None -_SESSION._options = None -_GETSESSIONREQUEST.fields_by_name["name"]._options = None -_LISTSESSIONSREQUEST.fields_by_name["database"]._options = None -_DELETESESSIONREQUEST.fields_by_name["name"]._options = None -_EXECUTESQLREQUEST_PARAMTYPESENTRY._options = None -_EXECUTESQLREQUEST.fields_by_name["session"]._options = None -_EXECUTESQLREQUEST.fields_by_name["sql"]._options = None -_EXECUTEBATCHDMLREQUEST_STATEMENT_PARAMTYPESENTRY._options = None -_EXECUTEBATCHDMLREQUEST.fields_by_name["session"]._options = None -_EXECUTEBATCHDMLREQUEST.fields_by_name["transaction"]._options = None -_EXECUTEBATCHDMLREQUEST.fields_by_name["statements"]._options = None -_EXECUTEBATCHDMLREQUEST.fields_by_name["seqno"]._options = None -_PARTITIONQUERYREQUEST_PARAMTYPESENTRY._options = None -_PARTITIONQUERYREQUEST.fields_by_name["session"]._options = None -_PARTITIONQUERYREQUEST.fields_by_name["sql"]._options = None -_PARTITIONREADREQUEST.fields_by_name["session"]._options = None -_PARTITIONREADREQUEST.fields_by_name["table"]._options = None -_PARTITIONREADREQUEST.fields_by_name["key_set"]._options = None -_READREQUEST.fields_by_name["session"]._options = None -_READREQUEST.fields_by_name["table"]._options = None -_READREQUEST.fields_by_name["columns"]._options = None -_READREQUEST.fields_by_name["key_set"]._options = None -_BEGINTRANSACTIONREQUEST.fields_by_name["session"]._options = None -_BEGINTRANSACTIONREQUEST.fields_by_name["options"]._options = None -_COMMITREQUEST.fields_by_name["session"]._options = None -_ROLLBACKREQUEST.fields_by_name["session"]._options = None -_ROLLBACKREQUEST.fields_by_name["transaction_id"]._options = None - -_SPANNER = _descriptor.ServiceDescriptor( - name="Spanner", - full_name="google.spanner.v1.Spanner", - file=DESCRIPTOR, - index=0, - serialized_options=_b( - "\312A\026spanner.googleapis.com\322A[https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.data" - ), - serialized_start=4599, - serialized_end=7479, - methods=[ - _descriptor.MethodDescriptor( - name="CreateSession", - full_name="google.spanner.v1.Spanner.CreateSession", - index=0, - containing_service=None, - input_type=_CREATESESSIONREQUEST, - output_type=_SESSION, - serialized_options=_b( - '\202\323\344\223\002?":/v1/{database=projects/*/instances/*/databases/*}/sessions:\001*\332A\010database' - ), - ), - _descriptor.MethodDescriptor( - name="BatchCreateSessions", - full_name="google.spanner.v1.Spanner.BatchCreateSessions", - index=1, - containing_service=None, - input_type=_BATCHCREATESESSIONSREQUEST, - output_type=_BATCHCREATESESSIONSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002K"F/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate:\001*\332A\026database,session_count' - ), - ), - _descriptor.MethodDescriptor( - name="GetSession", - full_name="google.spanner.v1.Spanner.GetSession", - index=2, - containing_service=None, - input_type=_GETSESSIONREQUEST, - output_type=_SESSION, - serialized_options=_b( - "\202\323\344\223\002:\0228/v1/{name=projects/*/instances/*/databases/*/sessions/*}\332A\004name" - ), - ), - 
_descriptor.MethodDescriptor( - name="ListSessions", - full_name="google.spanner.v1.Spanner.ListSessions", - index=3, - containing_service=None, - input_type=_LISTSESSIONSREQUEST, - output_type=_LISTSESSIONSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002<\022:/v1/{database=projects/*/instances/*/databases/*}/sessions\332A\010database" - ), - ), - _descriptor.MethodDescriptor( - name="DeleteSession", - full_name="google.spanner.v1.Spanner.DeleteSession", - index=4, - containing_service=None, - input_type=_DELETESESSIONREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002:*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\332A\004name" - ), - ), - _descriptor.MethodDescriptor( - name="ExecuteSql", - full_name="google.spanner.v1.Spanner.ExecuteSql", - index=5, - containing_service=None, - input_type=_EXECUTESQLREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._RESULTSET, - serialized_options=_b( - '\202\323\344\223\002K"F/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="ExecuteStreamingSql", - full_name="google.spanner.v1.Spanner.ExecuteStreamingSql", - index=6, - containing_service=None, - input_type=_EXECUTESQLREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._PARTIALRESULTSET, - serialized_options=_b( - '\202\323\344\223\002T"O/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="ExecuteBatchDml", - full_name="google.spanner.v1.Spanner.ExecuteBatchDml", - index=7, - containing_service=None, - input_type=_EXECUTEBATCHDMLREQUEST, - output_type=_EXECUTEBATCHDMLRESPONSE, - serialized_options=_b( - '\202\323\344\223\002P"K/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="Read", - full_name="google.spanner.v1.Spanner.Read", - index=8, - containing_service=None, - input_type=_READREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._RESULTSET, - serialized_options=_b( - '\202\323\344\223\002E"@/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="StreamingRead", - full_name="google.spanner.v1.Spanner.StreamingRead", - index=9, - containing_service=None, - input_type=_READREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2._PARTIALRESULTSET, - serialized_options=_b( - '\202\323\344\223\002N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="BeginTransaction", - full_name="google.spanner.v1.Spanner.BeginTransaction", - index=10, - containing_service=None, - input_type=_BEGINTRANSACTIONREQUEST, - output_type=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2._TRANSACTION, - serialized_options=_b( - '\202\323\344\223\002Q"L/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction:\001*\332A\017session,options' - ), - ), - _descriptor.MethodDescriptor( - name="Commit", - full_name="google.spanner.v1.Spanner.Commit", - index=11, - containing_service=None, - input_type=_COMMITREQUEST, - output_type=_COMMITRESPONSE, - serialized_options=_b( - '\202\323\344\223\002G"B/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit:\001*\332A 
session,transaction_id,mutations\332A(session,single_use_transaction,mutations' - ), - ), - _descriptor.MethodDescriptor( - name="Rollback", - full_name="google.spanner.v1.Spanner.Rollback", - index=12, - containing_service=None, - input_type=_ROLLBACKREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - '\202\323\344\223\002I"D/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback:\001*\332A\026session,transaction_id' - ), - ), - _descriptor.MethodDescriptor( - name="PartitionQuery", - full_name="google.spanner.v1.Spanner.PartitionQuery", - index=13, - containing_service=None, - input_type=_PARTITIONQUERYREQUEST, - output_type=_PARTITIONRESPONSE, - serialized_options=_b( - '\202\323\344\223\002O"J/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery:\001*' - ), - ), - _descriptor.MethodDescriptor( - name="PartitionRead", - full_name="google.spanner.v1.Spanner.PartitionRead", - index=14, - containing_service=None, - input_type=_PARTITIONREADREQUEST, - output_type=_PARTITIONRESPONSE, - serialized_options=_b( - '\202\323\344\223\002N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead:\001*' - ), - ), - ], -) -_sym_db.RegisterServiceDescriptor(_SPANNER) - -DESCRIPTOR.services_by_name["Spanner"] = _SPANNER - -# @@protoc_insertion_point(module_scope) diff --git a/spanner/google/cloud/spanner_v1/proto/spanner_pb2_grpc.py b/spanner/google/cloud/spanner_v1/proto/spanner_pb2_grpc.py deleted file mode 100644 index 4505b75cbca8..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/spanner_pb2_grpc.py +++ /dev/null @@ -1,412 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -from google.cloud.spanner_v1.proto import ( - result_set_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2, -) -from google.cloud.spanner_v1.proto import ( - spanner_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2, -) -from google.cloud.spanner_v1.proto import ( - transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class SpannerStub(object): - """Cloud Spanner API - - The Cloud Spanner API can be used to manage sessions and execute - transactions on data stored in Cloud Spanner databases. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateSession = channel.unary_unary( - "/google.spanner.v1.Spanner/CreateSession", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString, - ) - self.BatchCreateSessions = channel.unary_unary( - "/google.spanner.v1.Spanner/BatchCreateSessions", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.FromString, - ) - self.GetSession = channel.unary_unary( - "/google.spanner.v1.Spanner/GetSession", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString, - ) - self.ListSessions = channel.unary_unary( - "/google.spanner.v1.Spanner/ListSessions", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.FromString, - ) - self.DeleteSession = channel.unary_unary( - "/google.spanner.v1.Spanner/DeleteSession", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ExecuteSql = channel.unary_unary( - "/google.spanner.v1.Spanner/ExecuteSql", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString, - ) - self.ExecuteStreamingSql = channel.unary_stream( - "/google.spanner.v1.Spanner/ExecuteStreamingSql", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString, - ) - self.ExecuteBatchDml = channel.unary_unary( - "/google.spanner.v1.Spanner/ExecuteBatchDml", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.FromString, - ) - self.Read = channel.unary_unary( - "/google.spanner.v1.Spanner/Read", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString, - ) - self.StreamingRead = channel.unary_stream( - "/google.spanner.v1.Spanner/StreamingRead", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString, - ) - self.BeginTransaction = channel.unary_unary( - "/google.spanner.v1.Spanner/BeginTransaction", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.SerializeToString, - 
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.FromString, - ) - self.Commit = channel.unary_unary( - "/google.spanner.v1.Spanner/Commit", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.FromString, - ) - self.Rollback = channel.unary_unary( - "/google.spanner.v1.Spanner/Rollback", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.PartitionQuery = channel.unary_unary( - "/google.spanner.v1.Spanner/PartitionQuery", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString, - ) - self.PartitionRead = channel.unary_unary( - "/google.spanner.v1.Spanner/PartitionRead", - request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString, - ) - - -class SpannerServicer(object): - """Cloud Spanner API - - The Cloud Spanner API can be used to manage sessions and execute - transactions on data stored in Cloud Spanner databases. - """ - - def CreateSession(self, request, context): - """Creates a new session. A session can be used to perform - transactions that read and/or modify data in a Cloud Spanner database. - Sessions are meant to be reused for many consecutive - transactions. - - Sessions can only execute one transaction at a time. To execute - multiple concurrent read-write/write-only transactions, create - multiple sessions. Note that standalone reads and queries use a - transaction internally, and count toward the one transaction - limit. - - Active sessions use additional server resources, so it is a good idea to - delete idle and unneeded sessions. - Aside from explicit deletes, Cloud Spanner can delete sessions for which no - operations are sent for more than an hour. If a session is deleted, - requests to it return `NOT_FOUND`. - - Idle sessions can be kept alive by sending a trivial SQL query - periodically, e.g., `"SELECT 1"`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def BatchCreateSessions(self, request, context): - """Creates multiple new sessions. - - This API can be used to initialize a session cache on the clients. - See https://goo.gl/TgSFN2 for best practices on session cache management. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetSession(self, request, context): - """Gets a session. Returns `NOT_FOUND` if the session does not exist. - This is mainly useful for determining whether a session is still - alive. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListSessions(self, request, context): - """Lists all sessions in a given database. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteSession(self, request, context): - """Ends a session, releasing server resources associated with it. This will - asynchronously trigger cancellation of any operations that are running with - this session. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExecuteSql(self, request, context): - """Executes an SQL statement, returning all results in a single reply. This - method cannot be used to return a result set larger than 10 MiB; - if the query yields more data than that, the query fails with - a `FAILED_PRECONDITION` error. - - Operations inside read-write transactions might return `ABORTED`. If - this occurs, the application should restart the transaction from - the beginning. See [Transaction][google.spanner.v1.Transaction] for more - details. - - Larger result sets can be fetched in streaming fashion by calling - [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] - instead. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExecuteStreamingSql(self, request, context): - """Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the - result set as a stream. Unlike - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on - the size of the returned result set. However, no individual row in the - result set can exceed 100 MiB, and no column value can exceed 10 MiB. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ExecuteBatchDml(self, request, context): - """Executes a batch of SQL DML statements. This method allows many statements - to be run with lower latency than submitting them sequentially with - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. - - Statements are executed in sequential order. A request can succeed even if - a statement fails. The - [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] - field in the response provides information about the statement that failed. - Clients must inspect this field to determine whether an error occurred. - - Execution stops after the first failed statement; the remaining statements - are not executed. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Read(self, request, context): - """Reads rows from the database using key lookups and scans, as a - simple key/value style alternative to - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be - used to return a result set larger than 10 MiB; if the read matches more - data than that, the read fails with a `FAILED_PRECONDITION` - error. - - Reads inside read-write transactions might return `ABORTED`. If - this occurs, the application should restart the transaction from - the beginning. See [Transaction][google.spanner.v1.Transaction] for more - details. - - Larger result sets can be yielded in streaming fashion by calling - [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def StreamingRead(self, request, context): - """Like [Read][google.spanner.v1.Spanner.Read], except returns the result set - as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no - limit on the size of the returned result set. However, no individual row in - the result set can exceed 100 MiB, and no column value can exceed - 10 MiB. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def BeginTransaction(self, request, context): - """Begins a new transaction. This step can often be skipped: - [Read][google.spanner.v1.Spanner.Read], - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and - [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a - side-effect. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Commit(self, request, context): - """Commits a transaction. The request includes the mutations to be - applied to rows in the database. - - `Commit` might return an `ABORTED` error. This can occur at any time; - commonly, the cause is conflicts with concurrent - transactions. However, it can also happen for a variety of other - reasons. If `Commit` returns `ABORTED`, the caller should re-attempt - the transaction from the beginning, re-using the same session. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Rollback(self, request, context): - """Rolls back a transaction, releasing any locks it holds. It is a good - idea to call this for any transaction that includes one or more - [Read][google.spanner.v1.Spanner.Read] or - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately - decides not to commit. - - `Rollback` returns `OK` if it successfully aborts the transaction, the - transaction was already aborted, or the transaction is not - found. `Rollback` never returns `ABORTED`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def PartitionQuery(self, request, context): - """Creates a set of partition tokens that can be used to execute a query - operation in parallel. Each of the returned partition tokens can be used - by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to - specify a subset of the query result to read. The same session and - read-only transaction must be used by the PartitionQueryRequest used to - create the partition tokens and the ExecuteSqlRequests that use the - partition tokens. - - Partition tokens become invalid when the session used to create them - is deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the query, and - the whole operation must be restarted from the beginning. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def PartitionRead(self, request, context): - """Creates a set of partition tokens that can be used to execute a read - operation in parallel. 
Each of the returned partition tokens can be used - by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a - subset of the read result to read. The same session and read-only - transaction must be used by the PartitionReadRequest used to create the - partition tokens and the ReadRequests that use the partition tokens. There - are no ordering guarantees on rows returned among the returned partition - tokens, or even within each individual StreamingRead call issued with a - partition_token. - - Partition tokens become invalid when the session used to create them - is deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the read, and - the whole operation must be restarted from the beginning. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_SpannerServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateSession": grpc.unary_unary_rpc_method_handler( - servicer.CreateSession, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.SerializeToString, - ), - "BatchCreateSessions": grpc.unary_unary_rpc_method_handler( - servicer.BatchCreateSessions, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.SerializeToString, - ), - "GetSession": grpc.unary_unary_rpc_method_handler( - servicer.GetSession, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.SerializeToString, - ), - "ListSessions": grpc.unary_unary_rpc_method_handler( - servicer.ListSessions, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.SerializeToString, - ), - "DeleteSession": grpc.unary_unary_rpc_method_handler( - servicer.DeleteSession, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ExecuteSql": grpc.unary_unary_rpc_method_handler( - servicer.ExecuteSql, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.SerializeToString, - ), - "ExecuteStreamingSql": grpc.unary_stream_rpc_method_handler( - servicer.ExecuteStreamingSql, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.SerializeToString, - ), - "ExecuteBatchDml": grpc.unary_unary_rpc_method_handler( - servicer.ExecuteBatchDml, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.FromString, - 
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.SerializeToString, - ), - "Read": grpc.unary_unary_rpc_method_handler( - servicer.Read, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.SerializeToString, - ), - "StreamingRead": grpc.unary_stream_rpc_method_handler( - servicer.StreamingRead, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.SerializeToString, - ), - "BeginTransaction": grpc.unary_unary_rpc_method_handler( - servicer.BeginTransaction, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.SerializeToString, - ), - "Commit": grpc.unary_unary_rpc_method_handler( - servicer.Commit, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.SerializeToString, - ), - "Rollback": grpc.unary_unary_rpc_method_handler( - servicer.Rollback, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "PartitionQuery": grpc.unary_unary_rpc_method_handler( - servicer.PartitionQuery, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.SerializeToString, - ), - "PartitionRead": grpc.unary_unary_rpc_method_handler( - servicer.PartitionRead, - request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.FromString, - response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.spanner.v1.Spanner", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/spanner/google/cloud/spanner_v1/proto/transaction.proto b/spanner/google/cloud/spanner_v1/proto/transaction.proto deleted file mode 100644 index 7c2434b14a81..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/transaction.proto +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.spanner.v1; - -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner"; -option java_multiple_files = true; -option java_outer_classname = "TransactionProto"; -option java_package = "com.google.spanner.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\V1"; - -// # Transactions -// -// -// Each session can have at most one active transaction at a time. After the -// active transaction is completed, the session can immediately be -// re-used for the next transaction. It is not necessary to create a -// new session for each transaction. -// -// # Transaction Modes -// -// Cloud Spanner supports three transaction modes: -// -// 1. Locking read-write. This type of transaction is the only way -// to write data into Cloud Spanner. These transactions rely on -// pessimistic locking and, if necessary, two-phase commit. -// Locking read-write transactions may abort, requiring the -// application to retry. -// -// 2. Snapshot read-only. This transaction type provides guaranteed -// consistency across several reads, but does not allow -// writes. Snapshot read-only transactions can be configured to -// read at timestamps in the past. Snapshot read-only -// transactions do not need to be committed. -// -// 3. Partitioned DML. This type of transaction is used to execute -// a single Partitioned DML statement. Partitioned DML partitions -// the key space and runs the DML statement over each partition -// in parallel using separate, internal transactions that commit -// independently. Partitioned DML transactions do not need to be -// committed. -// -// For transactions that only read, snapshot read-only transactions -// provide simpler semantics and are almost always faster. In -// particular, read-only transactions do not take locks, so they do -// not conflict with read-write transactions. As a consequence of not -// taking locks, they also do not abort, so retry loops are not needed. -// -// Transactions may only read/write data in a single database. They -// may, however, read/write data in different tables within that -// database. -// -// ## Locking Read-Write Transactions -// -// Locking transactions may be used to atomically read-modify-write -// data anywhere in a database. This type of transaction is externally -// consistent. -// -// Clients should attempt to minimize the amount of time a transaction -// is active. Faster transactions commit with higher probability -// and cause less contention. Cloud Spanner attempts to keep read locks -// active as long as the transaction continues to do reads, and the -// transaction has not been terminated by -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of -// inactivity at the client may cause Cloud Spanner to release a -// transaction's locks and abort it. -// -// Conceptually, a read-write transaction consists of zero or more -// reads or SQL statements followed by -// [Commit][google.spanner.v1.Spanner.Commit]. At any time before -// [Commit][google.spanner.v1.Spanner.Commit], the client can send a -// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the -// transaction. 
-// -// ### Semantics -// -// Cloud Spanner can commit the transaction if all read locks it acquired -// are still valid at commit time, and it is able to acquire write -// locks for all writes. Cloud Spanner can abort the transaction for any -// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees -// that the transaction has not modified any user data in Cloud Spanner. -// -// Unless the transaction commits, Cloud Spanner makes no guarantees about -// how long the transaction's locks were held for. It is an error to -// use Cloud Spanner locks for any sort of mutual exclusion other than -// between Cloud Spanner transactions themselves. -// -// ### Retrying Aborted Transactions -// -// When a transaction aborts, the application can choose to retry the -// whole transaction again. To maximize the chances of successfully -// committing the retry, the client should execute the retry in the -// same session as the original attempt. The original session's lock -// priority increases with each consecutive abort, meaning that each -// attempt has a slightly better chance of success than the previous. -// -// Under some circumstances (e.g., many transactions attempting to -// modify the same row(s)), a transaction can abort many times in a -// short period before successfully committing. Thus, it is not a good -// idea to cap the number of retries a transaction can attempt; -// instead, it is better to limit the total amount of wall time spent -// retrying. -// -// ### Idle Transactions -// -// A transaction is considered idle if it has no outstanding reads or -// SQL queries and has not started a read or SQL query within the last 10 -// seconds. Idle transactions can be aborted by Cloud Spanner so that they -// don't hold on to locks indefinitely. In that case, the commit will -// fail with error `ABORTED`. -// -// If this behavior is undesirable, periodically executing a simple -// SQL query in the transaction (e.g., `SELECT 1`) prevents the -// transaction from becoming idle. -// -// ## Snapshot Read-Only Transactions -// -// Snapshot read-only transactions provides a simpler method than -// locking read-write transactions for doing several consistent -// reads. However, this type of transaction does not support writes. -// -// Snapshot transactions do not take locks. Instead, they work by -// choosing a Cloud Spanner timestamp, then executing all reads at that -// timestamp. Since they do not acquire locks, they do not block -// concurrent read-write transactions. -// -// Unlike locking read-write transactions, snapshot read-only -// transactions never abort. They can fail if the chosen read -// timestamp is garbage collected; however, the default garbage -// collection policy is generous enough that most applications do not -// need to worry about this in practice. -// -// Snapshot read-only transactions do not need to call -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not -// permitted to do so). -// -// To execute a snapshot transaction, the client specifies a timestamp -// bound, which tells Cloud Spanner how to choose a read timestamp. -// -// The types of timestamp bound are: -// -// - Strong (the default). -// - Bounded staleness. -// - Exact staleness. -// -// If the Cloud Spanner database to be read is geographically distributed, -// stale read-only transactions can execute more quickly than strong -// or read-write transaction, because they are able to execute far -// from the leader replica. 
-// -// Each type of timestamp bound is discussed in detail below. -// -// ### Strong -// -// Strong reads are guaranteed to see the effects of all transactions -// that have committed before the start of the read. Furthermore, all -// rows yielded by a single read are consistent with each other -- if -// any part of the read observes a transaction, all parts of the read -// see the transaction. -// -// Strong reads are not repeatable: two consecutive strong read-only -// transactions might return inconsistent results if there are -// concurrent writes. If consistency across reads is required, the -// reads should be executed within a transaction or at an exact read -// timestamp. -// -// See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. -// -// ### Exact Staleness -// -// These timestamp bounds execute reads at a user-specified -// timestamp. Reads at a timestamp are guaranteed to see a consistent -// prefix of the global transaction history: they observe -// modifications done by all transactions with a commit timestamp <= -// the read timestamp, and observe none of the modifications done by -// transactions with a larger commit timestamp. They will block until -// all conflicting transactions that may be assigned commit timestamps -// <= the read timestamp have finished. -// -// The timestamp can either be expressed as an absolute Cloud Spanner commit -// timestamp or a staleness relative to the current time. -// -// These modes do not require a "negotiation phase" to pick a -// timestamp. As a result, they execute slightly faster than the -// equivalent boundedly stale concurrency modes. On the other hand, -// boundedly stale reads usually return fresher results. -// -// See [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] and -// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. -// -// ### Bounded Staleness -// -// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, -// subject to a user-provided staleness bound. Cloud Spanner chooses the -// newest timestamp within the staleness bound that allows execution -// of the reads at the closest available replica without blocking. -// -// All rows yielded are consistent with each other -- if any part of -// the read observes a transaction, all parts of the read see the -// transaction. Boundedly stale reads are not repeatable: two stale -// reads, even if they use the same staleness bound, can execute at -// different timestamps and thus return inconsistent results. -// -// Boundedly stale reads execute in two phases: the first phase -// negotiates a timestamp among all replicas needed to serve the -// read. In the second phase, reads are executed at the negotiated -// timestamp. -// -// As a result of the two phase execution, bounded staleness reads are -// usually a little slower than comparable exact staleness -// reads. However, they are typically able to return fresher -// results, and are more likely to execute at the closest replica. -// -// Because the timestamp negotiation requires up-front knowledge of -// which rows will be read, it can only be used with single-use -// read-only transactions. -// -// See [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] and -// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. 
-// -// ### Old Read Timestamps and Garbage Collection -// -// Cloud Spanner continuously garbage collects deleted and overwritten data -// in the background to reclaim storage space. This process is known -// as "version GC". By default, version GC reclaims versions after they -// are one hour old. Because of this, Cloud Spanner cannot perform reads -// at read timestamps more than one hour in the past. This -// restriction also applies to in-progress reads and/or SQL queries whose -// timestamp become too old while executing. Reads and SQL queries with -// too-old read timestamps fail with the error `FAILED_PRECONDITION`. -// -// ## Partitioned DML Transactions -// -// Partitioned DML transactions are used to execute DML statements with a -// different execution strategy that provides different, and often better, -// scalability properties for large, table-wide operations than DML in a -// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, -// should prefer using ReadWrite transactions. -// -// Partitioned DML partitions the keyspace and runs the DML statement on each -// partition in separate, internal transactions. These transactions commit -// automatically when complete, and run independently from one another. -// -// To reduce lock contention, this execution strategy only acquires read locks -// on rows that match the WHERE clause of the statement. Additionally, the -// smaller per-partition transactions hold locks for less time. -// -// That said, Partitioned DML is not a drop-in replacement for standard DML used -// in ReadWrite transactions. -// -// - The DML statement must be fully-partitionable. Specifically, the statement -// must be expressible as the union of many statements which each access only -// a single row of the table. -// -// - The statement is not applied atomically to all rows of the table. Rather, -// the statement is applied atomically to partitions of the table, in -// independent transactions. Secondary index rows are updated atomically -// with the base table rows. -// -// - Partitioned DML does not guarantee exactly-once execution semantics -// against a partition. The statement will be applied at least once to each -// partition. It is strongly recommended that the DML statement should be -// idempotent to avoid unexpected results. For instance, it is potentially -// dangerous to run a statement such as -// `UPDATE table SET column = column + 1` as it could be run multiple times -// against some rows. -// -// - The partitions are committed automatically - there is no support for -// Commit or Rollback. If the call returns an error, or if the client issuing -// the ExecuteSql call dies, it is possible that some rows had the statement -// executed on them successfully. It is also possible that statement was -// never executed against other rows. -// -// - Partitioned DML transactions may only contain the execution of a single -// DML statement via ExecuteSql or ExecuteStreamingSql. -// -// - If any error is encountered during the execution of the partitioned DML -// operation (for instance, a UNIQUE INDEX violation, division by zero, or a -// value that cannot be stored due to schema constraints), then the -// operation is stopped at that point and an error is returned. It is -// possible that at this point, some partitions have been committed (or even -// committed multiple times), and other partitions have not been run at all. 
-// -// Given the above, Partitioned DML is good fit for large, database-wide, -// operations that are idempotent, such as deleting old rows from a very large -// table. -message TransactionOptions { - // Message type to initiate a read-write transaction. Currently this - // transaction type has no options. - message ReadWrite { - - } - - // Message type to initiate a Partitioned DML transaction. - message PartitionedDml { - - } - - // Message type to initiate a read-only transaction. - message ReadOnly { - // How to choose the timestamp for the read-only transaction. - oneof timestamp_bound { - // Read at a timestamp where all previously committed transactions - // are visible. - bool strong = 1; - - // Executes all reads at a timestamp >= `min_read_timestamp`. - // - // This is useful for requesting fresher data than some previous - // read, or data that is fresh enough to observe the effects of some - // previously committed transaction whose timestamp is known. - // - // Note that this option can only be used in single-use transactions. - // - // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. - // Example: `"2014-10-02T15:01:23.045123456Z"`. - google.protobuf.Timestamp min_read_timestamp = 2; - - // Read data at a timestamp >= `NOW - max_staleness` - // seconds. Guarantees that all writes that have committed more - // than the specified number of seconds ago are visible. Because - // Cloud Spanner chooses the exact timestamp, this mode works even if - // the client's local clock is substantially skewed from Cloud Spanner - // commit timestamps. - // - // Useful for reading the freshest data available at a nearby - // replica, while bounding the possible staleness if the local - // replica has fallen behind. - // - // Note that this option can only be used in single-use - // transactions. - google.protobuf.Duration max_staleness = 3; - - // Executes all reads at the given timestamp. Unlike other modes, - // reads at a specific timestamp are repeatable; the same read at - // the same timestamp always returns the same data. If the - // timestamp is in the future, the read will block until the - // specified timestamp, modulo the read's deadline. - // - // Useful for large scale consistent reads such as mapreduces, or - // for coordinating many reads against a consistent snapshot of the - // data. - // - // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. - // Example: `"2014-10-02T15:01:23.045123456Z"`. - google.protobuf.Timestamp read_timestamp = 4; - - // Executes all reads at a timestamp that is `exact_staleness` - // old. The timestamp is chosen soon after the read is started. - // - // Guarantees that all writes that have committed more than the - // specified number of seconds ago are visible. Because Cloud Spanner - // chooses the exact timestamp, this mode works even if the client's - // local clock is substantially skewed from Cloud Spanner commit - // timestamps. - // - // Useful for reading at nearby replicas without the distributed - // timestamp negotiation overhead of `max_staleness`. - google.protobuf.Duration exact_staleness = 5; - } - - // If true, the Cloud Spanner-selected read timestamp is included in - // the [Transaction][google.spanner.v1.Transaction] message that describes the transaction. - bool return_read_timestamp = 6; - } - - // Required. The type of transaction. - oneof mode { - // Transaction may write. 
- // - // Authorization to begin a read-write transaction requires - // `spanner.databases.beginOrRollbackReadWriteTransaction` permission - // on the `session` resource. - ReadWrite read_write = 1; - - // Partitioned DML transaction. - // - // Authorization to begin a Partitioned DML transaction requires - // `spanner.databases.beginPartitionedDmlTransaction` permission - // on the `session` resource. - PartitionedDml partitioned_dml = 3; - - // Transaction will not write. - // - // Authorization to begin a read-only transaction requires - // `spanner.databases.beginReadOnlyTransaction` permission - // on the `session` resource. - ReadOnly read_only = 2; - } -} - -// A transaction. -message Transaction { - // `id` may be used to identify the transaction in subsequent - // [Read][google.spanner.v1.Spanner.Read], - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], - // [Commit][google.spanner.v1.Spanner.Commit], or - // [Rollback][google.spanner.v1.Spanner.Rollback] calls. - // - // Single-use read-only transactions do not have IDs, because - // single-use transactions do not support multiple requests. - bytes id = 1; - - // For snapshot read-only transactions, the read timestamp chosen - // for the transaction. Not returned by default: see - // [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp]. - // - // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds. - // Example: `"2014-10-02T15:01:23.045123456Z"`. - google.protobuf.Timestamp read_timestamp = 2; -} - -// This message is used to select the transaction in which a -// [Read][google.spanner.v1.Spanner.Read] or -// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs. -// -// See [TransactionOptions][google.spanner.v1.TransactionOptions] for more information about transactions. -message TransactionSelector { - // If no fields are set, the default is a single use transaction - // with strong concurrency. - oneof selector { - // Execute the read or SQL query in a temporary transaction. - // This is the most efficient way to execute a transaction that - // consists of a single SQL query. - TransactionOptions single_use = 1; - - // Execute the read or SQL query in a previously-started transaction. - bytes id = 2; - - // Begin a new transaction and execute this read or SQL query in - // it. The transaction ID of the new transaction is returned in - // [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction], which is a [Transaction][google.spanner.v1.Transaction]. - TransactionOptions begin = 3; - } -} diff --git a/spanner/google/cloud/spanner_v1/proto/transaction_pb2.py b/spanner/google/cloud/spanner_v1/proto/transaction_pb2.py deleted file mode 100644 index aa83e3373cb1..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/transaction_pb2.py +++ /dev/null @@ -1,1019 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
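Editor's aside (not part of the deleted files): a minimal Python sketch of how the TransactionOptions and TransactionSelector messages defined in transaction.proto above compose. It assumes the generated transaction_pb2 module, which this very diff removes, is still importable at its pre-removal path; all message and field names come straight from the proto definitions.

from google.protobuf.duration_pb2 import Duration
from google.cloud.spanner_v1.proto import transaction_pb2  # path as it existed before this deletion

# Read-only mode bounded to at most 10 seconds of staleness; ask Cloud Spanner
# to echo the chosen read timestamp back in the resulting Transaction message.
read_only_options = transaction_pb2.TransactionOptions(
    read_only=transaction_pb2.TransactionOptions.ReadOnly(
        max_staleness=Duration(seconds=10),
        return_read_timestamp=True,
    )
)

# Partitioned DML mode carries no options; setting the field selects the "mode" oneof.
pdml_options = transaction_pb2.TransactionOptions(
    partitioned_dml=transaction_pb2.TransactionOptions.PartitionedDml()
)

# Single-use selector: the most efficient way to run exactly one read or SQL query.
# (max_staleness, used above, is only valid for such single-use transactions.)
selector = transaction_pb2.TransactionSelector(single_use=read_only_options)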
-# source: google/cloud/spanner_v1/proto/transaction.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/transaction.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=_b( - "\n\025com.google.spanner.v1B\020TransactionProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1" - ), - serialized_pb=_b( - '\n/google/cloud/spanner_v1/proto/transaction.proto\x12\x11google.spanner.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xc3\x04\n\x12TransactionOptions\x12\x45\n\nread_write\x18\x01 \x01(\x0b\x32/.google.spanner.v1.TransactionOptions.ReadWriteH\x00\x12O\n\x0fpartitioned_dml\x18\x03 \x01(\x0b\x32\x34.google.spanner.v1.TransactionOptions.PartitionedDmlH\x00\x12\x43\n\tread_only\x18\x02 \x01(\x0b\x32..google.spanner.v1.TransactionOptions.ReadOnlyH\x00\x1a\x0b\n\tReadWrite\x1a\x10\n\x0ePartitionedDml\x1a\xa8\x02\n\x08ReadOnly\x12\x10\n\x06strong\x18\x01 \x01(\x08H\x00\x12\x38\n\x12min_read_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x32\n\rmax_staleness\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x34\n\x0eread_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x34\n\x0f\x65xact_staleness\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x1d\n\x15return_read_timestamp\x18\x06 \x01(\x08\x42\x11\n\x0ftimestamp_boundB\x06\n\x04mode"M\n\x0bTransaction\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\x32\n\x0eread_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xa4\x01\n\x13TransactionSelector\x12;\n\nsingle_use\x18\x01 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x12\x0c\n\x02id\x18\x02 \x01(\x0cH\x00\x12\x36\n\x05\x62\x65gin\x18\x03 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x42\n\n\x08selectorB\x99\x01\n\x15\x63om.google.spanner.v1B\x10TransactionProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1b\x06proto3' - ), - dependencies=[ - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_TRANSACTIONOPTIONS_READWRITE = _descriptor.Descriptor( - name="ReadWrite", - full_name="google.spanner.v1.TransactionOptions.ReadWrite", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=409, - serialized_end=420, -) - -_TRANSACTIONOPTIONS_PARTITIONEDDML = _descriptor.Descriptor( - name="PartitionedDml", - 
full_name="google.spanner.v1.TransactionOptions.PartitionedDml", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=422, - serialized_end=438, -) - -_TRANSACTIONOPTIONS_READONLY = _descriptor.Descriptor( - name="ReadOnly", - full_name="google.spanner.v1.TransactionOptions.ReadOnly", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="strong", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.strong", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="min_read_timestamp", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="max_staleness", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.max_staleness", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_timestamp", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="exact_staleness", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="return_read_timestamp", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp", - index=5, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="timestamp_bound", - full_name="google.spanner.v1.TransactionOptions.ReadOnly.timestamp_bound", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=441, - serialized_end=737, -) - -_TRANSACTIONOPTIONS = _descriptor.Descriptor( - name="TransactionOptions", - full_name="google.spanner.v1.TransactionOptions", - filename=None, - file=DESCRIPTOR, - containing_type=None, 
- fields=[ - _descriptor.FieldDescriptor( - name="read_write", - full_name="google.spanner.v1.TransactionOptions.read_write", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="partitioned_dml", - full_name="google.spanner.v1.TransactionOptions.partitioned_dml", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_only", - full_name="google.spanner.v1.TransactionOptions.read_only", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[ - _TRANSACTIONOPTIONS_READWRITE, - _TRANSACTIONOPTIONS_PARTITIONEDDML, - _TRANSACTIONOPTIONS_READONLY, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mode", - full_name="google.spanner.v1.TransactionOptions.mode", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=166, - serialized_end=745, -) - - -_TRANSACTION = _descriptor.Descriptor( - name="Transaction", - full_name="google.spanner.v1.Transaction", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="id", - full_name="google.spanner.v1.Transaction.id", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="read_timestamp", - full_name="google.spanner.v1.Transaction.read_timestamp", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=747, - serialized_end=824, -) - - -_TRANSACTIONSELECTOR = _descriptor.Descriptor( - name="TransactionSelector", - full_name="google.spanner.v1.TransactionSelector", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="single_use", - full_name="google.spanner.v1.TransactionSelector.single_use", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="id", - full_name="google.spanner.v1.TransactionSelector.id", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="begin", - full_name="google.spanner.v1.TransactionSelector.begin", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="selector", - full_name="google.spanner.v1.TransactionSelector.selector", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=827, - serialized_end=991, -) - -_TRANSACTIONOPTIONS_READWRITE.containing_type = _TRANSACTIONOPTIONS -_TRANSACTIONOPTIONS_PARTITIONEDDML.containing_type = _TRANSACTIONOPTIONS -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "min_read_timestamp" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "max_staleness" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "read_timestamp" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "exact_staleness" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_TRANSACTIONOPTIONS_READONLY.containing_type = _TRANSACTIONOPTIONS -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["strong"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "strong" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["min_read_timestamp"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "min_read_timestamp" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["max_staleness"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "max_staleness" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["read_timestamp"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "read_timestamp" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"].fields.append( - _TRANSACTIONOPTIONS_READONLY.fields_by_name["exact_staleness"] -) -_TRANSACTIONOPTIONS_READONLY.fields_by_name[ - "exact_staleness" -].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["timestamp_bound"] -_TRANSACTIONOPTIONS.fields_by_name[ - "read_write" -].message_type = _TRANSACTIONOPTIONS_READWRITE -_TRANSACTIONOPTIONS.fields_by_name[ - "partitioned_dml" -].message_type = _TRANSACTIONOPTIONS_PARTITIONEDDML -_TRANSACTIONOPTIONS.fields_by_name[ - "read_only" -].message_type = _TRANSACTIONOPTIONS_READONLY -_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append( - 
_TRANSACTIONOPTIONS.fields_by_name["read_write"] -) -_TRANSACTIONOPTIONS.fields_by_name[ - "read_write" -].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"] -_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append( - _TRANSACTIONOPTIONS.fields_by_name["partitioned_dml"] -) -_TRANSACTIONOPTIONS.fields_by_name[ - "partitioned_dml" -].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"] -_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append( - _TRANSACTIONOPTIONS.fields_by_name["read_only"] -) -_TRANSACTIONOPTIONS.fields_by_name[ - "read_only" -].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"] -_TRANSACTION.fields_by_name[ - "read_timestamp" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_TRANSACTIONSELECTOR.fields_by_name["single_use"].message_type = _TRANSACTIONOPTIONS -_TRANSACTIONSELECTOR.fields_by_name["begin"].message_type = _TRANSACTIONOPTIONS -_TRANSACTIONSELECTOR.oneofs_by_name["selector"].fields.append( - _TRANSACTIONSELECTOR.fields_by_name["single_use"] -) -_TRANSACTIONSELECTOR.fields_by_name[ - "single_use" -].containing_oneof = _TRANSACTIONSELECTOR.oneofs_by_name["selector"] -_TRANSACTIONSELECTOR.oneofs_by_name["selector"].fields.append( - _TRANSACTIONSELECTOR.fields_by_name["id"] -) -_TRANSACTIONSELECTOR.fields_by_name[ - "id" -].containing_oneof = _TRANSACTIONSELECTOR.oneofs_by_name["selector"] -_TRANSACTIONSELECTOR.oneofs_by_name["selector"].fields.append( - _TRANSACTIONSELECTOR.fields_by_name["begin"] -) -_TRANSACTIONSELECTOR.fields_by_name[ - "begin" -].containing_oneof = _TRANSACTIONSELECTOR.oneofs_by_name["selector"] -DESCRIPTOR.message_types_by_name["TransactionOptions"] = _TRANSACTIONOPTIONS -DESCRIPTOR.message_types_by_name["Transaction"] = _TRANSACTION -DESCRIPTOR.message_types_by_name["TransactionSelector"] = _TRANSACTIONSELECTOR -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -TransactionOptions = _reflection.GeneratedProtocolMessageType( - "TransactionOptions", - (_message.Message,), - dict( - ReadWrite=_reflection.GeneratedProtocolMessageType( - "ReadWrite", - (_message.Message,), - dict( - DESCRIPTOR=_TRANSACTIONOPTIONS_READWRITE, - __module__="google.cloud.spanner_v1.proto.transaction_pb2", - __doc__="""Message type to initiate a read-write transaction. - Currently this transaction type has no options. - - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.ReadWrite) - ), - ), - PartitionedDml=_reflection.GeneratedProtocolMessageType( - "PartitionedDml", - (_message.Message,), - dict( - DESCRIPTOR=_TRANSACTIONOPTIONS_PARTITIONEDDML, - __module__="google.cloud.spanner_v1.proto.transaction_pb2", - __doc__="""Message type to initiate a Partitioned DML transaction. - - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.PartitionedDml) - ), - ), - ReadOnly=_reflection.GeneratedProtocolMessageType( - "ReadOnly", - (_message.Message,), - dict( - DESCRIPTOR=_TRANSACTIONOPTIONS_READONLY, - __module__="google.cloud.spanner_v1.proto.transaction_pb2", - __doc__="""Message type to initiate a read-only transaction. - - - Attributes: - timestamp_bound: - How to choose the timestamp for the read-only transaction. - strong: - Read at a timestamp where all previously committed - transactions are visible. - min_read_timestamp: - Executes all reads at a timestamp >= ``min_read_timestamp``. 
- This is useful for requesting fresher data than some previous - read, or data that is fresh enough to observe the effects of - some previously committed transaction whose timestamp is - known. Note that this option can only be used in single-use - transactions. A timestamp in RFC3339 UTC "Zulu" format, - accurate to nanoseconds. Example: - ``"2014-10-02T15:01:23.045123456Z"``. - max_staleness: - Read data at a timestamp >= ``NOW - max_staleness`` seconds. - Guarantees that all writes that have committed more than the - specified number of seconds ago are visible. Because Cloud - Spanner chooses the exact timestamp, this mode works even if - the client's local clock is substantially skewed from Cloud - Spanner commit timestamps. Useful for reading the freshest - data available at a nearby replica, while bounding the - possible staleness if the local replica has fallen behind. - Note that this option can only be used in single-use - transactions. - read_timestamp: - Executes all reads at the given timestamp. Unlike other modes, - reads at a specific timestamp are repeatable; the same read at - the same timestamp always returns the same data. If the - timestamp is in the future, the read will block until the - specified timestamp, modulo the read's deadline. Useful for - large scale consistent reads such as mapreduces, or for - coordinating many reads against a consistent snapshot of the - data. A timestamp in RFC3339 UTC "Zulu" format, accurate to - nanoseconds. Example: ``"2014-10-02T15:01:23.045123456Z"``. - exact_staleness: - Executes all reads at a timestamp that is ``exact_staleness`` - old. The timestamp is chosen soon after the read is started. - Guarantees that all writes that have committed more than the - specified number of seconds ago are visible. Because Cloud - Spanner chooses the exact timestamp, this mode works even if - the client's local clock is substantially skewed from Cloud - Spanner commit timestamps. Useful for reading at nearby - replicas without the distributed timestamp negotiation - overhead of ``max_staleness``. - return_read_timestamp: - If true, the Cloud Spanner-selected read timestamp is included - in the [Transaction][google.spanner.v1.Transaction] message - that describes the transaction. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions.ReadOnly) - ), - ), - DESCRIPTOR=_TRANSACTIONOPTIONS, - __module__="google.cloud.spanner_v1.proto.transaction_pb2", - __doc__="""# Transactions - - Each session can have at most one active transaction at a time. After - the active transaction is completed, the session can immediately be - re-used for the next transaction. It is not necessary to create a new - session for each transaction. - - Transaction Modes - - - Cloud Spanner supports three transaction modes: - - 1. Locking read-write. This type of transaction is the only way to write - data into Cloud Spanner. These transactions rely on pessimistic - locking and, if necessary, two-phase commit. Locking read-write - transactions may abort, requiring the application to retry. - - 2. Snapshot read-only. This transaction type provides guaranteed - consistency across several reads, but does not allow writes. Snapshot - read-only transactions can be configured to read at timestamps in the - past. Snapshot read-only transactions do not need to be committed. - - 3. Partitioned DML. This type of transaction is used to execute a single - Partitioned DML statement. 
Partitioned DML partitions the key space - and runs the DML statement over each partition in parallel using - separate, internal transactions that commit independently. - Partitioned DML transactions do not need to be committed. - - For transactions that only read, snapshot read-only transactions provide - simpler semantics and are almost always faster. In particular, read-only - transactions do not take locks, so they do not conflict with read-write - transactions. As a consequence of not taking locks, they also do not - abort, so retry loops are not needed. - - Transactions may only read/write data in a single database. They may, - however, read/write data in different tables within that database. - - Locking Read-Write Transactions - - - Locking transactions may be used to atomically read-modify-write data - anywhere in a database. This type of transaction is externally - consistent. - - Clients should attempt to minimize the amount of time a transaction is - active. Faster transactions commit with higher probability and cause - less contention. Cloud Spanner attempts to keep read locks active as - long as the transaction continues to do reads, and the transaction has - not been terminated by [Commit][google.spanner.v1.Spanner.Commit] or - [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of - inactivity at the client may cause Cloud Spanner to release a - transaction's locks and abort it. - - Conceptually, a read-write transaction consists of zero or more reads or - SQL statements followed by [Commit][google.spanner.v1.Spanner.Commit]. - At any time before [Commit][google.spanner.v1.Spanner.Commit], the - client can send a [Rollback][google.spanner.v1.Spanner.Rollback] request - to abort the transaction. - - Semantics - - - Cloud Spanner can commit the transaction if all read locks it acquired - are still valid at commit time, and it is able to acquire write locks - for all writes. Cloud Spanner can abort the transaction for any reason. - If a commit attempt returns ``ABORTED``, Cloud Spanner guarantees that - the transaction has not modified any user data in Cloud Spanner. - - Unless the transaction commits, Cloud Spanner makes no guarantees about - how long the transaction's locks were held for. It is an error to use - Cloud Spanner locks for any sort of mutual exclusion other than between - Cloud Spanner transactions themselves. - - Retrying Aborted Transactions - - - When a transaction aborts, the application can choose to retry the whole - transaction again. To maximize the chances of successfully committing - the retry, the client should execute the retry in the same session as - the original attempt. The original session's lock priority increases - with each consecutive abort, meaning that each attempt has a slightly - better chance of success than the previous. - - Under some circumstances (e.g., many transactions attempting to modify - the same row(s)), a transaction can abort many times in a short period - before successfully committing. Thus, it is not a good idea to cap the - number of retries a transaction can attempt; instead, it is better to - limit the total amount of wall time spent retrying. - - Idle Transactions - - - A transaction is considered idle if it has no outstanding reads or SQL - queries and has not started a read or SQL query within the last 10 - seconds. Idle transactions can be aborted by Cloud Spanner so that they - don't hold on to locks indefinitely. In that case, the commit will fail - with error ``ABORTED``. 
- - If this behavior is undesirable, periodically executing a simple SQL - query in the transaction (e.g., ``SELECT 1``) prevents the transaction - from becoming idle. - - Snapshot Read-Only Transactions - - - Snapshot read-only transactions provides a simpler method than locking - read-write transactions for doing several consistent reads. However, - this type of transaction does not support writes. - - Snapshot transactions do not take locks. Instead, they work by choosing - a Cloud Spanner timestamp, then executing all reads at that timestamp. - Since they do not acquire locks, they do not block concurrent read-write - transactions. - - Unlike locking read-write transactions, snapshot read-only transactions - never abort. They can fail if the chosen read timestamp is garbage - collected; however, the default garbage collection policy is generous - enough that most applications do not need to worry about this in - practice. - - Snapshot read-only transactions do not need to call - [Commit][google.spanner.v1.Spanner.Commit] or - [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not - permitted to do so). - - To execute a snapshot transaction, the client specifies a timestamp - bound, which tells Cloud Spanner how to choose a read timestamp. - - The types of timestamp bound are: - - - Strong (the default). - - Bounded staleness. - - Exact staleness. - - If the Cloud Spanner database to be read is geographically distributed, - stale read-only transactions can execute more quickly than strong or - read-write transaction, because they are able to execute far from the - leader replica. - - Each type of timestamp bound is discussed in detail below. - - Strong - - - Strong reads are guaranteed to see the effects of all transactions that - have committed before the start of the read. Furthermore, all rows - yielded by a single read are consistent with each other -- if any part - of the read observes a transaction, all parts of the read see the - transaction. - - Strong reads are not repeatable: two consecutive strong read-only - transactions might return inconsistent results if there are concurrent - writes. If consistency across reads is required, the reads should be - executed within a transaction or at an exact read timestamp. - - See - [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. - - Exact Staleness - - - These timestamp bounds execute reads at a user-specified timestamp. - Reads at a timestamp are guaranteed to see a consistent prefix of the - global transaction history: they observe modifications done by all - transactions with a commit timestamp <= the read timestamp, and observe - none of the modifications done by transactions with a larger commit - timestamp. They will block until all conflicting transactions that may - be assigned commit timestamps <= the read timestamp have finished. - - The timestamp can either be expressed as an absolute Cloud Spanner - commit timestamp or a staleness relative to the current time. - - These modes do not require a "negotiation phase" to pick a timestamp. As - a result, they execute slightly faster than the equivalent boundedly - stale concurrency modes. On the other hand, boundedly stale reads - usually return fresher results. - - See - [TransactionOptions.ReadOnly.read\_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read\_timestamp] - and - [TransactionOptions.ReadOnly.exact\_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact\_staleness]. 
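Editor's aside, for illustration only: a sketch of how the high-level Python client expresses a strong read versus an exact-staleness read, per the timestamp bounds described above. The instance ID, database ID, and Singers table are placeholders, not names from this repository.

import datetime
from google.cloud import spanner

database = spanner.Client().instance("my-instance").database("my-database")

# Strong (the default): observes every transaction committed before the read starts.
with database.snapshot() as snapshot:
    strong_rows = list(snapshot.execute_sql("SELECT COUNT(*) FROM Singers"))

# Exact staleness: repeatable reads at NOW - 15s, with no timestamp negotiation phase.
with database.snapshot(exact_staleness=datetime.timedelta(seconds=15)) as snapshot:
    stale_rows = list(snapshot.execute_sql("SELECT COUNT(*) FROM Singers"))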
- - Bounded Staleness - - - Bounded staleness modes allow Cloud Spanner to pick the read timestamp, - subject to a user-provided staleness bound. Cloud Spanner chooses the - newest timestamp within the staleness bound that allows execution of the - reads at the closest available replica without blocking. - - All rows yielded are consistent with each other -- if any part of the - read observes a transaction, all parts of the read see the transaction. - Boundedly stale reads are not repeatable: two stale reads, even if they - use the same staleness bound, can execute at different timestamps and - thus return inconsistent results. - - Boundedly stale reads execute in two phases: the first phase negotiates - a timestamp among all replicas needed to serve the read. In the second - phase, reads are executed at the negotiated timestamp. - - As a result of the two phase execution, bounded staleness reads are - usually a little slower than comparable exact staleness reads. However, - they are typically able to return fresher results, and are more likely - to execute at the closest replica. - - Because the timestamp negotiation requires up-front knowledge of which - rows will be read, it can only be used with single-use read-only - transactions. - - See - [TransactionOptions.ReadOnly.max\_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max\_staleness] - and - [TransactionOptions.ReadOnly.min\_read\_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min\_read\_timestamp]. - - Old Read Timestamps and Garbage Collection - - - Cloud Spanner continuously garbage collects deleted and overwritten data - in the background to reclaim storage space. This process is known as - "version GC". By default, version GC reclaims versions after they are - one hour old. Because of this, Cloud Spanner cannot perform reads at - read timestamps more than one hour in the past. This restriction also - applies to in-progress reads and/or SQL queries whose timestamp become - too old while executing. Reads and SQL queries with too-old read - timestamps fail with the error ``FAILED_PRECONDITION``. - - Partitioned DML Transactions - - - Partitioned DML transactions are used to execute DML statements with a - different execution strategy that provides different, and often better, - scalability properties for large, table-wide operations than DML in a - ReadWrite transaction. Smaller scoped statements, such as an OLTP - workload, should prefer using ReadWrite transactions. - - Partitioned DML partitions the keyspace and runs the DML statement on - each partition in separate, internal transactions. These transactions - commit automatically when complete, and run independently from one - another. - - To reduce lock contention, this execution strategy only acquires read - locks on rows that match the WHERE clause of the statement. - Additionally, the smaller per-partition transactions hold locks for less - time. - - That said, Partitioned DML is not a drop-in replacement for standard DML - used in ReadWrite transactions. - - - The DML statement must be fully-partitionable. Specifically, the - statement must be expressible as the union of many statements which - each access only a single row of the table. - - - The statement is not applied atomically to all rows of the table. - Rather, the statement is applied atomically to partitions of the - table, in independent transactions. Secondary index rows are updated - atomically with the base table rows. 
- - - Partitioned DML does not guarantee exactly-once execution semantics - against a partition. The statement will be applied at least once to - each partition. It is strongly recommended that the DML statement - should be idempotent to avoid unexpected results. For instance, it is - potentially dangerous to run a statement such as - ``UPDATE table SET column = column + 1`` as it could be run multiple - times against some rows. - - - The partitions are committed automatically - there is no support for - Commit or Rollback. If the call returns an error, or if the client - issuing the ExecuteSql call dies, it is possible that some rows had - the statement executed on them successfully. It is also possible that - statement was never executed against other rows. - - - Partitioned DML transactions may only contain the execution of a - single DML statement via ExecuteSql or ExecuteStreamingSql. - - - If any error is encountered during the execution of the partitioned - DML operation (for instance, a UNIQUE INDEX violation, division by - zero, or a value that cannot be stored due to schema constraints), - then the operation is stopped at that point and an error is returned. - It is possible that at this point, some partitions have been - committed (or even committed multiple times), and other partitions - have not been run at all. - - Given the above, Partitioned DML is good fit for large, database-wide, - operations that are idempotent, such as deleting old rows from a very - large table. - - - Attributes: - mode: - Required. The type of transaction. - read_write: - Transaction may write. Authorization to begin a read-write - transaction requires - ``spanner.databases.beginOrRollbackReadWriteTransaction`` - permission on the ``session`` resource. - partitioned_dml: - Partitioned DML transaction. Authorization to begin a - Partitioned DML transaction requires - ``spanner.databases.beginPartitionedDmlTransaction`` - permission on the ``session`` resource. - read_only: - Transaction will not write. Authorization to begin a read- - only transaction requires - ``spanner.databases.beginReadOnlyTransaction`` permission on - the ``session`` resource. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionOptions) - ), -) -_sym_db.RegisterMessage(TransactionOptions) -_sym_db.RegisterMessage(TransactionOptions.ReadWrite) -_sym_db.RegisterMessage(TransactionOptions.PartitionedDml) -_sym_db.RegisterMessage(TransactionOptions.ReadOnly) - -Transaction = _reflection.GeneratedProtocolMessageType( - "Transaction", - (_message.Message,), - dict( - DESCRIPTOR=_TRANSACTION, - __module__="google.cloud.spanner_v1.proto.transaction_pb2", - __doc__="""A transaction. - - - Attributes: - id: - \ ``id`` may be used to identify the transaction in subsequent - [Read][google.spanner.v1.Spanner.Read], - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], - [Commit][google.spanner.v1.Spanner.Commit], or - [Rollback][google.spanner.v1.Spanner.Rollback] calls. Single- - use read-only transactions do not have IDs, because single-use - transactions do not support multiple requests. - read_timestamp: - For snapshot read-only transactions, the read timestamp chosen - for the transaction. Not returned by default: see [Transaction - Options.ReadOnly.return\_read\_timestamp][google.spanner.v1.Tr - ansactionOptions.ReadOnly.return\_read\_timestamp]. A - timestamp in RFC3339 UTC "Zulu" format, accurate to - nanoseconds. Example: ``"2014-10-02T15:01:23.045123456Z"``. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Transaction) - ), -) -_sym_db.RegisterMessage(Transaction) - -TransactionSelector = _reflection.GeneratedProtocolMessageType( - "TransactionSelector", - (_message.Message,), - dict( - DESCRIPTOR=_TRANSACTIONSELECTOR, - __module__="google.cloud.spanner_v1.proto.transaction_pb2", - __doc__="""This message is used to select the transaction in which a - [Read][google.spanner.v1.Spanner.Read] or - [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs. - - See [TransactionOptions][google.spanner.v1.TransactionOptions] for more - information about transactions. - - - Attributes: - selector: - If no fields are set, the default is a single use transaction - with strong concurrency. - single_use: - Execute the read or SQL query in a temporary transaction. This - is the most efficient way to execute a transaction that - consists of a single SQL query. - id: - Execute the read or SQL query in a previously-started - transaction. - begin: - Begin a new transaction and execute this read or SQL query in - it. The transaction ID of the new transaction is returned in [ - ResultSetMetadata.transaction][google.spanner.v1.ResultSetMeta - data.transaction], which is a - [Transaction][google.spanner.v1.Transaction]. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.TransactionSelector) - ), -) -_sym_db.RegisterMessage(TransactionSelector) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/spanner/google/cloud/spanner_v1/proto/transaction_pb2_grpc.py b/spanner/google/cloud/spanner_v1/proto/transaction_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/transaction_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/spanner/google/cloud/spanner_v1/proto/type.proto b/spanner/google/cloud/spanner_v1/proto/type.proto deleted file mode 100644 index ccef29143e9a..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/type.proto +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.spanner.v1; - -import "google/api/annotations.proto"; - -option csharp_namespace = "Google.Cloud.Spanner.V1"; -option go_package = "google.golang.org/genproto/googleapis/spanner/v1;spanner"; -option java_multiple_files = true; -option java_outer_classname = "TypeProto"; -option java_package = "com.google.spanner.v1"; -option php_namespace = "Google\\Cloud\\Spanner\\V1"; - -// `TypeCode` is used as part of [Type][google.spanner.v1.Type] to -// indicate the type of a Cloud Spanner value. -// -// Each legal value of a type can be encoded to or decoded from a JSON -// value, using the encodings described below. All Cloud Spanner values can -// be `null`, regardless of type; `null`s are always encoded as a JSON -// `null`. -enum TypeCode { - // Not specified. 
- TYPE_CODE_UNSPECIFIED = 0; - - // Encoded as JSON `true` or `false`. - BOOL = 1; - - // Encoded as `string`, in decimal format. - INT64 = 2; - - // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or - // `"-Infinity"`. - FLOAT64 = 3; - - // Encoded as `string` in RFC 3339 timestamp format. The time zone - // must be present, and must be `"Z"`. - // - // If the schema has the column option - // `allow_commit_timestamp=true`, the placeholder string - // `"spanner.commit_timestamp()"` can be used to instruct the system - // to insert the commit timestamp associated with the transaction - // commit. - TIMESTAMP = 4; - - // Encoded as `string` in RFC 3339 date format. - DATE = 5; - - // Encoded as `string`. - STRING = 6; - - // Encoded as a base64-encoded `string`, as described in RFC 4648, - // section 4. - BYTES = 7; - - // Encoded as `list`, where the list elements are represented - // according to - // [array_element_type][google.spanner.v1.Type.array_element_type]. - ARRAY = 8; - - // Encoded as `list`, where list element `i` is represented according - // to [struct_type.fields[i]][google.spanner.v1.StructType.fields]. - STRUCT = 9; -} - -// `Type` indicates the type of a Cloud Spanner value, as might be stored in a -// table cell or returned from an SQL query. -message Type { - // Required. The [TypeCode][google.spanner.v1.TypeCode] for this type. - TypeCode code = 1; - - // If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type` - // is the type of the array elements. - Type array_element_type = 2; - - // If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type` - // provides type information for the struct's fields. - StructType struct_type = 3; -} - -// `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. -message StructType { - // Message representing a single field of a struct. - message Field { - // The name of the field. For reads, this is the column name. For - // SQL queries, it is the column alias (e.g., `"Word"` in the - // query `"SELECT 'hello' AS Word"`), or the column name (e.g., - // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some - // columns might have an empty name (e.g., !"SELECT - // UPPER(ColName)"`). Note that a query result can contain - // multiple fields with the same name. - string name = 1; - - // The type of the field. - Type type = 2; - } - - // The list of fields that make up this struct. Order is - // significant, because values of this struct type are represented as - // lists, where the order of field values matches the order of - // fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields - // matches the order of columns in a read request, or the order of - // fields in the `SELECT` clause of a query. - repeated Field fields = 1; -} diff --git a/spanner/google/cloud/spanner_v1/proto/type_pb2.py b/spanner/google/cloud/spanner_v1/proto/type_pb2.py deleted file mode 100644 index 2ef35b36c655..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/type_pb2.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
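Editor's aside (not part of the deleted files): a hedged sketch of how the Type and TypeCode definitions in type.proto above surface in the Python client, by passing explicit parameter types to a query. The instance, database, and Singers schema are placeholder names.

from google.cloud import spanner
from google.cloud.spanner_v1 import param_types

database = spanner.Client().instance("my-instance").database("my-database")

with database.snapshot() as snapshot:
    rows = snapshot.execute_sql(
        "SELECT SingerId, FirstName FROM Singers "
        "WHERE FirstName = @name AND SingerId IN UNNEST(@ids)",
        params={"name": "Alice", "ids": [1, 2, 3]},
        param_types={
            "name": param_types.STRING,                   # TypeCode.STRING
            "ids": param_types.Array(param_types.INT64),  # ARRAY<INT64>
        },
    )
    for row in rows:
        print(row)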
-# source: google/cloud/spanner_v1/proto/type.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/spanner_v1/proto/type.proto", - package="google.spanner.v1", - syntax="proto3", - serialized_options=_b( - "\n\025com.google.spanner.v1B\tTypeProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1" - ), - serialized_pb=_b( - '\n(google/cloud/spanner_v1/proto/type.proto\x12\x11google.spanner.v1\x1a\x1cgoogle/api/annotations.proto"\x9a\x01\n\x04Type\x12)\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1b.google.spanner.v1.TypeCode\x12\x33\n\x12\x61rray_element_type\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type\x12\x32\n\x0bstruct_type\x18\x03 \x01(\x0b\x32\x1d.google.spanner.v1.StructType"\x7f\n\nStructType\x12\x33\n\x06\x66ields\x18\x01 \x03(\x0b\x32#.google.spanner.v1.StructType.Field\x1a<\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x04type\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type*\x8e\x01\n\x08TypeCode\x12\x19\n\x15TYPE_CODE_UNSPECIFIED\x10\x00\x12\x08\n\x04\x42OOL\x10\x01\x12\t\n\x05INT64\x10\x02\x12\x0b\n\x07\x46LOAT64\x10\x03\x12\r\n\tTIMESTAMP\x10\x04\x12\x08\n\x04\x44\x41TE\x10\x05\x12\n\n\x06STRING\x10\x06\x12\t\n\x05\x42YTES\x10\x07\x12\t\n\x05\x41RRAY\x10\x08\x12\n\n\x06STRUCT\x10\tB\x92\x01\n\x15\x63om.google.spanner.v1B\tTypeProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], -) - -_TYPECODE = _descriptor.EnumDescriptor( - name="TypeCode", - full_name="google.spanner.v1.TypeCode", - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_CODE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - ), - _descriptor.EnumValueDescriptor( - name="BOOL", index=1, number=1, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="INT64", index=2, number=2, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="FLOAT64", index=3, number=3, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="TIMESTAMP", index=4, number=4, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="DATE", index=5, number=5, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="STRING", index=6, number=6, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="BYTES", index=7, number=7, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="ARRAY", index=8, number=8, serialized_options=None, type=None - ), - _descriptor.EnumValueDescriptor( - name="STRUCT", index=9, number=9, serialized_options=None, type=None - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=380, - serialized_end=522, -) 
-_sym_db.RegisterEnumDescriptor(_TYPECODE) - -TypeCode = enum_type_wrapper.EnumTypeWrapper(_TYPECODE) -TYPE_CODE_UNSPECIFIED = 0 -BOOL = 1 -INT64 = 2 -FLOAT64 = 3 -TIMESTAMP = 4 -DATE = 5 -STRING = 6 -BYTES = 7 -ARRAY = 8 -STRUCT = 9 - - -_TYPE = _descriptor.Descriptor( - name="Type", - full_name="google.spanner.v1.Type", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="code", - full_name="google.spanner.v1.Type.code", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="array_element_type", - full_name="google.spanner.v1.Type.array_element_type", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="struct_type", - full_name="google.spanner.v1.Type.struct_type", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=94, - serialized_end=248, -) - - -_STRUCTTYPE_FIELD = _descriptor.Descriptor( - name="Field", - full_name="google.spanner.v1.StructType.Field", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.spanner.v1.StructType.Field.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.spanner.v1.StructType.Field.type", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=317, - serialized_end=377, -) - -_STRUCTTYPE = _descriptor.Descriptor( - name="StructType", - full_name="google.spanner.v1.StructType", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="fields", - full_name="google.spanner.v1.StructType.fields", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[_STRUCTTYPE_FIELD], - enum_types=[], - serialized_options=None, - is_extendable=False, - 
syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=250, - serialized_end=377, -) - -_TYPE.fields_by_name["code"].enum_type = _TYPECODE -_TYPE.fields_by_name["array_element_type"].message_type = _TYPE -_TYPE.fields_by_name["struct_type"].message_type = _STRUCTTYPE -_STRUCTTYPE_FIELD.fields_by_name["type"].message_type = _TYPE -_STRUCTTYPE_FIELD.containing_type = _STRUCTTYPE -_STRUCTTYPE.fields_by_name["fields"].message_type = _STRUCTTYPE_FIELD -DESCRIPTOR.message_types_by_name["Type"] = _TYPE -DESCRIPTOR.message_types_by_name["StructType"] = _STRUCTTYPE -DESCRIPTOR.enum_types_by_name["TypeCode"] = _TYPECODE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Type = _reflection.GeneratedProtocolMessageType( - "Type", - (_message.Message,), - dict( - DESCRIPTOR=_TYPE, - __module__="google.cloud.spanner_v1.proto.type_pb2", - __doc__="""\ ``Type`` indicates the type of a Cloud Spanner value, as - might be stored in a table cell or returned from an SQL query. - - - Attributes: - code: - Required. The [TypeCode][google.spanner.v1.TypeCode] for this - type. - array_element_type: - If [code][google.spanner.v1.Type.code] == - [ARRAY][google.spanner.v1.TypeCode.ARRAY], then - ``array_element_type`` is the type of the array elements. - struct_type: - If [code][google.spanner.v1.Type.code] == - [STRUCT][google.spanner.v1.TypeCode.STRUCT], then - ``struct_type`` provides type information for the struct's - fields. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.Type) - ), -) -_sym_db.RegisterMessage(Type) - -StructType = _reflection.GeneratedProtocolMessageType( - "StructType", - (_message.Message,), - dict( - Field=_reflection.GeneratedProtocolMessageType( - "Field", - (_message.Message,), - dict( - DESCRIPTOR=_STRUCTTYPE_FIELD, - __module__="google.cloud.spanner_v1.proto.type_pb2", - __doc__="""Message representing a single field of a struct. - - - Attributes: - name: - The name of the field. For reads, this is the column name. For - SQL queries, it is the column alias (e.g., ``"Word"`` in the - query ``"SELECT 'hello' AS Word"``), or the column name (e.g., - ``"ColName"`` in the query ``"SELECT ColName FROM Table"``). - Some columns might have an empty name (e.g., !"SELECT - UPPER(ColName)"\`). Note that a query result can contain - multiple fields with the same name. - type: - The type of the field. - """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.StructType.Field) - ), - ), - DESCRIPTOR=_STRUCTTYPE, - __module__="google.cloud.spanner_v1.proto.type_pb2", - __doc__="""\ ``StructType`` defines the fields of a - [STRUCT][google.spanner.v1.TypeCode.STRUCT] type. - - - Attributes: - fields: - The list of fields that make up this struct. Order is - significant, because values of this struct type are - represented as lists, where the order of field values matches - the order of fields in the - [StructType][google.spanner.v1.StructType]. In turn, the order - of fields matches the order of columns in a read request, or - the order of fields in the ``SELECT`` clause of a query. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.v1.StructType) - ), -) -_sym_db.RegisterMessage(StructType) -_sym_db.RegisterMessage(StructType.Field) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/spanner/google/cloud/spanner_v1/proto/type_pb2_grpc.py b/spanner/google/cloud/spanner_v1/proto/type_pb2_grpc.py deleted file mode 100644 index 07cb78fe03a9..000000000000 --- a/spanner/google/cloud/spanner_v1/proto/type_pb2_grpc.py +++ /dev/null @@ -1,2 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc diff --git a/spanner/google/cloud/spanner_v1/session.py b/spanner/google/cloud/spanner_v1/session.py deleted file mode 100644 index f8e7e88d9731..000000000000 --- a/spanner/google/cloud/spanner_v1/session.py +++ /dev/null @@ -1,381 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrapper for Cloud Spanner Session objects.""" - -from functools import total_ordering -import time - -from google.rpc.error_details_pb2 import RetryInfo - -# pylint: disable=ungrouped-imports -from google.api_core.exceptions import Aborted, GoogleAPICallError, NotFound -import google.api_core.gapic_v1.method -from google.cloud.spanner_v1._helpers import _metadata_with_prefix -from google.cloud.spanner_v1.batch import Batch -from google.cloud.spanner_v1.snapshot import Snapshot -from google.cloud.spanner_v1.transaction import Transaction -import random - -# pylint: enable=ungrouped-imports - - -DEFAULT_RETRY_TIMEOUT_SECS = 30 -"""Default timeout used by :meth:`Session.run_in_transaction`.""" - - -@total_ordering -class Session(object): - """Representation of a Cloud Spanner Session. - - We can use a :class:`Session` to: - - * :meth:`create` the session - * Use :meth:`exists` to check for the existence of the session - * :meth:`drop` the session - - :type database: :class:`~google.cloud.spanner_v1.database.Database` - :param database: The database to which the session is bound. - - :type labels: dict (str -> str) - :param labels: (Optional) User-assigned labels for the session. - """ - - _session_id = None - _transaction = None - - def __init__(self, database, labels=None): - self._database = database - if labels is None: - labels = {} - self._labels = labels - - def __lt__(self, other): - return self._session_id < other._session_id - - @property - def session_id(self): - """Read-only ID, set by the back-end during :meth:`create`.""" - return self._session_id - - @property - def labels(self): - """User-assigned labels for the session. - - :rtype: dict (str -> str) - :returns: the labels dict (empty if no labels were assigned. - """ - return self._labels - - @property - def name(self): - """Session name used in requests. - - .. note:: - - This property will not change if ``session_id`` does not, but the - return value is not cached. 
- - The session name is of the form - - ``"projects/../instances/../databases/../sessions/{session_id}"`` - - :rtype: str - :returns: The session name. - :raises ValueError: if session is not yet created - """ - if self._session_id is None: - raise ValueError("No session ID set by back-end") - return self._database.name + "/sessions/" + self._session_id - - def create(self): - """Create this session, bound to its database. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.Spanner.CreateSession - - :raises: :exc:`ValueError` if :attr:`session_id` is already set. - """ - if self._session_id is not None: - raise ValueError("Session ID already set by back-end") - api = self._database.spanner_api - metadata = _metadata_with_prefix(self._database.name) - kw = {} - if self._labels: - kw = {"session": {"labels": self._labels}} - session_pb = api.create_session(self._database.name, metadata=metadata, **kw) - self._session_id = session_pb.name.split("/")[-1] - - def exists(self): - """Test for the existence of this session. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.Spanner.GetSession - - :rtype: bool - :returns: True if the session exists on the back-end, else False. - """ - if self._session_id is None: - return False - api = self._database.spanner_api - metadata = _metadata_with_prefix(self._database.name) - try: - api.get_session(self.name, metadata=metadata) - except NotFound: - return False - - return True - - def delete(self): - """Delete this session. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.Spanner.GetSession - - :raises ValueError: if :attr:`session_id` is not already set. - :raises NotFound: if the session does not exist - """ - if self._session_id is None: - raise ValueError("Session ID not set by back-end") - api = self._database.spanner_api - metadata = _metadata_with_prefix(self._database.name) - - api.delete_session(self.name, metadata=metadata) - - def snapshot(self, **kw): - """Create a snapshot to perform a set of reads with shared staleness. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.TransactionOptions.ReadOnly - - :type kw: dict - :param kw: Passed through to - :class:`~google.cloud.spanner_v1.snapshot.Snapshot` ctor. - - :rtype: :class:`~google.cloud.spanner_v1.snapshot.Snapshot` - :returns: a snapshot bound to this session - :raises ValueError: if the session has not yet been created. - """ - if self._session_id is None: - raise ValueError("Session has not been created.") - - return Snapshot(self, **kw) - - def read(self, table, columns, keyset, index="", limit=0): - """Perform a ``StreamingRead`` API request for rows in a table. - - :type table: str - :param table: name of the table from which to fetch data - - :type columns: list of str - :param columns: names of columns to be retrieved - - :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` - :param keyset: keys / ranges identifying rows to be retrieved - - :type index: str - :param index: (Optional) name of index to use, rather than the - table's primary key - - :type limit: int - :param limit: (Optional) maximum number of rows to return - - :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` - :returns: a result set instance which can be used to consume rows. 
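A hedged sketch of the create / exists / delete lifecycle described above, assuming the 1.x client surface shipped with this tree (``Client``, ``Instance.database``, ``Database.session``); the project, instance, and database IDs are placeholders and application default credentials must be configured:

    from google.cloud import spanner

    client = spanner.Client(project="my-project")
    database = client.instance("my-instance").database("my-db")

    session = database.session()    # wraps Session(database)
    session.create()                # CreateSession RPC; sets session_id
    print(session.name)             # .../sessions/<backend-assigned id>
    print(session.exists())         # GetSession RPC -> True
    session.delete()                # DeleteSession RPC
    print(session.exists())         # False once the back-end has dropped it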
- """ - return self.snapshot().read(table, columns, keyset, index, limit) - - def execute_sql( - self, - sql, - params=None, - param_types=None, - query_mode=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - ): - """Perform an ``ExecuteStreamingSql`` API request. - - :type sql: str - :param sql: SQL query statement - - :type params: dict, {str -> column value} - :param params: values for parameter replacement. Keys must match - the names used in ``sql``. - - :type param_types: - dict, {str -> :class:`google.spanner.v1.type_pb2.TypeCode`} - :param param_types: (Optional) explicit types for one or more param - values; overrides default type detection on the - back-end. - - :type query_mode: - :class:`google.spanner.v1.spanner_pb2.ExecuteSqlRequest.QueryMode` - :param query_mode: Mode governing return of results / query plan. See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 - - :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` - :returns: a result set instance which can be used to consume rows. - """ - return self.snapshot().execute_sql( - sql, params, param_types, query_mode, retry=retry, timeout=timeout - ) - - def batch(self): - """Factory to create a batch for this session. - - :rtype: :class:`~google.cloud.spanner_v1.batch.Batch` - :returns: a batch bound to this session - :raises ValueError: if the session has not yet been created. - """ - if self._session_id is None: - raise ValueError("Session has not been created.") - - return Batch(self) - - def transaction(self): - """Create a transaction to perform a set of reads with shared staleness. - - :rtype: :class:`~google.cloud.spanner_v1.transaction.Transaction` - :returns: a transaction bound to this session - :raises ValueError: if the session has not yet been created. - """ - if self._session_id is None: - raise ValueError("Session has not been created.") - - if self._transaction is not None: - self._transaction._rolled_back = True - del self._transaction - - txn = self._transaction = Transaction(self) - return txn - - def run_in_transaction(self, func, *args, **kw): - """Perform a unit of work in a transaction, retrying on abort. - - :type func: callable - :param func: takes a required positional argument, the transaction, - and additional positional / keyword arguments as supplied - by the caller. - - :type args: tuple - :param args: additional positional arguments to be passed to ``func``. - - :type kw: dict - :param kw: (Optional) keyword arguments to be passed to ``func``. - If passed, "timeout_secs" will be removed and used to - override the default retry timeout which defines maximum timestamp - to continue retrying the transaction. - - :rtype: Any - :returns: The return value of ``func``. - - :raises Exception: - reraises any non-ABORT execptions raised by ``func``. 
- """ - deadline = time.time() + kw.pop("timeout_secs", DEFAULT_RETRY_TIMEOUT_SECS) - attempts = 0 - - while True: - if self._transaction is None: - txn = self.transaction() - else: - txn = self._transaction - if txn._transaction_id is None: - txn.begin() - - try: - attempts += 1 - return_value = func(txn, *args, **kw) - except Aborted as exc: - del self._transaction - _delay_until_retry(exc, deadline, attempts) - continue - except GoogleAPICallError: - del self._transaction - raise - except Exception: - txn.rollback() - raise - - try: - txn.commit() - except Aborted as exc: - del self._transaction - _delay_until_retry(exc, deadline, attempts) - except GoogleAPICallError: - del self._transaction - raise - else: - return return_value - - -# pylint: disable=misplaced-bare-raise -# -# Rational: this function factors out complex shared deadline / retry -# handling from two `except:` clauses. -def _delay_until_retry(exc, deadline, attempts): - """Helper for :meth:`Session.run_in_transaction`. - - Detect retryable abort, and impose server-supplied delay. - - :type exc: :class:`google.api_core.exceptions.Aborted` - :param exc: exception for aborted transaction - - :type deadline: float - :param deadline: maximum timestamp to continue retrying the transaction. - - :type attempts: int - :param attempts: number of call retries - """ - cause = exc.errors[0] - - now = time.time() - - if now >= deadline: - raise - - delay = _get_retry_delay(cause, attempts) - if delay is not None: - - if now + delay > deadline: - raise - - time.sleep(delay) - - -# pylint: enable=misplaced-bare-raise - - -def _get_retry_delay(cause, attempts): - """Helper for :func:`_delay_until_retry`. - - :type exc: :class:`grpc.Call` - :param exc: exception for aborted transaction - - :rtype: float - :returns: seconds to wait before retrying the transaction. - - :type attempts: int - :param attempts: number of call retries - """ - metadata = dict(cause.trailing_metadata()) - retry_info_pb = metadata.get("google.rpc.retryinfo-bin") - if retry_info_pb is not None: - retry_info = RetryInfo() - retry_info.ParseFromString(retry_info_pb) - nanos = retry_info.retry_delay.nanos - return retry_info.retry_delay.seconds + nanos / 1.0e9 - - return 2 ** attempts + random.random() diff --git a/spanner/google/cloud/spanner_v1/snapshot.py b/spanner/google/cloud/spanner_v1/snapshot.py deleted file mode 100644 index ec7008fb7516..000000000000 --- a/spanner/google/cloud/spanner_v1/snapshot.py +++ /dev/null @@ -1,505 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Model a set of read-only queries to a database as a snapshot.""" - -import functools - -from google.protobuf.struct_pb2 import Struct -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector - -from google.api_core.exceptions import ServiceUnavailable -import google.api_core.gapic_v1.method -from google.cloud._helpers import _datetime_to_pb_timestamp -from google.cloud._helpers import _timedelta_to_duration_pb -from google.cloud.spanner_v1._helpers import _make_value_pb -from google.cloud.spanner_v1._helpers import _metadata_with_prefix -from google.cloud.spanner_v1._helpers import _SessionWrapper -from google.cloud.spanner_v1.streamed import StreamedResultSet -from google.cloud.spanner_v1.types import PartitionOptions - - -def _restart_on_unavailable(restart): - """Restart iteration after :exc:`.ServiceUnavailable`. - - :type restart: callable - :param restart: curried function returning iterator - """ - resume_token = b"" - item_buffer = [] - iterator = restart() - while True: - try: - for item in iterator: - item_buffer.append(item) - if item.resume_token: - resume_token = item.resume_token - break - except ServiceUnavailable: - del item_buffer[:] - iterator = restart(resume_token=resume_token) - continue - - if len(item_buffer) == 0: - break - - for item in item_buffer: - yield item - - del item_buffer[:] - - -class _SnapshotBase(_SessionWrapper): - """Base class for Snapshot. - - Allows reuse of API request methods with different transaction selector. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session used to perform the commit - """ - - _multi_use = False - _transaction_id = None - _read_request_count = 0 - _execute_sql_count = 0 - - def _make_txn_selector(self): # pylint: disable=redundant-returns-doc - """Helper for :meth:`read` / :meth:`execute_sql`. - - Subclasses must override, returning an instance of - :class:`transaction_pb2.TransactionSelector` - appropriate for making ``read`` / ``execute_sql`` requests - - :raises: NotImplementedError, always - """ - raise NotImplementedError - - def read(self, table, columns, keyset, index="", limit=0, partition=None): - """Perform a ``StreamingRead`` API request for rows in a table. - - :type table: str - :param table: name of the table from which to fetch data - - :type columns: list of str - :param columns: names of columns to be retrieved - - :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` - :param keyset: keys / ranges identifying rows to be retrieved - - :type index: str - :param index: (Optional) name of index to use, rather than the - table's primary key - - :type limit: int - :param limit: (Optional) maximum number of rows to return. - Incompatible with ``partition``. - - :type partition: bytes - :param partition: (Optional) one of the partition tokens returned - from :meth:`partition_read`. Incompatible with - ``limit``. - - :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` - :returns: a result set instance which can be used to consume rows. - - :raises ValueError: - for reuse of single-use snapshots, or if a transaction ID is - already pending for multiple-use snapshots. 
- """ - if self._read_request_count > 0: - if not self._multi_use: - raise ValueError("Cannot re-use single-use snapshot.") - if self._transaction_id is None: - raise ValueError("Transaction ID pending.") - - database = self._session._database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - transaction = self._make_txn_selector() - - restart = functools.partial( - api.streaming_read, - self._session.name, - table, - columns, - keyset._to_pb(), - transaction=transaction, - index=index, - limit=limit, - partition_token=partition, - metadata=metadata, - ) - - iterator = _restart_on_unavailable(restart) - - self._read_request_count += 1 - - if self._multi_use: - return StreamedResultSet(iterator, source=self) - else: - return StreamedResultSet(iterator) - - def execute_sql( - self, - sql, - params=None, - param_types=None, - query_mode=None, - partition=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - ): - """Perform an ``ExecuteStreamingSql`` API request. - - :type sql: str - :param sql: SQL query statement - - :type params: dict, {str -> column value} - :param params: values for parameter replacement. Keys must match - the names used in ``sql``. - - :type param_types: dict[str -> Union[dict, .types.Type]] - :param param_types: - (Optional) maps explicit types for one or more param values; - required if parameters are passed. - - :type query_mode: - :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` - :param query_mode: Mode governing return of results / query plan. See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 - - :type partition: bytes - :param partition: (Optional) one of the partition tokens returned - from :meth:`partition_query`. - - :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` - :returns: a result set instance which can be used to consume rows. - - :raises ValueError: - for reuse of single-use snapshots, or if a transaction ID is - already pending for multiple-use snapshots. - """ - if self._read_request_count > 0: - if not self._multi_use: - raise ValueError("Cannot re-use single-use snapshot.") - if self._transaction_id is None: - raise ValueError("Transaction ID pending.") - - if params is not None: - if param_types is None: - raise ValueError("Specify 'param_types' when passing 'params'.") - params_pb = Struct( - fields={key: _make_value_pb(value) for key, value in params.items()} - ) - else: - params_pb = None - - database = self._session._database - metadata = _metadata_with_prefix(database.name) - transaction = self._make_txn_selector() - api = database.spanner_api - - restart = functools.partial( - api.execute_streaming_sql, - self._session.name, - sql, - transaction=transaction, - params=params_pb, - param_types=param_types, - query_mode=query_mode, - partition_token=partition, - seqno=self._execute_sql_count, - metadata=metadata, - retry=retry, - timeout=timeout, - ) - - iterator = _restart_on_unavailable(restart) - - self._read_request_count += 1 - self._execute_sql_count += 1 - - if self._multi_use: - return StreamedResultSet(iterator, source=self) - else: - return StreamedResultSet(iterator) - - def partition_read( - self, - table, - columns, - keyset, - index="", - partition_size_bytes=None, - max_partitions=None, - ): - """Perform a ``ParitionRead`` API request for rows in a table. 
- - :type table: str - :param table: name of the table from which to fetch data - - :type columns: list of str - :param columns: names of columns to be retrieved - - :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet` - :param keyset: keys / ranges identifying rows to be retrieved - - :type index: str - :param index: (Optional) name of index to use, rather than the - table's primary key - - :type partition_size_bytes: int - :param partition_size_bytes: - (Optional) desired size for each partition generated. The service - uses this as a hint, the actual partition size may differ. - - :type max_partitions: int - :param max_partitions: - (Optional) desired maximum number of partitions generated. The - service uses this as a hint, the actual number of partitions may - differ. - - :rtype: iterable of bytes - :returns: a sequence of partition tokens - - :raises ValueError: - for single-use snapshots, or if a transaction ID is - already associtated with the snapshot. - """ - if not self._multi_use: - raise ValueError("Cannot use single-use snapshot.") - - if self._transaction_id is None: - raise ValueError("Transaction not started.") - - database = self._session._database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - transaction = self._make_txn_selector() - partition_options = PartitionOptions( - partition_size_bytes=partition_size_bytes, max_partitions=max_partitions - ) - - response = api.partition_read( - session=self._session.name, - table=table, - columns=columns, - key_set=keyset._to_pb(), - transaction=transaction, - index=index, - partition_options=partition_options, - metadata=metadata, - ) - - return [partition.partition_token for partition in response.partitions] - - def partition_query( - self, - sql, - params=None, - param_types=None, - partition_size_bytes=None, - max_partitions=None, - ): - """Perform a ``ParitionQuery`` API request. - - :type sql: str - :param sql: SQL query statement - - :type params: dict, {str -> column value} - :param params: values for parameter replacement. Keys must match - the names used in ``sql``. - - :type param_types: dict[str -> Union[dict, .types.Type]] - :param param_types: - (Optional) maps explicit types for one or more param values; - required if parameters are passed. - - :type partition_size_bytes: int - :param partition_size_bytes: - (Optional) desired size for each partition generated. The service - uses this as a hint, the actual partition size may differ. - - :type max_partitions: int - :param max_partitions: - (Optional) desired maximum number of partitions generated. The - service uses this as a hint, the actual number of partitions may - differ. - - :rtype: iterable of bytes - :returns: a sequence of partition tokens - - :raises ValueError: - for single-use snapshots, or if a transaction ID is - already associtated with the snapshot. 
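A hedged sketch built directly on the methods above: a multi-use snapshot is begun, partition tokens are fetched with ``partition_read``, and each token is consumed with ``read``. In this tree the session would normally come from the database's pool; IDs and table names are placeholders:

    from google.cloud import spanner
    from google.cloud.spanner_v1.keyset import KeySet

    client = spanner.Client(project="my-project")
    database = client.instance("my-instance").database("my-db")

    session = database.session()
    session.create()

    snapshot = session.snapshot(multi_use=True)
    snapshot.begin()                       # partitioning needs a transaction ID

    tokens = snapshot.partition_read(
        table="Singers",
        columns=("SingerId", "FirstName"),
        keyset=KeySet(all_=True),
        max_partitions=8,                  # hint only; the service may differ
    )

    for token in tokens:
        for row in snapshot.read(
            "Singers", ("SingerId", "FirstName"), KeySet(all_=True), partition=token
        ):
            print(row)

    session.delete()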
- """ - if not self._multi_use: - raise ValueError("Cannot use single-use snapshot.") - - if self._transaction_id is None: - raise ValueError("Transaction not started.") - - if params is not None: - if param_types is None: - raise ValueError("Specify 'param_types' when passing 'params'.") - params_pb = Struct( - fields={key: _make_value_pb(value) for key, value in params.items()} - ) - else: - params_pb = None - - database = self._session._database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - transaction = self._make_txn_selector() - partition_options = PartitionOptions( - partition_size_bytes=partition_size_bytes, max_partitions=max_partitions - ) - - response = api.partition_query( - session=self._session.name, - sql=sql, - transaction=transaction, - params=params_pb, - param_types=param_types, - partition_options=partition_options, - metadata=metadata, - ) - - return [partition.partition_token for partition in response.partitions] - - -class Snapshot(_SnapshotBase): - """Allow a set of reads / SQL statements with shared staleness. - - See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.TransactionOptions.ReadOnly - - If no options are passed, reads will use the ``strong`` model, reading - at a timestamp where all previously committed transactions are visible. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session used to perform the commit. - - :type read_timestamp: :class:`datetime.datetime` - :param read_timestamp: Execute all reads at the given timestamp. - - :type min_read_timestamp: :class:`datetime.datetime` - :param min_read_timestamp: Execute all reads at a - timestamp >= ``min_read_timestamp``. - - :type max_staleness: :class:`datetime.timedelta` - :param max_staleness: Read data at a - timestamp >= NOW - ``max_staleness`` seconds. - - :type exact_staleness: :class:`datetime.timedelta` - :param exact_staleness: Execute all reads at a timestamp that is - ``exact_staleness`` old. - - :type multi_use: :class:`bool` - :param multi_use: If true, multipl :meth:`read` / :meth:`execute_sql` - calls can be performed with the snapshot in the - context of a read-only transaction, used to ensure - isolation / consistency. Incompatible with - ``max_staleness`` and ``min_read_timestamp``. 
- """ - - def __init__( - self, - session, - read_timestamp=None, - min_read_timestamp=None, - max_staleness=None, - exact_staleness=None, - multi_use=False, - ): - super(Snapshot, self).__init__(session) - opts = [read_timestamp, min_read_timestamp, max_staleness, exact_staleness] - flagged = [opt for opt in opts if opt is not None] - - if len(flagged) > 1: - raise ValueError("Supply zero or one options.") - - if multi_use: - if min_read_timestamp is not None or max_staleness is not None: - raise ValueError( - "'multi_use' is incompatible with " - "'min_read_timestamp' / 'max_staleness'" - ) - - self._strong = len(flagged) == 0 - self._read_timestamp = read_timestamp - self._min_read_timestamp = min_read_timestamp - self._max_staleness = max_staleness - self._exact_staleness = exact_staleness - self._multi_use = multi_use - - def _make_txn_selector(self): - """Helper for :meth:`read`.""" - if self._transaction_id is not None: - return TransactionSelector(id=self._transaction_id) - - if self._read_timestamp: - key = "read_timestamp" - value = _datetime_to_pb_timestamp(self._read_timestamp) - elif self._min_read_timestamp: - key = "min_read_timestamp" - value = _datetime_to_pb_timestamp(self._min_read_timestamp) - elif self._max_staleness: - key = "max_staleness" - value = _timedelta_to_duration_pb(self._max_staleness) - elif self._exact_staleness: - key = "exact_staleness" - value = _timedelta_to_duration_pb(self._exact_staleness) - else: - key = "strong" - value = True - - options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(**{key: value}) - ) - - if self._multi_use: - return TransactionSelector(begin=options) - else: - return TransactionSelector(single_use=options) - - def begin(self): - """Begin a read-only transaction on the database. - - :rtype: bytes - :returns: the ID for the newly-begun transaction. - - :raises ValueError: - if the transaction is already begun, committed, or rolled back. - """ - if not self._multi_use: - raise ValueError("Cannot call 'begin' on single-use snapshots") - - if self._transaction_id is not None: - raise ValueError("Read-only transaction already begun") - - if self._read_request_count > 0: - raise ValueError("Read-only transaction already pending") - - database = self._session._database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - txn_selector = self._make_txn_selector() - response = api.begin_transaction( - self._session.name, txn_selector.begin, metadata=metadata - ) - self._transaction_id = response.id - return self._transaction_id diff --git a/spanner/google/cloud/spanner_v1/streamed.py b/spanner/google/cloud/spanner_v1/streamed.py deleted file mode 100644 index 5d1a31e93124..000000000000 --- a/spanner/google/cloud/spanner_v1/streamed.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Wrapper for streaming results.""" - -from google.protobuf.struct_pb2 import ListValue -from google.protobuf.struct_pb2 import Value -from google.cloud import exceptions -from google.cloud.spanner_v1.proto import type_pb2 -import six - -# pylint: disable=ungrouped-imports -from google.cloud.spanner_v1._helpers import _parse_value_pb - -# pylint: enable=ungrouped-imports - - -class StreamedResultSet(object): - """Process a sequence of partial result sets into a single set of row data. - - :type response_iterator: - :param response_iterator: - Iterator yielding - :class:`google.cloud.spanner_v1.proto.result_set_pb2.PartialResultSet` - instances. - - :type source: :class:`~google.cloud.spanner_v1.snapshot.Snapshot` - :param source: Snapshot from which the result set was fetched. - """ - - def __init__(self, response_iterator, source=None): - self._response_iterator = response_iterator - self._rows = [] # Fully-processed rows - self._counter = 0 # Counter for processed responses - self._metadata = None # Until set from first PRS - self._stats = None # Until set from last PRS - self._current_row = [] # Accumulated values for incomplete row - self._pending_chunk = None # Incomplete value - self._source = source # Source snapshot - - @property - def fields(self): - """Field descriptors for result set columns. - - :rtype: list of :class:`~google.cloud.spanner_v1.proto.type_pb2.Field` - :returns: list of fields describing column names / types. - """ - return self._metadata.row_type.fields - - @property - def metadata(self): - """Result set metadata - - :rtype: :class:`~.result_set_pb2.ResultSetMetadata` - :returns: structure describing the results - """ - return self._metadata - - @property - def stats(self): - """Result set statistics - - :rtype: - :class:`~google.cloud.spanner_v1.proto.result_set_pb2.ResultSetStats` - :returns: structure describing status about the response - """ - return self._stats - - def _merge_chunk(self, value): - """Merge pending chunk with next value. - - :type value: :class:`~google.protobuf.struct_pb2.Value` - :param value: continuation of chunked value from previous - partial result set. - - :rtype: :class:`~google.protobuf.struct_pb2.Value` - :returns: the merged value - """ - current_column = len(self._current_row) - field = self.fields[current_column] - merged = _merge_by_type(self._pending_chunk, value, field.type) - self._pending_chunk = None - return merged - - def _merge_values(self, values): - """Merge values into rows. - - :type values: list of :class:`~google.protobuf.struct_pb2.Value` - :param values: non-chunked values from partial result set. - """ - width = len(self.fields) - for value in values: - index = len(self._current_row) - field = self.fields[index] - self._current_row.append(_parse_value_pb(value, field.type)) - if len(self._current_row) == width: - self._rows.append(self._current_row) - self._current_row = [] - - def _consume_next(self): - """Consume the next partial result set from the stream. 
- - Parse the result set into new/existing rows in :attr:`_rows` - """ - response = six.next(self._response_iterator) - self._counter += 1 - - if self._metadata is None: # first response - metadata = self._metadata = response.metadata - - source = self._source - if source is not None and source._transaction_id is None: - source._transaction_id = metadata.transaction.id - - if response.HasField("stats"): # last response - self._stats = response.stats - - values = list(response.values) - if self._pending_chunk is not None: - values[0] = self._merge_chunk(values[0]) - - if response.chunked_value: - self._pending_chunk = values.pop() - - self._merge_values(values) - - def __iter__(self): - iter_rows, self._rows[:] = self._rows[:], () - while True: - if not iter_rows: - try: - self._consume_next() - except StopIteration: - return - iter_rows, self._rows[:] = self._rows[:], () - while iter_rows: - yield iter_rows.pop(0) - - def one(self): - """Return exactly one result, or raise an exception. - - :raises: :exc:`NotFound`: If there are no results. - :raises: :exc:`ValueError`: If there are multiple results. - :raises: :exc:`RuntimeError`: If consumption has already occurred, - in whole or in part. - """ - answer = self.one_or_none() - if answer is None: - raise exceptions.NotFound("No rows matched the given query.") - return answer - - def one_or_none(self): - """Return exactly one result, or None if there are no results. - - :raises: :exc:`ValueError`: If there are multiple results. - :raises: :exc:`RuntimeError`: If consumption has already occurred, - in whole or in part. - """ - # Sanity check: Has consumption of this query already started? - # If it has, then this is an exception. - if self._metadata is not None: - raise RuntimeError( - "Can not call `.one` or `.one_or_none` after " - "stream consumption has already started." - ) - - # Consume the first result of the stream. - # If there is no first result, then return None. - iterator = iter(self) - try: - answer = next(iterator) - except StopIteration: - return None - - # Attempt to consume more. This should no-op; if we get additional - # rows, then this is an error case. - try: - next(iterator) - raise ValueError("Expected one result; got more.") - except StopIteration: - return answer - - -class Unmergeable(ValueError): - """Unable to merge two values. 
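A hedged sketch of the ``one()`` / ``one_or_none()`` semantics above: call them before consuming the stream any other way, and expect ``NotFound`` or ``ValueError`` for zero or multiple rows respectively. IDs and table names are placeholders:

    from google.cloud import exceptions
    from google.cloud import spanner

    client = spanner.Client(project="my-project")
    database = client.instance("my-instance").database("my-db")

    with database.snapshot() as snapshot:
        row = snapshot.execute_sql(
            "SELECT FirstName FROM Singers WHERE SingerId = 1"
        ).one_or_none()
        print(row if row is not None else "no such singer")

    with database.snapshot() as snapshot:
        try:
            row = snapshot.execute_sql("SELECT FirstName FROM Singers").one()
            print(row)
        except exceptions.NotFound:
            print("empty table")
        except ValueError:
            print("more than one row")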
- - :type lhs: :class:`google.protobuf.struct_pb2.Value` - :param lhs: pending value to be merged - - :type rhs: :class:`google.protobuf.struct_pb2.Value` - :param rhs: remaining value to be merged - - :type type_: :class:`google.cloud.spanner_v1.proto.type_pb2.Type` - :param type_: field type of values being merged - """ - - def __init__(self, lhs, rhs, type_): - message = "Cannot merge %s values: %s %s" % ( - type_pb2.TypeCode.Name(type_.code), - lhs, - rhs, - ) - super(Unmergeable, self).__init__(message) - - -def _unmergeable(lhs, rhs, type_): - """Helper for '_merge_by_type'.""" - raise Unmergeable(lhs, rhs, type_) - - -def _merge_float64(lhs, rhs, type_): # pylint: disable=unused-argument - """Helper for '_merge_by_type'.""" - lhs_kind = lhs.WhichOneof("kind") - if lhs_kind == "string_value": - return Value(string_value=lhs.string_value + rhs.string_value) - rhs_kind = rhs.WhichOneof("kind") - array_continuation = ( - lhs_kind == "number_value" - and rhs_kind == "string_value" - and rhs.string_value == "" - ) - if array_continuation: - return lhs - raise Unmergeable(lhs, rhs, type_) - - -def _merge_string(lhs, rhs, type_): # pylint: disable=unused-argument - """Helper for '_merge_by_type'.""" - return Value(string_value=lhs.string_value + rhs.string_value) - - -_UNMERGEABLE_TYPES = (type_pb2.BOOL,) - - -def _merge_array(lhs, rhs, type_): - """Helper for '_merge_by_type'.""" - element_type = type_.array_element_type - if element_type.code in _UNMERGEABLE_TYPES: - # Individual values cannot be merged, just concatenate - lhs.list_value.values.extend(rhs.list_value.values) - return lhs - lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values) - - # Sanity check: If either list is empty, short-circuit. - # This is effectively a no-op. - if not len(lhs) or not len(rhs): - return Value(list_value=ListValue(values=(lhs + rhs))) - - first = rhs.pop(0) - if first.HasField("null_value"): # can't merge - lhs.append(first) - else: - last = lhs.pop() - try: - merged = _merge_by_type(last, first, element_type) - except Unmergeable: - lhs.append(last) - lhs.append(first) - else: - lhs.append(merged) - return Value(list_value=ListValue(values=(lhs + rhs))) - - -def _merge_struct(lhs, rhs, type_): - """Helper for '_merge_by_type'.""" - fields = type_.struct_type.fields - lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values) - - # Sanity check: If either list is empty, short-circuit. - # This is effectively a no-op. 
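A self-contained illustration of the chunk merge for STRING-typed values: the trailing chunk of one partial result set is concatenated with the leading chunk of the next, exactly as ``_merge_string`` above does:

    from google.protobuf.struct_pb2 import Value

    lhs = Value(string_value="Hello, Wor")   # pending chunk from the previous response
    rhs = Value(string_value="ld")           # first value of the next response
    merged = Value(string_value=lhs.string_value + rhs.string_value)
    print(merged.string_value)               # Hello, World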
- if not len(lhs) or not len(rhs): - return Value(list_value=ListValue(values=(lhs + rhs))) - - candidate_type = fields[len(lhs) - 1].type - first = rhs.pop(0) - if first.HasField("null_value") or candidate_type.code in _UNMERGEABLE_TYPES: - lhs.append(first) - else: - last = lhs.pop() - lhs.append(_merge_by_type(last, first, candidate_type)) - return Value(list_value=ListValue(values=lhs + rhs)) - - -_MERGE_BY_TYPE = { - type_pb2.ARRAY: _merge_array, - type_pb2.BOOL: _unmergeable, - type_pb2.BYTES: _merge_string, - type_pb2.DATE: _merge_string, - type_pb2.FLOAT64: _merge_float64, - type_pb2.INT64: _merge_string, - type_pb2.STRING: _merge_string, - type_pb2.STRUCT: _merge_struct, - type_pb2.TIMESTAMP: _merge_string, -} - - -def _merge_by_type(lhs, rhs, type_): - """Helper for '_merge_chunk'.""" - merger = _MERGE_BY_TYPE[type_.code] - return merger(lhs, rhs, type_) diff --git a/spanner/google/cloud/spanner_v1/transaction.py b/spanner/google/cloud/spanner_v1/transaction.py deleted file mode 100644 index 29a2e5f786e1..000000000000 --- a/spanner/google/cloud/spanner_v1/transaction.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Spanner read-write transaction support.""" - -from google.protobuf.struct_pb2 import Struct - -from google.cloud._helpers import _pb_timestamp_to_datetime -from google.cloud.spanner_v1._helpers import _make_value_pb -from google.cloud.spanner_v1._helpers import _metadata_with_prefix -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector -from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions -from google.cloud.spanner_v1.snapshot import _SnapshotBase -from google.cloud.spanner_v1.batch import _BatchBase - - -class Transaction(_SnapshotBase, _BatchBase): - """Implement read-write transaction semantics for a session. - - :type session: :class:`~google.cloud.spanner_v1.session.Session` - :param session: the session used to perform the commit - - :raises ValueError: if session has an existing transaction - """ - - committed = None - """Timestamp at which the transaction was successfully committed.""" - _rolled_back = False - _multi_use = True - _execute_sql_count = 0 - - def __init__(self, session): - if session._transaction is not None: - raise ValueError("Session has existing transaction.") - - super(Transaction, self).__init__(session) - - def _check_state(self): - """Helper for :meth:`commit` et al. - - :raises: :exc:`ValueError` if the object's state is invalid for making - API requests. - """ - if self._transaction_id is None: - raise ValueError("Transaction is not begun") - - if self.committed is not None: - raise ValueError("Transaction is already committed") - - if self._rolled_back: - raise ValueError("Transaction is already rolled back") - - def _make_txn_selector(self): - """Helper for :meth:`read`. - - :rtype: - :class:`~.transaction_pb2.TransactionSelector` - :returns: a selector configured for read-write transaction semantics. 
- """ - self._check_state() - return TransactionSelector(id=self._transaction_id) - - def begin(self): - """Begin a transaction on the database. - - :rtype: bytes - :returns: the ID for the newly-begun transaction. - :raises ValueError: - if the transaction is already begun, committed, or rolled back. - """ - if self._transaction_id is not None: - raise ValueError("Transaction already begun") - - if self.committed is not None: - raise ValueError("Transaction already committed") - - if self._rolled_back: - raise ValueError("Transaction is already rolled back") - - database = self._session._database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - txn_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - response = api.begin_transaction( - self._session.name, txn_options, metadata=metadata - ) - self._transaction_id = response.id - return self._transaction_id - - def rollback(self): - """Roll back a transaction on the database.""" - self._check_state() - database = self._session._database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - api.rollback(self._session.name, self._transaction_id, metadata=metadata) - self._rolled_back = True - del self._session._transaction - - def commit(self): - """Commit mutations to the database. - - :rtype: datetime - :returns: timestamp of the committed changes. - :raises ValueError: if there are no mutations to commit. - """ - self._check_state() - - database = self._session._database - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - response = api.commit( - self._session.name, - mutations=self._mutations, - transaction_id=self._transaction_id, - metadata=metadata, - ) - self.committed = _pb_timestamp_to_datetime(response.commit_timestamp) - del self._session._transaction - return self.committed - - @staticmethod - def _make_params_pb(params, param_types): - """Helper for :meth:`execute_update`. - - :type params: dict, {str -> column value} - :param params: values for parameter replacement. Keys must match - the names used in ``dml``. - - :type param_types: dict[str -> Union[dict, .types.Type]] - :param param_types: - (Optional) maps explicit types for one or more param values; - required if parameters are passed. - - :rtype: Union[None, :class:`Struct`] - :returns: a struct message for the passed params, or None - :raises ValueError: - If ``param_types`` is None but ``params`` is not None. - :raises ValueError: - If ``params`` is None but ``param_types`` is not None. - """ - if params is not None: - if param_types is None: - raise ValueError("Specify 'param_types' when passing 'params'.") - return Struct( - fields={key: _make_value_pb(value) for key, value in params.items()} - ) - else: - if param_types is not None: - raise ValueError("Specify 'params' when passing 'param_types'.") - - return None - - def execute_update(self, dml, params=None, param_types=None, query_mode=None): - """Perform an ``ExecuteSql`` API request with DML. - - :type dml: str - :param dml: SQL DML statement - - :type params: dict, {str -> column value} - :param params: values for parameter replacement. Keys must match - the names used in ``dml``. - - :type param_types: dict[str -> Union[dict, .types.Type]] - :param param_types: - (Optional) maps explicit types for one or more param values; - required if parameters are passed. 
- - :type query_mode: - :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryMode` - :param query_mode: Mode governing return of results / query plan. See - https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 - - :rtype: int - :returns: Count of rows affected by the DML statement. - """ - params_pb = self._make_params_pb(params, param_types) - database = self._session._database - metadata = _metadata_with_prefix(database.name) - transaction = self._make_txn_selector() - api = database.spanner_api - - response = api.execute_sql( - self._session.name, - dml, - transaction=transaction, - params=params_pb, - param_types=param_types, - query_mode=query_mode, - seqno=self._execute_sql_count, - metadata=metadata, - ) - - self._execute_sql_count += 1 - return response.stats.row_count_exact - - def batch_update(self, statements): - """Perform a batch of DML statements via an ``ExecuteBatchDml`` request. - - :type statements: - Sequence[Union[ str, Tuple[str, Dict[str, Any], Dict[str, Union[dict, .types.Type]]]]] - - :param statements: - List of DML statements, with optional params / param types. - If passed, 'params' is a dict mapping names to the values - for parameter replacement. Keys must match the names used in the - corresponding DML statement. If 'params' is passed, 'param_types' - must also be passed, as a dict mapping names to the type of - value passed in 'params'. - - :rtype: - Tuple(status, Sequence[int]) - :returns: - Status code, plus counts of rows affected by each completed DML - statement. Note that if the staus code is not ``OK``, the - statement triggering the error will not have an entry in the - list, nor will any statements following that one. - """ - parsed = [] - for statement in statements: - if isinstance(statement, str): - parsed.append({"sql": statement}) - else: - dml, params, param_types = statement - params_pb = self._make_params_pb(params, param_types) - parsed.append( - {"sql": dml, "params": params_pb, "param_types": param_types} - ) - - database = self._session._database - metadata = _metadata_with_prefix(database.name) - transaction = self._make_txn_selector() - api = database.spanner_api - - response = api.execute_batch_dml( - session=self._session.name, - transaction=transaction, - statements=parsed, - seqno=self._execute_sql_count, - metadata=metadata, - ) - - self._execute_sql_count += 1 - row_counts = [ - result_set.stats.row_count_exact for result_set in response.result_sets - ] - return response.status, row_counts - - def __enter__(self): - """Begin ``with`` block.""" - self.begin() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """End ``with`` block.""" - if exc_type is None: - self.commit() - else: - self.rollback() diff --git a/spanner/google/cloud/spanner_v1/types.py b/spanner/google/cloud/spanner_v1/types.py deleted file mode 100644 index 07c94ba871e3..000000000000 --- a/spanner/google/cloud/spanner_v1/types.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
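A hedged sketch of ``batch_update`` plus the ``with``-block protocol implemented by ``__enter__`` / ``__exit__`` above: commit on a clean exit, rollback if the block raises. Statements may be plain SQL strings or ``(sql, params, param_types)`` tuples; IDs and table names are placeholders:

    from google.cloud import spanner
    from google.cloud.spanner_v1 import param_types

    client = spanner.Client(project="my-project")
    database = client.instance("my-instance").database("my-db")

    session = database.session()
    session.create()

    with session.transaction() as txn:      # begin() on entry
        status, row_counts = txn.batch_update(
            [
                "DELETE FROM Singers WHERE SingerId > 1000",
                (
                    "UPDATE Singers SET FirstName = @name WHERE SingerId = @id",
                    {"name": "Alicia", "id": 1},
                    {"name": param_types.STRING, "id": param_types.INT64},
                ),
            ]
        )
        print(status.code, row_counts)      # non-OK status -> later statements skipped
    # leaving the block without an exception commits the transaction

    session.delete()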
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -import sys - -from google.api import http_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import struct_pb2 -from google.protobuf import timestamp_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.spanner_v1.proto import keys_pb2 -from google.cloud.spanner_v1.proto import mutation_pb2 -from google.cloud.spanner_v1.proto import query_plan_pb2 -from google.cloud.spanner_v1.proto import result_set_pb2 -from google.cloud.spanner_v1.proto import spanner_pb2 -from google.cloud.spanner_v1.proto import transaction_pb2 -from google.cloud.spanner_v1.proto import type_pb2 - - -_shared_modules = [ - http_pb2, - descriptor_pb2, - duration_pb2, - empty_pb2, - struct_pb2, - timestamp_pb2, -] - -_local_modules = [ - keys_pb2, - mutation_pb2, - query_plan_pb2, - result_set_pb2, - spanner_pb2, - transaction_pb2, - type_pb2, -] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.spanner_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/spanner/noxfile.py b/spanner/noxfile.py deleted file mode 100644 index 7949a4e3925a..000000000000 --- a/spanner/noxfile.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! - -from __future__ import absolute_import -import os -import shutil - -import nox - - -LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core")) -BLACK_VERSION = "black==19.3b0" -BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] - -if os.path.exists("samples"): - BLACK_PATHS.append("samples") - - -@nox.session(python="3.7") -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. - """ - session.install("flake8", BLACK_VERSION, *LOCAL_DEPS) - session.run("black", "--check", *BLACK_PATHS) - session.run("flake8", "google", "tests") - - -@nox.session(python="3.6") -def blacken(session): - """Run black. - - Format code to uniform standard. - - This currently uses Python 3.6 due to the automated Kokoro run of synthtool. - That run uses an image that doesn't have 3.6 installed. Before updating this - check the state of the `gcp_ubuntu_config` we use for that Kokoro run. 
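After the re-export loop above, the generated protobuf messages are importable from ``google.cloud.spanner_v1.types`` with that module reported as their home. A small sketch using ``PartitionOptions``, the same message ``partition_read`` / ``partition_query`` build internally:

    from google.cloud.spanner_v1 import types

    opts = types.PartitionOptions(partition_size_bytes=1 << 20, max_partitions=10)
    print(type(opts).__module__)   # google.cloud.spanner_v1.types
    print(opts)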
- """ - session.install(BLACK_VERSION) - session.run("black", *BLACK_PATHS) - - -@nox.session(python="3.7") -def lint_setup_py(session): - """Verify that setup.py is valid (including RST check).""" - session.install("docutils", "pygments") - session.run("python", "setup.py", "check", "--restructuredtext", "--strict") - - -def default(session): - # Install all test dependencies, then install this package in-place. - session.install("mock", "pytest", "pytest-cov") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", ".") - - # Run py.test against the unit tests. - session.run( - "py.test", - "--quiet", - "--cov=google.cloud", - "--cov=tests.unit", - "--cov-append", - "--cov-config=.coveragerc", - "--cov-report=", - "--cov-fail-under=0", - os.path.join("tests", "unit"), - *session.posargs, - ) - - -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) -def unit(session): - """Run the unit test suite.""" - default(session) - - -@nox.session(python=["2.7", "3.7"]) -def system(session): - """Run the system test suite.""" - system_test_path = os.path.join("tests", "system.py") - system_test_folder_path = os.path.join("tests", "system") - # Sanity check: Only run tests if the environment variable is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable") - - system_test_exists = os.path.exists(system_test_path) - system_test_folder_exists = os.path.exists(system_test_folder_path) - # Sanity check: only run tests if found. - if not system_test_exists and not system_test_folder_exists: - session.skip("System tests were not found") - - # Use pre-release gRPC for system tests. - session.install("--pre", "grpcio") - - # Install all test dependencies, then install this package into the - # virtualenv's dist-packages. - session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", "../test_utils/") - session.install("-e", ".") - - # Run py.test against the system tests. - if system_test_exists: - session.run("py.test", "--quiet", system_test_path, *session.posargs) - if system_test_folder_exists: - session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) - - -@nox.session(python="3.7") -def cover(session): - """Run the final coverage report. - - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python="3.7") -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/spanner/pylint.config.py b/spanner/pylint.config.py deleted file mode 100644 index f7928f67601e..000000000000 --- a/spanner/pylint.config.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This module is used to configure gcp-devrel-py-tools run-pylint.""" - -import copy - -from gcp_devrel.tools import pylint - -# Library configuration - -# library_additions = {} -# Ignore generated code -library_replacements = copy.deepcopy(pylint.DEFAULT_LIBRARY_RC_REPLACEMENTS) -library_replacements['MASTER']['ignore'].append('spanner_v1') -library_replacements['MASTER']['ignore'].append('spanner_admin_instance_v1') -library_replacements['MASTER']['ignore'].append('spanner_admin_database_v1') - -# Test configuration - -# test_additions = copy.deepcopy(library_additions) -# test_replacements = copy.deepcopy(library_replacements) diff --git a/spanner/setup.cfg b/spanner/setup.cfg deleted file mode 100644 index 3bd555500e37..000000000000 --- a/spanner/setup.cfg +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by synthtool. DO NOT EDIT! -[bdist_wheel] -universal = 1 diff --git a/spanner/setup.py b/spanner/setup.py deleted file mode 100644 index b76e9f33ccbc..000000000000 --- a/spanner/setup.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import os - -import setuptools - - -# Package metadata. - -name = "google-cloud-spanner" -description = "Cloud Spanner API client library" -version = "1.13.0" -# Should be one of: -# 'Development Status :: 3 - Alpha' -# 'Development Status :: 4 - Beta' -# 'Development Status :: 5 - Production/Stable' -release_status = "Development Status :: 5 - Production/Stable" -dependencies = [ - "google-api-core[grpc, grpcgcp] >= 1.14.0, < 2.0.0dev", - "google-cloud-core >= 1.0.3, < 2.0dev", - "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", -] -extras = {} - - -# Setup boilerplate below this line. - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, "README.rst") -with io.open(readme_filename, encoding="utf-8") as readme_file: - readme = readme_file.read() - -# Only include packages under the 'google' namespace. Do not include tests, -# benchmarks, etc. -packages = [ - package for package in setuptools.find_packages() if package.startswith("google") -] - -# Determine which namespaces are needed. 
-namespaces = ["google"] -if "google.cloud" in packages: - namespaces.append("google.cloud") - - -setuptools.setup( - name=name, - version=version, - description=description, - long_description=readme, - author="Google LLC", - author_email="googleapis-packages@google.com", - license="Apache 2.0", - url="https://github.com/GoogleCloudPlatform/google-cloud-python", - classifiers=[ - release_status, - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Operating System :: OS Independent", - "Topic :: Internet", - ], - platforms="Posix; MacOS X; Windows", - packages=packages, - namespace_packages=namespaces, - install_requires=dependencies, - extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", - include_package_data=True, - zip_safe=False, -) diff --git a/spanner/synth.metadata b/spanner/synth.metadata deleted file mode 100644 index 2e5ff8a80821..000000000000 --- a/spanner/synth.metadata +++ /dev/null @@ -1,494 +0,0 @@ -{ - "updateTime": "2020-01-30T13:37:36.907968Z", - "sources": [ - { - "generator": { - "name": "artman", - "version": "0.44.4", - "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c1246a29e22b0f98e800a536b5b0da2d933a55f2", - "internalRef": "292310790", - "log": "c1246a29e22b0f98e800a536b5b0da2d933a55f2\nUpdating v1 protos with the latest inline documentation (in comments) and config options. 
Also adding a per-service .yaml file.\n\nPiperOrigin-RevId: 292310790\n\nb491d07cadaae7cde5608321f913e5ca1459b32d\nRevert accidental local_repository change\n\nPiperOrigin-RevId: 292245373\n\naf3400a8cb6110025198b59a0f7d018ae3cda700\nUpdate gapic-generator dependency (prebuilt PHP binary support).\n\nPiperOrigin-RevId: 292243997\n\n341fd5690fae36f36cf626ef048fbcf4bbe7cee6\ngrafeas: v1 add resource_definition for the grafeas.io/Project and change references for Project.\n\nPiperOrigin-RevId: 292221998\n\n42e915ec2ece1cd37a590fbcd10aa2c0fb0e5b06\nUpdate the gapic-generator, protoc-java-resource-name-plugin and protoc-docs-plugin to the latest commit.\n\nPiperOrigin-RevId: 292182368\n\nf035f47250675d31492a09f4a7586cfa395520a7\nFix grafeas build and update build.sh script to include gerafeas.\n\nPiperOrigin-RevId: 292168753\n\n26ccb214b7bc4a716032a6266bcb0a9ca55d6dbb\nasset: v1p1beta1 add client config annotations and retry config\n\nPiperOrigin-RevId: 292154210\n\n974ee5c0b5d03e81a50dafcedf41e0efebb5b749\nasset: v1beta1 add client config annotations\n\nPiperOrigin-RevId: 292152573\n\ncf3b61102ed5f36b827bc82ec39be09525f018c8\n Fix to protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 292034635\n\n4e1cfaa7c0fede9e65d64213ca3da1b1255816c0\nUpdate the public proto to support UTF-8 encoded id for CatalogService API, increase the ListCatalogItems deadline to 300s and some minor documentation change\n\nPiperOrigin-RevId: 292030970\n\n9c483584f8fd5a1b862ae07973f4cc7bb3e46648\nasset: add annotations to v1p1beta1\n\nPiperOrigin-RevId: 292009868\n\ne19209fac29731d0baf6d9ac23da1164f7bdca24\nAdd the google.rpc.context.AttributeContext message to the open source\ndirectories.\n\nPiperOrigin-RevId: 291999930\n\nae5662960573f279502bf98a108a35ba1175e782\noslogin API: move file level option on top of the file to avoid protobuf.js bug.\n\nPiperOrigin-RevId: 291990506\n\neba3897fff7c49ed85d3c47fc96fe96e47f6f684\nAdd cc_proto_library and cc_grpc_library targets for Spanner and IAM protos.\n\nPiperOrigin-RevId: 291988651\n\n8e981acfd9b97ea2f312f11bbaa7b6c16e412dea\nBeta launch for PersonDetection and FaceDetection features.\n\nPiperOrigin-RevId: 291821782\n\n994e067fae3b21e195f7da932b08fff806d70b5d\nasset: add annotations to v1p2beta1\n\nPiperOrigin-RevId: 291815259\n\n244e1d2c89346ca2e0701b39e65552330d68545a\nAdd Playable Locations service\n\nPiperOrigin-RevId: 291806349\n\n909f8f67963daf45dd88d020877fb9029b76788d\nasset: add annotations to v1beta2\n\nPiperOrigin-RevId: 291805301\n\n3c39a1d6e23c1ef63c7fba4019c25e76c40dfe19\nKMS: add file-level message for CryptoKeyPath, it is defined in gapic yaml but not\nin proto files.\n\nPiperOrigin-RevId: 291420695\n\nc6f3f350b8387f8d1b85ed4506f30187ebaaddc3\ncontaineranalysis: update v1beta1 and bazel build with annotations\n\nPiperOrigin-RevId: 291401900\n\n92887d74b44e4e636252b7b8477d0d2570cd82db\nfix: fix the location of grpc config file.\n\nPiperOrigin-RevId: 291396015\n\ne26cab8afd19d396b929039dac5d874cf0b5336c\nexpr: add default_host and method_signature annotations to CelService\n\nPiperOrigin-RevId: 291240093\n\n06093ae3952441c34ec176d1f7431b8765cec0be\nirm: fix v1alpha2 bazel build by adding missing proto imports\n\nPiperOrigin-RevId: 291227940\n\na8a2514af326e4673063f9a3c9d0ef1091c87e6c\nAdd proto annotation for cloud/irm API\n\nPiperOrigin-RevId: 291217859\n\n8d16f76de065f530d395a4c7eabbf766d6a120fd\nGenerate Memcache v1beta2 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 
291008516\n\n3af1dabd93df9a9f17bf3624d3b875c11235360b\ngrafeas: Add containeranalysis default_host to Grafeas service\n\nPiperOrigin-RevId: 290965849\n\nbe2663fa95e31cba67d0cd62611a6674db9f74b7\nfix(google/maps/roads): add missing opening bracket\n\nPiperOrigin-RevId: 290964086\n\nfacc26550a0af0696e0534bc9cae9df14275aa7c\nUpdating v2 protos with the latest inline documentation (in comments) and adding a per-service .yaml file.\n\nPiperOrigin-RevId: 290952261\n\ncda99c1f7dc5e4ca9b1caeae1dc330838cbc1461\nChange api_name to 'asset' for v1p1beta1\n\nPiperOrigin-RevId: 290800639\n\n94e9e90c303a820ce40643d9129e7f0d2054e8a1\nAdds Google Maps Road service\n\nPiperOrigin-RevId: 290795667\n\na3b23dcb2eaecce98c600c7d009451bdec52dbda\nrpc: new message ErrorInfo, other comment updates\n\nPiperOrigin-RevId: 290781668\n\n26420ef4e46c37f193c0fbe53d6ebac481de460e\nAdd proto definition for Org Policy v1.\n\nPiperOrigin-RevId: 290771923\n\n7f0dab8177cf371ae019a082e2512de7ac102888\nPublish Routes Preferred API v1 service definitions.\n\nPiperOrigin-RevId: 290326986\n\nad6e508d0728e1d1bca6e3f328cd562718cb772d\nFix: Qualify resource type references with \"jobs.googleapis.com/\"\n\nPiperOrigin-RevId: 290285762\n\n58e770d568a2b78168ddc19a874178fee8265a9d\ncts client library\n\nPiperOrigin-RevId: 290146169\n\naf9daa4c3b4c4a8b7133b81588dd9ffd37270af2\nAdd more programming language options to public proto\n\nPiperOrigin-RevId: 290144091\n\nd9f2bbf2df301ef84641d4cec7c828736a0bd907\ntalent: add missing resource.proto dep to Bazel build target\n\nPiperOrigin-RevId: 290143164\n\n3b3968237451d027b42471cd28884a5a1faed6c7\nAnnotate Talent API.\nAdd gRPC service config for retry.\nUpdate bazel file with google.api.resource dependency.\n\nPiperOrigin-RevId: 290125172\n\n0735b4b096872960568d1f366bfa75b7b0e1f1a3\nWeekly library update.\n\nPiperOrigin-RevId: 289939042\n\n8760d3d9a4543d7f9c0d1c7870aca08b116e4095\nWeekly library update.\n\nPiperOrigin-RevId: 289939020\n\n8607df842f782a901805187e02fff598145b0b0e\nChange Talent API timeout to 30s.\n\nPiperOrigin-RevId: 289912621\n\n908155991fe32570653bcb72ecfdcfc896642f41\nAdd Recommendations AI V1Beta1\n\nPiperOrigin-RevId: 289901914\n\n5c9a8c2bebd8b71aa66d1cc473edfaac837a2c78\nAdding no-arg method signatures for ListBillingAccounts and ListServices\n\nPiperOrigin-RevId: 289891136\n\n50b0e8286ac988b0593bd890eb31fef6ea2f5767\nlongrunning: add grpc service config and default_host annotation to operations.proto\n\nPiperOrigin-RevId: 289876944\n\n6cac27dabe51c54807b0401698c32d34998948a9\n Updating default deadline for Cloud Security Command Center's v1 APIs.\n\nPiperOrigin-RevId: 289875412\n\nd99df0d67057a233c711187e0689baa4f8e6333d\nFix: Correct spelling in C# namespace option\n\nPiperOrigin-RevId: 289709813\n\n2fa8d48165cc48e35b0c62e6f7bdade12229326c\nfeat: Publish Recommender v1 to GitHub.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289619243\n\n9118db63d1ab493a2e44a3b4973fde810a835c49\nfirestore: don't retry reads that fail with Aborted\n\nFor transaction reads that fail with ABORTED, we need to rollback and start a new transaction. Our current configuration makes it so that GAPIC retries ABORTED reads multiple times without making any progress. 
Instead, we should retry at the transaction level.\n\nPiperOrigin-RevId: 289532382\n\n1dbfd3fe4330790b1e99c0bb20beb692f1e20b8a\nFix bazel build\nAdd other langauges (Java was already there) for bigquery/storage/v1alpha2 api.\n\nPiperOrigin-RevId: 289519766\n\nc06599cdd7d11f8d3fd25f8d3249e5bb1a3d5d73\nInitial commit of google.cloud.policytroubleshooter API, The API helps in troubleshooting GCP policies. Refer https://cloud.google.com/iam/docs/troubleshooting-access for more information\n\nPiperOrigin-RevId: 289491444\n\nfce7d80fa16ea241e87f7bc33d68595422e94ecd\nDo not pass samples option for Artman config of recommender v1 API.\n\nPiperOrigin-RevId: 289477403\n\nef179e8c61436297e6bb124352e47e45c8c80cb1\nfix: Address missing Bazel dependency.\n\nBazel builds stopped working in 06ec6d5 because\nthe google/longrunning/operations.proto file took\nan import from google/api/client.proto, but that\nimport was not added to BUILD.bazel.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446074\n\n8841655b242c84fd691d77d7bcf21b61044f01ff\nMigrate Data Labeling v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446026\n\n06ec6d5d053fff299eaa6eaa38afdd36c5e2fc68\nAdd annotations to google.longrunning.v1\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289413169\n\n0480cf40be1d3cc231f4268a2fdb36a8dd60e641\nMigrate IAM Admin v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289411084\n\n1017173e9adeb858587639af61889ad970c728b1\nSpecify a C# namespace for BigQuery Connection v1beta1\n\nPiperOrigin-RevId: 289396763\n\nb08714b378e8e5b0c4ecdde73f92c36d6303b4b6\nfix: Integrate latest proto-docs-plugin fix.\nFixes dialogflow v2\n\nPiperOrigin-RevId: 289189004\n\n51217a67e79255ee1f2e70a6a3919df082513327\nCreate BUILD file for recommender v1\n\nPiperOrigin-RevId: 289183234\n\nacacd87263c0a60e458561b8b8ce9f67c760552a\nGenerate recommender v1 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 289177510\n\n9d2f7133b97720b1fa3601f6dcd30760ba6d8a1e\nFix kokoro build script\n\nPiperOrigin-RevId: 289166315\n\nc43a67530d2a47a0220cad20ca8de39b3fbaf2c5\ncloudtasks: replace missing RPC timeout config for v2beta2 and v2beta3\n\nPiperOrigin-RevId: 289162391\n\n4cefc229a9197236fc0adf02d69b71c0c5cf59de\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 289158456\n\n56f263fe959c50786dab42e3c61402d32d1417bd\nCatalog API: Adding config necessary to build client libraries\n\nPiperOrigin-RevId: 289149879\n\n4543762b23a57fc3c53d409efc3a9affd47b6ab3\nFix Bazel build\nbilling/v1 and dialogflow/v2 remain broken (not bazel-related issues).\nBilling has wrong configuration, dialogflow failure is caused by a bug in documentation plugin.\n\nPiperOrigin-RevId: 289140194\n\nc9dce519127b97e866ca133a01157f4ce27dcceb\nUpdate Bigtable docs\n\nPiperOrigin-RevId: 289114419\n\n802c5c5f2bf94c3facb011267d04e71942e0d09f\nMigrate DLP to proto annotations (but not GAPIC v2).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289102579\n\n6357f30f2ec3cff1d8239d18b707ff9d438ea5da\nRemove gRPC configuration file that was in the wrong place.\n\nPiperOrigin-RevId: 289096111\n\n360a8792ed62f944109d7e22d613a04a010665b4\n Protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 289011995\n\na79211c20c4f2807eec524d00123bf7c06ad3d6e\nRoll back containeranalysis v1 to GAPIC v1.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288999068\n\n9e60345ba603e03484a8aaa33ce5ffa19c1c652b\nPublish Routes Preferred API v1 proto definitions.\n\nPiperOrigin-RevId: 
288941399\n\nd52885b642ad2aa1f42b132ee62dbf49a73e1e24\nMigrate the service management API to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288909426\n\n6ace586805c08896fef43e28a261337fcf3f022b\ncloudtasks: replace missing RPC timeout config\n\nPiperOrigin-RevId: 288783603\n\n51d906cabee4876b12497054b15b05d4a50ad027\nImport of Grafeas from Github.\n\nUpdate BUILD.bazel accordingly.\n\nPiperOrigin-RevId: 288783426\n\n5ef42bcd363ba0440f0ee65b3c80b499e9067ede\nMigrate Recommender v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288713066\n\n94f986afd365b7d7e132315ddcd43d7af0e652fb\nMigrate Container Analysis v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288708382\n\n7a751a279184970d3b6ba90e4dd4d22a382a0747\nRemove Container Analysis v1alpha1 (nobody publishes it).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288707473\n\n3c0d9c71242e70474b2b640e15bb0a435fd06ff0\nRemove specious annotation from BigQuery Data Transfer before\nanyone accidentally does anything that uses it.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288701604\n\n1af307a4764bd415ef942ac5187fa1def043006f\nMigrate BigQuery Connection to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288698681\n\n08b488e0660c59842a7dee0e3e2b65d9e3a514a9\nExposing cloud_catalog.proto (This API is already available through REST)\n\nPiperOrigin-RevId: 288625007\n\na613482977e11ac09fa47687a5d1b5a01efcf794\nUpdate the OS Login v1beta API description to render better in the UI.\n\nPiperOrigin-RevId: 288547940\n\n5e182b8d9943f1b17008d69d4c7e865dc83641a7\nUpdate the OS Login API description to render better in the UI.\n\nPiperOrigin-RevId: 288546443\n\n" - } - }, - { - "template": { - "name": "python_library", - "origin": "synthtool.gcp", - "version": "2019.10.17" - } - } - ], - "destinations": [ - { - "client": { - "source": "googleapis", - "apiName": "spanner", - "apiVersion": "v1", - "language": "python", - "generator": "gapic", - "config": "google/spanner/artman_spanner.yaml" - } - }, - { - "client": { - "source": "googleapis", - "apiName": "spanner_admin_instance", - "apiVersion": "v1", - "language": "python", - "generator": "gapic", - "config": "google/spanner/admin/instance/artman_spanner_admin_instance.yaml" - } - }, - { - "client": { - "source": "googleapis", - "apiName": "spanner_admin_database", - "apiVersion": "v1", - "language": "python", - "generator": "gapic", - "config": "google/spanner/admin/database/artman_spanner_admin_database.yaml" - } - } - ], - "newFiles": [ - { - "path": ".coveragerc" - }, - { - "path": ".flake8" - }, - { - "path": ".repo-metadata.json" - }, - { - "path": "CHANGELOG.md" - }, - { - "path": "LICENSE" - }, - { - "path": "MANIFEST.in" - }, - { - "path": "README.rst" - }, - { - "path": "benchmark/bin/ycsb" - }, - { - "path": "benchmark/ycsb.py" - }, - { - "path": "docs/README.rst" - }, - { - "path": "docs/_static/custom.css" - }, - { - "path": "docs/_templates/layout.html" - }, - { - "path": "docs/advanced-session-pool-topics.rst" - }, - { - "path": "docs/api-reference.rst" - }, - { - "path": "docs/batch-api.rst" - }, - { - "path": "docs/batch-usage.rst" - }, - { - "path": "docs/changelog.md" - }, - { - "path": "docs/client-api.rst" - }, - { - "path": "docs/client-usage.rst" - }, - { - "path": "docs/conf.py" - }, - { - "path": "docs/database-api.rst" - }, - { - "path": "docs/database-usage.rst" - }, - { - "path": "docs/gapic/v1/admin_database_api.rst" - }, - { - "path": "docs/gapic/v1/admin_database_types.rst" - }, - { - "path": 
"docs/gapic/v1/admin_instance_api.rst" - }, - { - "path": "docs/gapic/v1/admin_instance_types.rst" - }, - { - "path": "docs/gapic/v1/api.rst" - }, - { - "path": "docs/gapic/v1/transactions.rst" - }, - { - "path": "docs/gapic/v1/types.rst" - }, - { - "path": "docs/index.rst" - }, - { - "path": "docs/instance-api.rst" - }, - { - "path": "docs/instance-usage.rst" - }, - { - "path": "docs/keyset-api.rst" - }, - { - "path": "docs/session-api.rst" - }, - { - "path": "docs/snapshot-api.rst" - }, - { - "path": "docs/snapshot-usage.rst" - }, - { - "path": "docs/streamed-api.rst" - }, - { - "path": "docs/transaction-api.rst" - }, - { - "path": "docs/transaction-usage.rst" - }, - { - "path": "docs/usage.html" - }, - { - "path": "google/__init__.py" - }, - { - "path": "google/cloud/__init__.py" - }, - { - "path": "google/cloud/spanner.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/__init__.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/gapic/__init__.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/gapic/enums.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/proto/__init__.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto" - }, - { - "path": "google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_admin_database_v1/types.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/__init__.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/gapic/__init__.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/gapic/enums.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client_config.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/proto/__init__.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_admin_instance_v1/types.py" - }, - { - "path": "google/cloud/spanner_v1/__init__.py" - }, - { - "path": "google/cloud/spanner_v1/_helpers.py" - }, - { - "path": "google/cloud/spanner_v1/batch.py" - }, - { - "path": "google/cloud/spanner_v1/client.py" - }, - { - "path": "google/cloud/spanner_v1/database.py" - }, - { - "path": "google/cloud/spanner_v1/gapic/__init__.py" - }, - { - "path": "google/cloud/spanner_v1/gapic/enums.py" - }, - { - "path": "google/cloud/spanner_v1/gapic/spanner_client.py" - }, - { - "path": "google/cloud/spanner_v1/gapic/spanner_client_config.py" - }, - { - "path": 
"google/cloud/spanner_v1/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/spanner_v1/gapic/transports/spanner.grpc.config" - }, - { - "path": "google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py" - }, - { - "path": "google/cloud/spanner_v1/instance.py" - }, - { - "path": "google/cloud/spanner_v1/keyset.py" - }, - { - "path": "google/cloud/spanner_v1/param_types.py" - }, - { - "path": "google/cloud/spanner_v1/pool.py" - }, - { - "path": "google/cloud/spanner_v1/proto/__init__.py" - }, - { - "path": "google/cloud/spanner_v1/proto/keys.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/keys_pb2.py" - }, - { - "path": "google/cloud/spanner_v1/proto/keys_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_v1/proto/mutation.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/mutation_pb2.py" - }, - { - "path": "google/cloud/spanner_v1/proto/mutation_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_v1/proto/query_plan.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/query_plan_pb2.py" - }, - { - "path": "google/cloud/spanner_v1/proto/query_plan_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_v1/proto/result_set.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/result_set_pb2.py" - }, - { - "path": "google/cloud/spanner_v1/proto/result_set_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_v1/proto/spanner.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/spanner_database_admin.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/spanner_instance_admin.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/spanner_pb2.py" - }, - { - "path": "google/cloud/spanner_v1/proto/spanner_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_v1/proto/transaction.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/transaction_pb2.py" - }, - { - "path": "google/cloud/spanner_v1/proto/transaction_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_v1/proto/type.proto" - }, - { - "path": "google/cloud/spanner_v1/proto/type_pb2.py" - }, - { - "path": "google/cloud/spanner_v1/proto/type_pb2_grpc.py" - }, - { - "path": "google/cloud/spanner_v1/session.py" - }, - { - "path": "google/cloud/spanner_v1/snapshot.py" - }, - { - "path": "google/cloud/spanner_v1/streamed.py" - }, - { - "path": "google/cloud/spanner_v1/transaction.py" - }, - { - "path": "google/cloud/spanner_v1/types.py" - }, - { - "path": "noxfile.py" - }, - { - "path": "pylint.config.py" - }, - { - "path": "setup.cfg" - }, - { - "path": "setup.py" - }, - { - "path": "synth.metadata" - }, - { - "path": "synth.py" - }, - { - "path": "tests/__init__.py" - }, - { - "path": "tests/_fixtures.py" - }, - { - "path": "tests/system/__init__.py" - }, - { - "path": "tests/system/test_system.py" - }, - { - "path": "tests/system/utils/__init__.py" - }, - { - "path": "tests/system/utils/clear_streaming.py" - }, - { - "path": "tests/system/utils/populate_streaming.py" - }, - { - "path": "tests/system/utils/scrub_instances.py" - }, - { - "path": "tests/system/utils/streaming_utils.py" - }, - { - "path": "tests/unit/__init__.py" - }, - { - "path": "tests/unit/gapic/v1/test_database_admin_client_v1.py" - }, - { - "path": "tests/unit/gapic/v1/test_instance_admin_client_v1.py" - }, - { - "path": "tests/unit/gapic/v1/test_spanner_client_v1.py" - }, - { - "path": "tests/unit/streaming-read-acceptance-test.json" - }, - { - "path": "tests/unit/test__helpers.py" - }, - { - "path": "tests/unit/test_batch.py" - }, - { - "path": "tests/unit/test_client.py" - }, - { - "path": 
"tests/unit/test_database.py" - }, - { - "path": "tests/unit/test_instance.py" - }, - { - "path": "tests/unit/test_keyset.py" - }, - { - "path": "tests/unit/test_param_types.py" - }, - { - "path": "tests/unit/test_pool.py" - }, - { - "path": "tests/unit/test_session.py" - }, - { - "path": "tests/unit/test_snapshot.py" - }, - { - "path": "tests/unit/test_streamed.py" - }, - { - "path": "tests/unit/test_transaction.py" - } - ] -} \ No newline at end of file diff --git a/spanner/synth.py b/spanner/synth.py deleted file mode 100644 index b30b82114a39..000000000000 --- a/spanner/synth.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This script is used to synthesize generated parts of this library.""" -import synthtool as s -from synthtool import gcp - -gapic = gcp.GAPICGenerator() -common = gcp.CommonTemplates() - -# ---------------------------------------------------------------------------- -# Generate spanner GAPIC layer -# ---------------------------------------------------------------------------- -library = gapic.py_library( - "spanner", - "v1", - config_path="/google/spanner/artman_spanner.yaml", - artman_output_name="spanner-v1", - include_protos=True, -) - -s.move(library / "google/cloud/spanner_v1/proto") -s.move(library / "google/cloud/spanner_v1/gapic") -s.move(library / "tests") - -# Add grpcio-gcp options -s.replace( - "google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py", - "import google.api_core.grpc_helpers\n", - "import pkg_resources\n" - "import grpc_gcp\n" - "\n" - "import google.api_core.grpc_helpers\n", -) -s.replace( - "google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py", - "from google.cloud.spanner_v1.proto import spanner_pb2_grpc\n", - "\g<0>\n\n_SPANNER_GRPC_CONFIG = 'spanner.grpc.config'\n", -) - -s.replace( - "google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py", - "(\s+)return google.api_core.grpc_helpers.create_channel\(\n", - "\g<1>grpc_gcp_config = grpc_gcp.api_config_from_text_pb(" - "\g<1> pkg_resources.resource_string(__name__, _SPANNER_GRPC_CONFIG))" - "\g<1>options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, grpc_gcp_config)]" - "\g<0>", -) -s.replace( - "tests/unit/gapic/v1/test_spanner_client_v1.py", - "from google.cloud import spanner_v1", - "from google.cloud.spanner_v1.gapic import spanner_client as spanner_v1", -) - -# ---------------------------------------------------------------------------- -# Generate instance admin client -# ---------------------------------------------------------------------------- -library = gapic.py_library( - "spanner_admin_instance", - "v1", - config_path="/google/spanner/admin/instance" "/artman_spanner_admin_instance.yaml", - artman_output_name="spanner-admin-instance-v1", - include_protos=True, -) - -s.move(library / "google/cloud/spanner_admin_instance_v1/gapic") -s.move(library / "google/cloud/spanner_admin_instance_v1/proto") -s.move(library / "tests") - -# Fix up the _GAPIC_LIBRARY_VERSION targets -s.replace( - 
"google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py", - "'google-cloud-spanner-admin-instance'", - "'google-cloud-spanner'", -) - -# Fix up generated imports -s.replace( - "google/**/*.py", - "from google\.cloud\.spanner\.admin\.instance_v1.proto", - "from google.cloud.spanner_admin_instance_v1.proto", -) - -# Fix docstrings -s.replace("google/cloud/spanner_v1/proto/transaction_pb2.py", r"""====*""", r"") -s.replace("google/cloud/spanner_v1/proto/transaction_pb2.py", r"""----*""", r"") -s.replace("google/cloud/spanner_v1/proto/transaction_pb2.py", r"""~~~~*""", r"") - -# ---------------------------------------------------------------------------- -# Generate database admin client -# ---------------------------------------------------------------------------- -library = gapic.py_library( - "spanner_admin_database", - "v1", - config_path="/google/spanner/admin/database" "/artman_spanner_admin_database.yaml", - artman_output_name="spanner-admin-database-v1", - include_protos=True, -) - -s.move(library / "google/cloud/spanner_admin_database_v1/gapic") -s.move(library / "google/cloud/spanner_admin_database_v1/proto") -s.move(library / "tests") - -# Fix up the _GAPIC_LIBRARY_VERSION targets -s.replace( - "google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py", - "'google-cloud-spanner-admin-database'", - "'google-cloud-spanner'", -) - -# Fix up the _GAPIC_LIBRARY_VERSION targets -s.replace( - "google/**/*.py", - "from google\.cloud\.spanner\.admin\.database_v1.proto", - "from google.cloud.spanner_admin_database_v1.proto", -) - -# Fix up proto docs that are missing summary line. -s.replace( - "google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py", - '"""Attributes:', - '"""Protocol buffer.\n\n Attributes:', -) - -# ---------------------------------------------------------------------------- -# Add templated files -# ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=100) -s.move(templated_files) - -# Template's MANIFEST.in does not include the needed GAPIC config file. -# See PR #6928. -s.replace( - "MANIFEST.in", - "include README.rst LICENSE\n", - "include README.rst LICENSE\n" - "include google/cloud/spanner_v1/gapic/transports/spanner.grpc.config\n", -) - -s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/spanner/tests/__init__.py b/spanner/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/tests/_fixtures.py b/spanner/tests/_fixtures.py deleted file mode 100644 index d0b78c0ba506..000000000000 --- a/spanner/tests/_fixtures.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Test fixtures.""" - - -DDL = """\ -CREATE TABLE contacts ( - contact_id INT64, - first_name STRING(1024), - last_name STRING(1024), - email STRING(1024) ) - PRIMARY KEY (contact_id); -CREATE TABLE contact_phones ( - contact_id INT64, - phone_type STRING(1024), - phone_number STRING(1024) ) - PRIMARY KEY (contact_id, phone_type), - INTERLEAVE IN PARENT contacts ON DELETE CASCADE; -CREATE TABLE all_types ( - pkey INT64 NOT NULL, - int_value INT64, - int_array ARRAY, - bool_value BOOL, - bool_array ARRAY, - bytes_value BYTES(16), - bytes_array ARRAY, - date_value DATE, - date_array ARRAY, - float_value FLOAT64, - float_array ARRAY, - string_value STRING(16), - string_array ARRAY, - timestamp_value TIMESTAMP, - timestamp_array ARRAY) - PRIMARY KEY (pkey); -CREATE TABLE counters ( - name STRING(1024), - value INT64 ) - PRIMARY KEY (name); -CREATE TABLE string_plus_array_of_string ( - id INT64, - name STRING(16), - tags ARRAY ) - PRIMARY KEY (id); -CREATE INDEX name ON contacts(first_name, last_name); -CREATE TABLE users_history ( - id INT64 NOT NULL, - commit_ts TIMESTAMP NOT NULL OPTIONS - (allow_commit_timestamp=true), - name STRING(MAX) NOT NULL, - email STRING(MAX), - deleted BOOL NOT NULL ) - PRIMARY KEY(id, commit_ts DESC); -""" - -DDL_STATEMENTS = [stmt.strip() for stmt in DDL.split(";") if stmt.strip()] diff --git a/spanner/tests/system/__init__.py b/spanner/tests/system/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/tests/system/test_system.py b/spanner/tests/system/test_system.py deleted file mode 100644 index ae688029b4d2..000000000000 --- a/spanner/tests/system/test_system.py +++ /dev/null @@ -1,2277 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import collections -import datetime -import math -import operator -import os -import struct -import threading -import time -import unittest -import uuid - -import pytest -import grpc -from google.rpc import code_pb2 - -from google.api_core import exceptions -from google.api_core.datetime_helpers import DatetimeWithNanoseconds - -from google.cloud.spanner_v1 import param_types -from google.cloud.spanner_v1.proto.type_pb2 import ARRAY -from google.cloud.spanner_v1.proto.type_pb2 import BOOL -from google.cloud.spanner_v1.proto.type_pb2 import BYTES -from google.cloud.spanner_v1.proto.type_pb2 import DATE -from google.cloud.spanner_v1.proto.type_pb2 import FLOAT64 -from google.cloud.spanner_v1.proto.type_pb2 import INT64 -from google.cloud.spanner_v1.proto.type_pb2 import STRING -from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP -from google.cloud.spanner_v1.proto.type_pb2 import Type - -from google.cloud._helpers import UTC -from google.cloud.spanner import Client -from google.cloud.spanner import KeyRange -from google.cloud.spanner import KeySet -from google.cloud.spanner import BurstyPool -from google.cloud.spanner import COMMIT_TIMESTAMP - -from test_utils.retry import RetryErrors -from test_utils.retry import RetryInstanceState -from test_utils.retry import RetryResult -from test_utils.system import unique_resource_id -from tests._fixtures import DDL_STATEMENTS - - -CREATE_INSTANCE = os.getenv("GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE") is not None -USE_RESOURCE_ROUTING = ( - os.getenv("GOOGLE_CLOUD_SPANNER_ENABLE_RESOURCE_BASED_ROUTING") == "true" -) - -if CREATE_INSTANCE: - INSTANCE_ID = "google-cloud" + unique_resource_id("-") -else: - INSTANCE_ID = os.environ.get( - "GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE", "google-cloud-python-systest" - ) -EXISTING_INSTANCES = [] -COUNTERS_TABLE = "counters" -COUNTERS_COLUMNS = ("name", "value") - -_STATUS_CODE_TO_GRPC_STATUS_CODE = { - member.value[0]: member for member in grpc.StatusCode -} - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. - """ - - CLIENT = None - INSTANCE_CONFIG = None - INSTANCE = None - - -def _has_all_ddl(database): - return len(database.ddl_statements) == len(DDL_STATEMENTS) - - -def _list_instances(): - return list(Config.CLIENT.list_instances()) - - -def setUpModule(): - Config.CLIENT = Client() - retry = RetryErrors(exceptions.ServiceUnavailable) - - configs = list(retry(Config.CLIENT.list_instance_configs)()) - - instances = retry(_list_instances)() - EXISTING_INSTANCES[:] = instances - - if CREATE_INSTANCE: - - # Defend against back-end returning configs for regions we aren't - # actually allowed to use. 
- configs = [config for config in configs if "-us-" in config.name] - - if not configs: - raise ValueError("List instance configs failed in module set up.") - - Config.INSTANCE_CONFIG = configs[0] - config_name = configs[0].name - - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, config_name) - created_op = Config.INSTANCE.create() - created_op.result(30) # block until completion - - else: - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID) - Config.INSTANCE.reload() - - -def tearDownModule(): - if CREATE_INSTANCE: - Config.INSTANCE.delete() - - -class TestInstanceAdminAPI(unittest.TestCase): - def setUp(self): - self.instances_to_delete = [] - - def tearDown(self): - for instance in self.instances_to_delete: - instance.delete() - - def test_list_instances(self): - instances = list(Config.CLIENT.list_instances()) - # We have added one new instance in `setUpModule`. - if CREATE_INSTANCE: - self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1) - for instance in instances: - instance_existence = ( - instance in EXISTING_INSTANCES or instance == Config.INSTANCE - ) - self.assertTrue(instance_existence) - - def test_reload_instance(self): - # Use same arguments as Config.INSTANCE (created in `setUpModule`) - # so we can use reload() on a fresh instance. - instance = Config.CLIENT.instance(INSTANCE_ID) - # Make sure metadata unset before reloading. - instance.display_name = None - - def _expected_display_name(instance): - return instance.display_name == Config.INSTANCE.display_name - - retry = RetryInstanceState(_expected_display_name) - - retry(instance.reload)() - - self.assertEqual(instance.display_name, Config.INSTANCE.display_name) - - @unittest.skipUnless(CREATE_INSTANCE, "Skipping instance creation") - def test_create_instance(self): - ALT_INSTANCE_ID = "new" + unique_resource_id("-") - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name) - operation = instance.create() - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(30) # raises on failure / timeout. - - # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance( - ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name - ) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - - def test_update_instance(self): - OLD_DISPLAY_NAME = Config.INSTANCE.display_name - NEW_DISPLAY_NAME = "Foo Bar Baz" - Config.INSTANCE.display_name = NEW_DISPLAY_NAME - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(30) # raises on failure / timeout. - - # Create a new instance instance and reload it. - instance_alt = Config.CLIENT.instance(INSTANCE_ID, None) - self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME) - instance_alt.reload() - self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) - - # Make sure to put the instance back the way it was for the - # other test cases. 
- Config.INSTANCE.display_name = OLD_DISPLAY_NAME - Config.INSTANCE.update() - - -class _TestData(object): - TABLE = "contacts" - COLUMNS = ("contact_id", "first_name", "last_name", "email") - ROW_DATA = ( - (1, u"Phred", u"Phlyntstone", u"phred@example.com"), - (2, u"Bharney", u"Rhubble", u"bharney@example.com"), - (3, u"Wylma", u"Phlyntstone", u"wylma@example.com"), - ) - ALL = KeySet(all_=True) - SQL = "SELECT * FROM contacts ORDER BY contact_id" - - _recurse_into_lists = True - - def _assert_timestamp(self, value, nano_value): - self.assertIsInstance(value, datetime.datetime) - self.assertIsNone(value.tzinfo) - self.assertIs(nano_value.tzinfo, UTC) - - self.assertEqual(value.year, nano_value.year) - self.assertEqual(value.month, nano_value.month) - self.assertEqual(value.day, nano_value.day) - self.assertEqual(value.hour, nano_value.hour) - self.assertEqual(value.minute, nano_value.minute) - self.assertEqual(value.second, nano_value.second) - self.assertEqual(value.microsecond, nano_value.microsecond) - if isinstance(value, DatetimeWithNanoseconds): - self.assertEqual(value.nanosecond, nano_value.nanosecond) - else: - self.assertEqual(value.microsecond * 1000, nano_value.nanosecond) - - def _check_rows_data(self, rows_data, expected=None): - if expected is None: - expected = self.ROW_DATA - - self.assertEqual(len(rows_data), len(expected)) - for row, expected in zip(rows_data, expected): - self._check_row_data(row, expected) - - def _check_row_data(self, row_data, expected): - self.assertEqual(len(row_data), len(expected)) - for found_cell, expected_cell in zip(row_data, expected): - self._check_cell_data(found_cell, expected_cell) - - def _check_cell_data(self, found_cell, expected_cell): - if isinstance(found_cell, DatetimeWithNanoseconds): - self._assert_timestamp(expected_cell, found_cell) - elif isinstance(found_cell, float) and math.isnan(found_cell): - self.assertTrue(math.isnan(expected_cell)) - elif isinstance(found_cell, list) and self._recurse_into_lists: - self.assertEqual(len(found_cell), len(expected_cell)) - for found_item, expected_item in zip(found_cell, expected_cell): - self._check_cell_data(found_item, expected_item) - else: - self.assertEqual(found_cell, expected_cell) - - -class TestDatabaseAPI(unittest.TestCase, _TestData): - DATABASE_NAME = "test_database" + unique_resource_id("_") - - @classmethod - def setUpClass(cls): - pool = BurstyPool(labels={"testcase": "database_api"}) - cls._db = Config.INSTANCE.database( - cls.DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool - ) - operation = cls._db.create() - operation.result(30) # raises on failure / timeout. - - @classmethod - def tearDownClass(cls): - cls._db.drop() - - def setUp(self): - self.to_delete = [] - - def tearDown(self): - for doomed in self.to_delete: - doomed.drop() - - @unittest.skipUnless(USE_RESOURCE_ROUTING, "requires enabling resource routing") - def test_spanner_api_use_user_specified_endpoint(self): - # Clear cache. - Client._endpoint_cache = {} - api = Config.CLIENT.instance_admin_api - resp = api.get_instance( - Config.INSTANCE.name, field_mask={"paths": ["endpoint_uris"]} - ) - if not resp or not resp.endpoint_uris: - return # no resolved endpoint. - resolved_endpoint = resp.endpoint_uris[0] - - client = Client(client_options={"api_endpoint": resolved_endpoint}) - - instance = client.instance(Config.INSTANCE.instance_id) - temp_db_id = "temp_db" + unique_resource_id("_") - temp_db = instance.database(temp_db_id) - temp_db.spanner_api - - # No endpoint cache - Default endpoint used. 
- self.assertEqual(client._endpoint_cache, {}) - - @unittest.skipUnless(USE_RESOURCE_ROUTING, "requires enabling resource routing") - def test_spanner_api_use_resolved_endpoint(self): - # Clear cache. - Client._endpoint_cache = {} - api = Config.CLIENT.instance_admin_api - resp = api.get_instance( - Config.INSTANCE.name, field_mask={"paths": ["endpoint_uris"]} - ) - if not resp or not resp.endpoint_uris: - return # no resolved endpoint. - resolved_endpoint = resp.endpoint_uris[0] - - client = Client( - client_options=Config.CLIENT._client_options - ) # Use same endpoint as main client. - - instance = client.instance(Config.INSTANCE.instance_id) - temp_db_id = "temp_db" + unique_resource_id("_") - temp_db = instance.database(temp_db_id) - temp_db.spanner_api - - # Endpoint is cached - resolved endpoint used. - self.assertIn(Config.INSTANCE.name, client._endpoint_cache) - self.assertEqual( - client._endpoint_cache[Config.INSTANCE.name], resolved_endpoint - ) - - # Endpoint is cached at a class level. - self.assertIn(Config.INSTANCE.name, Config.CLIENT._endpoint_cache) - self.assertEqual( - Config.CLIENT._endpoint_cache[Config.INSTANCE.name], resolved_endpoint - ) - - def test_list_databases(self): - # Since `Config.INSTANCE` is newly created in `setUpModule`, the - # database created in `setUpClass` here will be the only one. - database_names = [ - database.name for database in Config.INSTANCE.list_databases() - ] - self.assertTrue(self._db.name in database_names) - - def test_create_database(self): - pool = BurstyPool(labels={"testcase": "create_database"}) - temp_db_id = "temp_db" + unique_resource_id("_") - temp_db = Config.INSTANCE.database(temp_db_id, pool=pool) - operation = temp_db.create() - self.to_delete.append(temp_db) - - # We want to make sure the operation completes. - operation.result(30) # raises on failure / timeout. - - database_ids = [ - database.database_id for database in Config.INSTANCE.list_databases() - ] - self.assertIn(temp_db_id, database_ids) - - def test_table_not_found(self): - temp_db_id = "temp_db" + unique_resource_id("_") - - correct_table = "MyTable" - incorrect_table = "NotMyTable" - self.assertNotEqual(correct_table, incorrect_table) - - create_table = ( - "CREATE TABLE {} (\n" - " Id STRING(36) NOT NULL,\n" - " Field1 STRING(36) NOT NULL\n" - ") PRIMARY KEY (Id)" - ).format(correct_table) - index = "CREATE INDEX IDX ON {} (Field1)".format(incorrect_table) - - temp_db = Config.INSTANCE.database( - temp_db_id, ddl_statements=[create_table, index] - ) - self.to_delete.append(temp_db) - with self.assertRaises(exceptions.NotFound) as exc_info: - temp_db.create() - - expected = "Table not found: {0}".format(incorrect_table) - self.assertEqual(exc_info.exception.args, (expected,)) - - @pytest.mark.skip( - reason=( - "update_dataset_ddl() has a flaky timeout" - "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/" - "5629" - ) - ) - def test_update_database_ddl_with_operation_id(self): - pool = BurstyPool(labels={"testcase": "update_database_ddl"}) - temp_db_id = "temp_db" + unique_resource_id("_") - temp_db = Config.INSTANCE.database(temp_db_id, pool=pool) - create_op = temp_db.create() - self.to_delete.append(temp_db) - - # We want to make sure the operation completes. - create_op.result(240) # raises on failure / timeout. 
- # random but shortish always start with letter - operation_id = "a" + str(uuid.uuid4())[:8] - operation = temp_db.update_ddl(DDL_STATEMENTS, operation_id=operation_id) - - self.assertEqual(operation_id, operation.operation.name.split("/")[-1]) - - # We want to make sure the operation completes. - operation.result(240) # raises on failure / timeout. - - temp_db.reload() - - self.assertEqual(len(temp_db.ddl_statements), len(DDL_STATEMENTS)) - - def test_db_batch_insert_then_db_snapshot_read(self): - retry = RetryInstanceState(_has_all_ddl) - retry(self._db.reload)() - - with self._db.batch() as batch: - batch.delete(self.TABLE, self.ALL) - batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA) - - with self._db.snapshot(read_timestamp=batch.committed) as snapshot: - from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL)) - - self._check_rows_data(from_snap) - - def test_db_run_in_transaction_then_snapshot_execute_sql(self): - retry = RetryInstanceState(_has_all_ddl) - retry(self._db.reload)() - - with self._db.batch() as batch: - batch.delete(self.TABLE, self.ALL) - - def _unit_of_work(transaction, test): - rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL)) - test.assertEqual(rows, []) - - transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA) - - self._db.run_in_transaction(_unit_of_work, test=self) - - with self._db.snapshot() as after: - rows = list(after.execute_sql(self.SQL)) - self._check_rows_data(rows) - - def test_db_run_in_transaction_twice(self): - retry = RetryInstanceState(_has_all_ddl) - retry(self._db.reload)() - - with self._db.batch() as batch: - batch.delete(self.TABLE, self.ALL) - - def _unit_of_work(transaction, test): - transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA) - - self._db.run_in_transaction(_unit_of_work, test=self) - self._db.run_in_transaction(_unit_of_work, test=self) - - with self._db.snapshot() as after: - rows = list(after.execute_sql(self.SQL)) - self._check_rows_data(rows) - - def test_db_run_in_transaction_twice_4181(self): - retry = RetryInstanceState(_has_all_ddl) - retry(self._db.reload)() - - with self._db.batch() as batch: - batch.delete(COUNTERS_TABLE, self.ALL) - - def _unit_of_work(transaction, name): - transaction.insert(COUNTERS_TABLE, COUNTERS_COLUMNS, [[name, 0]]) - - self._db.run_in_transaction(_unit_of_work, name="id_1") - - with self.assertRaises(exceptions.AlreadyExists): - self._db.run_in_transaction(_unit_of_work, name="id_1") - - self._db.run_in_transaction(_unit_of_work, name="id_2") - - with self._db.snapshot() as after: - rows = list(after.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL)) - self.assertEqual(len(rows), 2) - - -SOME_DATE = datetime.date(2011, 1, 17) -SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612) -NANO_TIME = DatetimeWithNanoseconds(1995, 8, 31, nanosecond=987654321) -POS_INF = float("+inf") -NEG_INF = float("-inf") -OTHER_NAN, = struct.unpack(" - self._check_sql_results( - self._db, - sql="SELECT @v", - params={"v": single_value}, - param_types={"v": Type(code=type_name)}, - expected=[(single_value,)], - order=False, - ) - - # Bind a null - self._check_sql_results( - self._db, - sql="SELECT @v", - params={"v": None}, - param_types={"v": Type(code=type_name)}, - expected=[(None,)], - order=False, - ) - - # Bind an array of - array_type = Type(code=ARRAY, array_element_type=Type(code=type_name)) - - if expected_array_value is None: - expected_array_value = array_value - - self._check_sql_results( - self._db, - sql="SELECT @v", - 
params={"v": array_value}, - param_types={"v": array_type}, - expected=[(expected_array_value,)], - order=False, - ) - - # Bind an empty array of - self._check_sql_results( - self._db, - sql="SELECT @v", - params={"v": []}, - param_types={"v": array_type}, - expected=[([],)], - order=False, - ) - - # Bind a null array of - self._check_sql_results( - self._db, - sql="SELECT @v", - params={"v": None}, - param_types={"v": array_type}, - expected=[(None,)], - order=False, - ) - - def test_execute_sql_w_string_bindings(self): - self._bind_test_helper(STRING, "Phred", ["Phred", "Bharney"]) - - def test_execute_sql_w_bool_bindings(self): - self._bind_test_helper(BOOL, True, [True, False, True]) - - def test_execute_sql_w_int64_bindings(self): - self._bind_test_helper(INT64, 42, [123, 456, 789]) - - def test_execute_sql_w_float64_bindings(self): - self._bind_test_helper(FLOAT64, 42.3, [12.3, 456.0, 7.89]) - - def test_execute_sql_w_float_bindings_transfinite(self): - - # Find -inf - self._check_sql_results( - self._db, - sql="SELECT @neg_inf", - params={"neg_inf": NEG_INF}, - param_types={"neg_inf": Type(code=FLOAT64)}, - expected=[(NEG_INF,)], - order=False, - ) - - # Find +inf - self._check_sql_results( - self._db, - sql="SELECT @pos_inf", - params={"pos_inf": POS_INF}, - param_types={"pos_inf": Type(code=FLOAT64)}, - expected=[(POS_INF,)], - order=False, - ) - - def test_execute_sql_w_bytes_bindings(self): - self._bind_test_helper(BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"]) - - def test_execute_sql_w_timestamp_bindings(self): - import pytz - from google.api_core.datetime_helpers import DatetimeWithNanoseconds - - timestamp_1 = DatetimeWithNanoseconds( - 1989, 1, 17, 17, 59, 12, nanosecond=345612789 - ) - - timestamp_2 = DatetimeWithNanoseconds( - 1989, 1, 17, 17, 59, 13, nanosecond=456127893 - ) - - timestamps = [timestamp_1, timestamp_2] - - # In round-trip, timestamps acquire a timezone value. 
- expected_timestamps = [ - timestamp.replace(tzinfo=pytz.UTC) for timestamp in timestamps - ] - - self._recurse_into_lists = False - self._bind_test_helper(TIMESTAMP, timestamp_1, timestamps, expected_timestamps) - - def test_execute_sql_w_date_bindings(self): - import datetime - - dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)] - self._bind_test_helper(DATE, SOME_DATE, dates) - - def test_execute_sql_w_query_param_struct(self): - NAME = "Phred" - COUNT = 123 - SIZE = 23.456 - HEIGHT = 188.0 - WEIGHT = 97.6 - - record_type = param_types.Struct( - [ - param_types.StructField("name", param_types.STRING), - param_types.StructField("count", param_types.INT64), - param_types.StructField("size", param_types.FLOAT64), - param_types.StructField( - "nested", - param_types.Struct( - [ - param_types.StructField("height", param_types.FLOAT64), - param_types.StructField("weight", param_types.FLOAT64), - ] - ), - ), - ] - ) - - # Query with null struct, explicit type - self._check_sql_results( - self._db, - sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight", - params={"r": None}, - param_types={"r": record_type}, - expected=[(None, None, None, None)], - order=False, - ) - - # Query with non-null struct, explicit type, NULL values - self._check_sql_results( - self._db, - sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight", - params={"r": (None, None, None, None)}, - param_types={"r": record_type}, - expected=[(None, None, None, None)], - order=False, - ) - - # Query with non-null struct, explicit type, nested NULL values - self._check_sql_results( - self._db, - sql="SELECT @r.nested.weight", - params={"r": (None, None, None, (None, None))}, - param_types={"r": record_type}, - expected=[(None,)], - order=False, - ) - - # Query with non-null struct, explicit type - self._check_sql_results( - self._db, - sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight", - params={"r": (NAME, COUNT, SIZE, (HEIGHT, WEIGHT))}, - param_types={"r": record_type}, - expected=[(NAME, COUNT, SIZE, WEIGHT)], - order=False, - ) - - # Query with empty struct, explicitly empty type - empty_type = param_types.Struct([]) - self._check_sql_results( - self._db, - sql="SELECT @r IS NULL", - params={"r": ()}, - param_types={"r": empty_type}, - expected=[(False,)], - order=False, - ) - - # Query with null struct, explicitly empty type - self._check_sql_results( - self._db, - sql="SELECT @r IS NULL", - params={"r": None}, - param_types={"r": empty_type}, - expected=[(True,)], - order=False, - ) - - # Query with equality check for struct value - struct_equality_query = ( - "SELECT " '@struct_param=STRUCT(1,"bob")' - ) - struct_type = param_types.Struct( - [ - param_types.StructField("threadf", param_types.INT64), - param_types.StructField("userf", param_types.STRING), - ] - ) - self._check_sql_results( - self._db, - sql=struct_equality_query, - params={"struct_param": (1, "bob")}, - param_types={"struct_param": struct_type}, - expected=[(True,)], - order=False, - ) - - # Query with nullness test for struct - self._check_sql_results( - self._db, - sql="SELECT @struct_param IS NULL", - params={"struct_param": None}, - param_types={"struct_param": struct_type}, - expected=[(True,)], - order=False, - ) - - # Query with null array-of-struct - array_elem_type = param_types.Struct( - [param_types.StructField("threadid", param_types.INT64)] - ) - array_type = param_types.Array(array_elem_type) - self._check_sql_results( - self._db, - sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a", - 
params={"struct_arr_param": None}, - param_types={"struct_arr_param": array_type}, - expected=[], - order=False, - ) - - # Query with non-null array-of-struct - self._check_sql_results( - self._db, - sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a", - params={"struct_arr_param": [(123,), (456,)]}, - param_types={"struct_arr_param": array_type}, - expected=[(123,), (456,)], - order=False, - ) - - # Query with null array-of-struct field - struct_type_with_array_field = param_types.Struct( - [ - param_types.StructField("intf", param_types.INT64), - param_types.StructField("arraysf", array_type), - ] - ) - self._check_sql_results( - self._db, - sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a", - params={"struct_param": (123, None)}, - param_types={"struct_param": struct_type_with_array_field}, - expected=[], - order=False, - ) - - # Query with non-null array-of-struct field - self._check_sql_results( - self._db, - sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a", - params={"struct_param": (123, ((456,), (789,)))}, - param_types={"struct_param": struct_type_with_array_field}, - expected=[(456,), (789,)], - order=False, - ) - - # Query with anonymous / repeated-name fields - anon_repeated_array_elem_type = param_types.Struct( - [ - param_types.StructField("", param_types.INT64), - param_types.StructField("", param_types.STRING), - ] - ) - anon_repeated_array_type = param_types.Array(anon_repeated_array_elem_type) - self._check_sql_results( - self._db, - sql="SELECT CAST(t as STRUCT).* " - "FROM UNNEST(@struct_param) t", - params={"struct_param": [(123, "abcdef")]}, - param_types={"struct_param": anon_repeated_array_type}, - expected=[(123, "abcdef")], - order=False, - ) - - # Query and return a struct parameter - value_type = param_types.Struct( - [ - param_types.StructField("message", param_types.STRING), - param_types.StructField("repeat", param_types.INT64), - ] - ) - value_query = ( - "SELECT ARRAY(SELECT AS STRUCT message, repeat " - "FROM (SELECT @value.message AS message, " - "@value.repeat AS repeat)) AS value" - ) - self._check_sql_results( - self._db, - sql=value_query, - params={"value": ("hello", 1)}, - param_types={"value": value_type}, - expected=[([["hello", 1]],)], - order=False, - ) - - def test_execute_sql_returning_transfinite_floats(self): - - with self._db.snapshot(multi_use=True) as snapshot: - # Query returning -inf, +inf, NaN as column values - rows = list( - snapshot.execute_sql( - "SELECT " - 'CAST("-inf" AS FLOAT64), ' - 'CAST("+inf" AS FLOAT64), ' - 'CAST("NaN" AS FLOAT64)' - ) - ) - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], float("-inf")) - self.assertEqual(rows[0][1], float("+inf")) - # NaNs cannot be compared by equality. - self.assertTrue(math.isnan(rows[0][2])) - - # Query returning array of -inf, +inf, NaN as one column - rows = list( - snapshot.execute_sql( - "SELECT" - ' [CAST("-inf" AS FLOAT64),' - ' CAST("+inf" AS FLOAT64),' - ' CAST("NaN" AS FLOAT64)]' - ) - ) - self.assertEqual(len(rows), 1) - float_array, = rows[0] - self.assertEqual(float_array[0], float("-inf")) - self.assertEqual(float_array[1], float("+inf")) - # NaNs cannot be searched for by equality. 
- self.assertTrue(math.isnan(float_array[2])) - - def test_partition_query(self): - row_count = 40 - sql = "SELECT * FROM {}".format(self.TABLE) - committed = self._set_up_table(row_count) - all_data_rows = list(self._row_data(row_count)) - - union = [] - batch_txn = self._db.batch_snapshot(read_timestamp=committed) - for batch in batch_txn.generate_query_batches(sql): - p_results_iter = batch_txn.process(batch) - union.extend(list(p_results_iter)) - - self.assertEqual(union, all_data_rows) - batch_txn.close() - - -class TestStreamingChunking(unittest.TestCase, _TestData): - @classmethod - def setUpClass(cls): - from tests.system.utils.streaming_utils import INSTANCE_NAME - from tests.system.utils.streaming_utils import DATABASE_NAME - - instance = Config.CLIENT.instance(INSTANCE_NAME) - if not instance.exists(): - raise unittest.SkipTest( - "Run 'tests/system/utils/populate_streaming.py' to enable." - ) - - database = instance.database(DATABASE_NAME) - if not instance.exists(): - raise unittest.SkipTest( - "Run 'tests/system/utils/populate_streaming.py' to enable." - ) - - cls._db = database - - def _verify_one_column(self, table_desc): - sql = "SELECT chunk_me FROM {}".format(table_desc.table) - with self._db.snapshot() as snapshot: - rows = list(snapshot.execute_sql(sql)) - self.assertEqual(len(rows), table_desc.row_count) - expected = table_desc.value() - for row in rows: - self.assertEqual(row[0], expected) - - def _verify_two_columns(self, table_desc): - sql = "SELECT chunk_me, chunk_me_2 FROM {}".format(table_desc.table) - with self._db.snapshot() as snapshot: - rows = list(snapshot.execute_sql(sql)) - self.assertEqual(len(rows), table_desc.row_count) - expected = table_desc.value() - for row in rows: - self.assertEqual(row[0], expected) - self.assertEqual(row[1], expected) - - def test_four_kay(self): - from tests.system.utils.streaming_utils import FOUR_KAY - - self._verify_one_column(FOUR_KAY) - - def test_forty_kay(self): - from tests.system.utils.streaming_utils import FORTY_KAY - - self._verify_one_column(FORTY_KAY) - - def test_four_hundred_kay(self): - from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY - - self._verify_one_column(FOUR_HUNDRED_KAY) - - def test_four_meg(self): - from tests.system.utils.streaming_utils import FOUR_MEG - - self._verify_two_columns(FOUR_MEG) - - -class CustomException(Exception): - """Placeholder for any user-defined exception.""" - - -class _DatabaseDropper(object): - """Helper for cleaning up databases created on-the-fly.""" - - def __init__(self, db): - self._db = db - - def delete(self): - self._db.drop() - - -class _ReadAbortTrigger(object): - """Helper for tests provoking abort-during-read.""" - - KEY1 = "key1" - KEY2 = "key2" - - def __init__(self): - self.provoker_started = threading.Event() - self.provoker_done = threading.Event() - self.handler_running = threading.Event() - self.handler_done = threading.Event() - - def _provoke_abort_unit_of_work(self, transaction): - keyset = KeySet(keys=[(self.KEY1,)]) - rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset)) - - assert len(rows) == 1 - row = rows[0] - value = row[1] - - self.provoker_started.set() - - self.handler_running.wait() - - transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY1, value + 1]]) - - def provoke_abort(self, database): - database.run_in_transaction(self._provoke_abort_unit_of_work) - self.provoker_done.set() - - def _handle_abort_unit_of_work(self, transaction): - keyset_1 = KeySet(keys=[(self.KEY1,)]) - rows_1 = 
list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_1)) - - assert len(rows_1) == 1 - row_1 = rows_1[0] - value_1 = row_1[1] - - self.handler_running.set() - - self.provoker_done.wait() - - keyset_2 = KeySet(keys=[(self.KEY2,)]) - rows_2 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_2)) - - assert len(rows_2) == 1 - row_2 = rows_2[0] - value_2 = row_2[1] - - transaction.update( - COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY2, value_1 + value_2]] - ) - - def handle_abort(self, database): - database.run_in_transaction(self._handle_abort_unit_of_work) - self.handler_done.set() - - -class FauxCall(object): - def __init__(self, code, details="FauxCall"): - self._code = code - self._details = details - - def initial_metadata(self): - return {} - - def trailing_metadata(self): - return {} - - def code(self): - return self._code - - def details(self): - return self._details diff --git a/spanner/tests/system/utils/__init__.py b/spanner/tests/system/utils/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/spanner/tests/system/utils/clear_streaming.py b/spanner/tests/system/utils/clear_streaming.py deleted file mode 100644 index 6c9dee29f5a8..000000000000 --- a/spanner/tests/system/utils/clear_streaming.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Depopulate spanner databases with data for streaming system tests.""" - -from google.cloud.spanner import Client - -# Import relative to the script's directory -from streaming_utils import DATABASE_NAME -from streaming_utils import INSTANCE_NAME -from streaming_utils import print_func - - -def remove_database(client): - instance = client.instance(INSTANCE_NAME) - - if not instance.exists(): - print_func("Instance does not exist: {}".format(INSTANCE_NAME)) - return - - print_func("Instance exists: {}".format(INSTANCE_NAME)) - instance.reload() - - database = instance.database(DATABASE_NAME) - - if not database.exists(): - print_func("Database does not exist: {}".format(DATABASE_NAME)) - return - print_func("Dropping database: {}".format(DATABASE_NAME)) - database.drop() - - -if __name__ == "__main__": - client = Client() - remove_database(client) diff --git a/spanner/tests/system/utils/populate_streaming.py b/spanner/tests/system/utils/populate_streaming.py deleted file mode 100644 index a336228a15a4..000000000000 --- a/spanner/tests/system/utils/populate_streaming.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Populate spanner databases with data for streaming system tests.""" - -from google.cloud.spanner_v1 import Client -from google.cloud.spanner_v1.keyset import KeySet -from google.cloud.spanner_v1.pool import BurstyPool - -# Import relative to the script's directory -from streaming_utils import FOUR_KAY -from streaming_utils import FORTY_KAY -from streaming_utils import FOUR_HUNDRED_KAY -from streaming_utils import FOUR_MEG -from streaming_utils import DATABASE_NAME -from streaming_utils import INSTANCE_NAME -from streaming_utils import print_func - - -DDL = """\ -CREATE TABLE {0.table} ( - pkey INT64, - chunk_me STRING({0.value_size}) ) - PRIMARY KEY (pkey); -CREATE TABLE {1.table} ( - pkey INT64, - chunk_me STRING({1.value_size}) ) - PRIMARY KEY (pkey); -CREATE TABLE {2.table} ( - pkey INT64, - chunk_me STRING({2.value_size}) ) - PRIMARY KEY (pkey); -CREATE TABLE {3.table} ( - pkey INT64, - chunk_me STRING({3.value_size}), - chunk_me_2 STRING({3.value_size}) ) - PRIMARY KEY (pkey); -""".format( - FOUR_KAY, FORTY_KAY, FOUR_HUNDRED_KAY, FOUR_MEG -) - - -DDL_STATEMENTS = [stmt.strip() for stmt in DDL.split(";") if stmt.strip()] - - -def ensure_database(client): - instance = client.instance(INSTANCE_NAME) - - if not instance.exists(): - configs = list(client.list_instance_configs()) - config_name = configs[0].name - print_func("Creating instance: {}".format(INSTANCE_NAME)) - instance = client.instance(INSTANCE_NAME, config_name) - operation = instance.create() - operation.result(30) - else: - print_func("Instance exists: {}".format(INSTANCE_NAME)) - instance.reload() - - pool = BurstyPool() - database = instance.database( - DATABASE_NAME, ddl_statements=DDL_STATEMENTS, pool=pool - ) - - if not database.exists(): - print_func("Creating database: {}".format(DATABASE_NAME)) - operation = database.create() - operation.result(30) - else: - print_func("Database exists: {}".format(DATABASE_NAME)) - database.reload() - - return database - - -def populate_table(database, table_desc): - all_ = KeySet(all_=True) - columns = ("pkey", "chunk_me") - with database.snapshot() as snapshot: - rows = list( - snapshot.execute_sql("SELECT COUNT(*) FROM {}".format(table_desc.table)) - ) - assert len(rows) == 1 - count = rows[0][0] - if count != table_desc.row_count: - print_func("Repopulating table: {}".format(table_desc.table)) - chunk_me = table_desc.value() - row_data = [(index, chunk_me) for index in range(table_desc.row_count)] - with database.batch() as batch: - batch.delete(table_desc.table, all_) - batch.insert(table_desc.table, columns, row_data) - else: - print_func("Leaving table: {}".format(table_desc.table)) - - -def populate_table_2_columns(database, table_desc): - all_ = KeySet(all_=True) - columns = ("pkey", "chunk_me", "chunk_me_2") - with database.snapshot() as snapshot: - rows = list( - snapshot.execute_sql("SELECT COUNT(*) FROM {}".format(table_desc.table)) - ) - assert len(rows) == 1 - count = rows[0][0] - if count != table_desc.row_count: - print_func("Repopulating table: {}".format(table_desc.table)) - chunk_me = table_desc.value() - row_data = [ - (index, chunk_me, chunk_me) for index in range(table_desc.row_count) - ] - with database.batch() as batch: - batch.delete(table_desc.table, all_) - batch.insert(table_desc.table, columns, row_data) - else: - print_func("Leaving table: {}".format(table_desc.table)) - - -def populate_streaming(client): - database = ensure_database(client) - 
populate_table(database, FOUR_KAY) - populate_table(database, FORTY_KAY) - populate_table(database, FOUR_HUNDRED_KAY) - # Max STRING column size is just larger than 2 Mb, so use two columns - populate_table_2_columns(database, FOUR_MEG) - - -if __name__ == "__main__": - client = Client() - populate_streaming(client) diff --git a/spanner/tests/system/utils/scrub_instances.py b/spanner/tests/system/utils/scrub_instances.py deleted file mode 100644 index 79cd51fdfc94..000000000000 --- a/spanner/tests/system/utils/scrub_instances.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from google.cloud.spanner import Client -from .streaming_utils import INSTANCE_NAME as STREAMING_INSTANCE - -STANDARD_INSTANCE = "google-cloud-python-systest" - - -def scrub_instances(client): - for instance in client.list_instances(): - if instance.name == STREAMING_INSTANCE: - print("Not deleting streaming instance: {}".format(STREAMING_INSTANCE)) - continue - elif instance.name == STANDARD_INSTANCE: - print("Not deleting standard instance: {}".format(STANDARD_INSTANCE)) - else: - print("deleting instance: {}".format(instance.name)) - instance.delete() - - -if __name__ == "__main__": - client = Client() - scrub_instances(client) diff --git a/spanner/tests/system/utils/streaming_utils.py b/spanner/tests/system/utils/streaming_utils.py deleted file mode 100644 index a39637bf0f44..000000000000 --- a/spanner/tests/system/utils/streaming_utils.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
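A minimal usage sketch of the batch-snapshot API exercised by the test_partition_query system test above: one snapshot fans a query out into partitions, and the caller re-assembles the partial results. This is an illustrative sketch, not part of the deleted files; the instance, database, and table names are placeholders.

from google.cloud import spanner

client = spanner.Client()
instance = client.instance("my-instance")      # placeholder instance ID
database = instance.database("my-database")    # placeholder database ID

# A single snapshot is shared by every partition, so the union of the
# partition results is a consistent view of the data.
batch_txn = database.batch_snapshot()

rows = []
for batch in batch_txn.generate_query_batches("SELECT * FROM contacts"):
    # Each batch could be handed to a separate worker; here the
    # partitions are simply processed serially, as in the test above.
    rows.extend(batch_txn.process(batch))

batch_txn.close()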
- -import collections -import os - -INSTANCE_NAME = "gcp-streaming-systests" -DATABASE_NAME = "testing" -_SHOULD_PRINT = os.getenv("GOOGLE_CLOUD_NO_PRINT") != "true" - - -class _TableDesc( - collections.namedtuple( - "TableDesc", ("table", "row_count", "value_size", "column_count") - ) -): - def value(self): - return u"X" * self.value_size - - -FOUR_KAY = _TableDesc("four_kay", 1000, 4096, 1) -FORTY_KAY = _TableDesc("forty_kay", 100, 4096 * 10, 1) -FOUR_HUNDRED_KAY = _TableDesc("four_hundred_kay", 25, 4096 * 100, 1) -FOUR_MEG = _TableDesc("four_meg", 10, 2048 * 1024, 2) - - -def print_func(message): - if _SHOULD_PRINT: - print(message) diff --git a/spanner/tests/unit/__init__.py b/spanner/tests/unit/__init__.py deleted file mode 100644 index df379f1e9d88..000000000000 --- a/spanner/tests/unit/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/spanner/tests/unit/gapic/v1/test_database_admin_client_v1.py b/spanner/tests/unit/gapic/v1/test_database_admin_client_v1.py deleted file mode 100644 index d828f8ae1cc0..000000000000 --- a/spanner/tests/unit/gapic/v1/test_database_admin_client_v1.py +++ /dev/null @@ -1,460 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
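For orientation, the _TableDesc descriptors above drive both the DDL template in populate_streaming.py (via "{0.table}" / "{0.value_size}" attribute lookups in str.format) and the amount of data each chunking test streams. A back-of-the-envelope sketch, assuming one byte per "X" character returned by value():

import collections

TableDesc = collections.namedtuple(
    "TableDesc", ("table", "row_count", "value_size", "column_count")
)
four_kay = TableDesc("four_kay", 1000, 4096, 1)

# "{0.table}" and "{0.value_size}" are attribute lookups on format()'s first argument.
ddl = (
    "CREATE TABLE {0.table} ( pkey INT64, chunk_me STRING({0.value_size}) )"
    " PRIMARY KEY (pkey)"
).format(four_kay)
# -> CREATE TABLE four_kay ( pkey INT64, chunk_me STRING(4096) ) PRIMARY KEY (pkey)

# Approximate payload per table: row_count * column_count * value_size bytes.
approx_bytes = {
    "four_kay":         1000 * 1 * 4096,          # ~4 MiB in 4 KiB values
    "forty_kay":        100 * 1 * (4096 * 10),    # ~4 MiB in 40 KiB values
    "four_hundred_kay": 25 * 1 * (4096 * 100),    # ~10 MiB in 400 KiB values
    "four_meg":         10 * 2 * (2048 * 1024),   # ~40 MiB, two ~2 MiB columns per row
}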
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import spanner_admin_database_v1 -from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestDatabaseAdminClient(object): - def test_create_database(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = spanner_database_admin_pb2.Database(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_database", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - create_statement = "createStatement552974828" - - response = client.create_database(parent, create_statement) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.CreateDatabaseRequest( - parent=parent, create_statement=create_statement - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_database_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_database_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - create_statement = "createStatement552974828" - - response = client.create_database(parent, create_statement) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_database(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = spanner_database_admin_pb2.Database(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - name = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - response = client.get_database(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.GetDatabaseRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_database_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - name = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - with pytest.raises(CustomException): - client.get_database(name) - - def test_update_database_ddl(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_database_ddl", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - statements = [] - - response = client.update_database_ddl(database, statements) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.UpdateDatabaseDdlRequest( - database=database, statements=statements - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_database_ddl_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_database_ddl_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - statements = [] - - response = client.update_database_ddl(database, statements) - exception = response.exception() - assert exception.errors[0] == error - - def test_drop_database(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - client.drop_database(database) - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.DropDatabaseRequest( - database=database - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_drop_database_exception(self): - # Mock the API response - 
channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - with pytest.raises(CustomException): - client.drop_database(database) - - def test_get_database_ddl(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_database_admin_pb2.GetDatabaseDdlResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - response = client.get_database_ddl(database) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.GetDatabaseDdlRequest( - database=database - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_database_ddl_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - with pytest.raises(CustomException): - client.get_database_ddl(database) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) - - def test_list_databases(self): - # Setup Expected Response - next_page_token = "" - databases_element = {} - databases = [databases_element] - expected_response = {"next_page_token": next_page_token, "databases": databases} - expected_response = spanner_database_admin_pb2.ListDatabasesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_databases(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.databases[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_database_admin_pb2.ListDatabasesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_databases_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = spanner_admin_database_v1.DatabaseAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_databases(parent) - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/spanner/tests/unit/gapic/v1/test_instance_admin_client_v1.py b/spanner/tests/unit/gapic/v1/test_instance_admin_client_v1.py deleted file mode 100644 index da8dfcd8d410..000000000000 --- a/spanner/tests/unit/gapic/v1/test_instance_admin_client_v1.py +++ /dev/null @@ -1,538 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import spanner_admin_instance_v1 -from google.cloud.spanner_admin_instance_v1.proto import spanner_instance_admin_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestInstanceAdminClient(object): - def test_list_instance_configs(self): - # Setup Expected Response - next_page_token = "" - instance_configs_element = {} - instance_configs = [instance_configs_element] - expected_response = { - "next_page_token": next_page_token, - "instance_configs": instance_configs, - } - expected_response = spanner_instance_admin_pb2.ListInstanceConfigsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_instance_configs(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.instance_configs[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = 
spanner_instance_admin_pb2.ListInstanceConfigsRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instance_configs_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_instance_configs(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_instance_config(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = spanner_instance_admin_pb2.InstanceConfig( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - name = client.instance_config_path("[PROJECT]", "[INSTANCE_CONFIG]") - - response = client.get_instance_config(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.GetInstanceConfigRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_config_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - name = client.instance_config_path("[PROJECT]", "[INSTANCE_CONFIG]") - - with pytest.raises(CustomException): - client.get_instance_config(name) - - def test_list_instances(self): - # Setup Expected Response - next_page_token = "" - instances_element = {} - instances = [instances_element] - expected_response = {"next_page_token": next_page_token, "instances": instances} - expected_response = spanner_instance_admin_pb2.ListInstancesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - paged_list_response = client.list_instances(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.instances[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.ListInstancesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instances_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - parent = 
client.project_path("[PROJECT]") - - paged_list_response = client.list_instances(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - config = "config-1354792126" - display_name = "displayName1615086568" - node_count = 1539922066 - expected_response = { - "name": name_2, - "config": config, - "display_name": display_name, - "node_count": node_count, - } - expected_response = spanner_instance_admin_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_instance(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.GetInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_instance(name) - - def test_create_instance(self): - # Setup Expected Response - name = "name3373707" - config = "config-1354792126" - display_name = "displayName1615086568" - node_count = 1539922066 - expected_response = { - "name": name, - "config": config, - "display_name": display_name, - "node_count": node_count, - } - expected_response = spanner_instance_admin_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - - response = client.create_instance(parent, instance_id, instance) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - 
instance_id = "instanceId-2101995259" - instance = {} - - response = client.create_instance(parent, instance_id, instance) - exception = response.exception() - assert exception.errors[0] == error - - def test_update_instance(self): - # Setup Expected Response - name = "name3373707" - config = "config-1354792126" - display_name = "displayName1615086568" - node_count = 1539922066 - expected_response = { - "name": name, - "config": config, - "display_name": display_name, - "node_count": node_count, - } - expected_response = spanner_instance_admin_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - instance = {} - field_mask = {} - - response = client.update_instance(instance, field_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.UpdateInstanceRequest( - instance=instance, field_mask=field_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - instance = {} - field_mask = {} - - response = client.update_instance(instance, field_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_instance(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - client.delete_instance(name) - - assert len(channel.requests) == 1 - expected_request = spanner_instance_admin_pb2.DeleteInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.delete_instance(name) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"21" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_admin_instance_v1.InstanceAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - 
with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) diff --git a/spanner/tests/unit/gapic/v1/test_spanner_client_v1.py b/spanner/tests/unit/gapic/v1/test_spanner_client_v1.py deleted file mode 100644 index a13390265837..000000000000 --- a/spanner/tests/unit/gapic/v1/test_spanner_client_v1.py +++ /dev/null @@ -1,722 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud.spanner_v1.gapic import spanner_client as spanner_v1 -from google.cloud.spanner_v1.proto import keys_pb2 -from google.cloud.spanner_v1.proto import result_set_pb2 -from google.cloud.spanner_v1.proto import spanner_pb2 -from google.cloud.spanner_v1.proto import transaction_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestSpannerClient(object): - def test_create_session(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = spanner_pb2.Session(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - response = client.create_session(database) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.CreateSessionRequest(database=database) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_session_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request 
- database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - with pytest.raises(CustomException): - client.create_session(database) - - def test_batch_create_sessions(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.BatchCreateSessionsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - session_count = 185691686 - - response = client.batch_create_sessions(database, session_count) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.BatchCreateSessionsRequest( - database=database, session_count=session_count - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_batch_create_sessions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - session_count = 185691686 - - with pytest.raises(CustomException): - client.batch_create_sessions(database, session_count) - - def test_get_session(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = spanner_pb2.Session(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - name = client.session_path("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") - - response = client.get_session(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.GetSessionRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_session_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - name = client.session_path("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") - - with pytest.raises(CustomException): - client.get_session(name) - - def test_list_sessions(self): - # Setup Expected Response - next_page_token = "" - sessions_element = {} - sessions = [sessions_element] - expected_response = {"next_page_token": next_page_token, "sessions": sessions} - expected_response = spanner_pb2.ListSessionsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - 
paged_list_response = client.list_sessions(database) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.sessions[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ListSessionsRequest(database=database) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_sessions_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - database = client.database_path("[PROJECT]", "[INSTANCE]", "[DATABASE]") - - paged_list_response = client.list_sessions(database) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_session(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - name = client.session_path("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") - - client.delete_session(name) - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.DeleteSessionRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_session_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - name = client.session_path("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") - - with pytest.raises(CustomException): - client.delete_session(name) - - def test_execute_sql(self): - # Setup Expected Response - expected_response = {} - expected_response = result_set_pb2.ResultSet(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - response = client.execute_sql(session, sql) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ExecuteSqlRequest(session=session, sql=sql) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_execute_sql_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - with pytest.raises(CustomException): - client.execute_sql(session, sql) - - def test_execute_streaming_sql(self): - # Setup Expected Response - chunked_value = True - resume_token = b"103" - expected_response = { - "chunked_value": chunked_value, - "resume_token": resume_token, - } - expected_response = result_set_pb2.PartialResultSet(**expected_response) - - # Mock 
the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - response = client.execute_streaming_sql(session, sql) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ExecuteSqlRequest(session=session, sql=sql) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_execute_streaming_sql_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - with pytest.raises(CustomException): - client.execute_streaming_sql(session, sql) - - def test_execute_batch_dml(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.ExecuteBatchDmlResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - transaction = {} - statements = [] - seqno = 109325920 - - response = client.execute_batch_dml(session, transaction, statements, seqno) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ExecuteBatchDmlRequest( - session=session, transaction=transaction, statements=statements, seqno=seqno - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_execute_batch_dml_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - transaction = {} - statements = [] - seqno = 109325920 - - with pytest.raises(CustomException): - client.execute_batch_dml(session, transaction, statements, seqno) - - def test_read(self): - # Setup Expected Response - expected_response = {} - expected_response = result_set_pb2.ResultSet(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - columns = [] - key_set = {} - - response = client.read(session, table, columns, key_set) - assert expected_response == response - - assert 
len(channel.requests) == 1 - expected_request = spanner_pb2.ReadRequest( - session=session, table=table, columns=columns, key_set=key_set - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - columns = [] - key_set = {} - - with pytest.raises(CustomException): - client.read(session, table, columns, key_set) - - def test_streaming_read(self): - # Setup Expected Response - chunked_value = True - resume_token = b"103" - expected_response = { - "chunked_value": chunked_value, - "resume_token": resume_token, - } - expected_response = result_set_pb2.PartialResultSet(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - columns = [] - key_set = {} - - response = client.streaming_read(session, table, columns, key_set) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.ReadRequest( - session=session, table=table, columns=columns, key_set=key_set - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_streaming_read_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - columns = [] - key_set = {} - - with pytest.raises(CustomException): - client.streaming_read(session, table, columns, key_set) - - def test_begin_transaction(self): - # Setup Expected Response - id_ = b"27" - expected_response = {"id": id_} - expected_response = transaction_pb2.Transaction(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - options_ = {} - - response = client.begin_transaction(session, options_) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.BeginTransactionRequest( - session=session, options=options_ - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_begin_transaction_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - options_ = {} - - with pytest.raises(CustomException): - client.begin_transaction(session, options_) - - def test_commit(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.CommitResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - - response = client.commit(session) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.CommitRequest(session=session) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_commit_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - - with pytest.raises(CustomException): - client.commit(session) - - def test_rollback(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - transaction_id = b"28" - - client.rollback(session, transaction_id) - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.RollbackRequest( - session=session, transaction_id=transaction_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_rollback_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - transaction_id = b"28" - - with pytest.raises(CustomException): - client.rollback(session, transaction_id) - - def test_partition_query(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.PartitionResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - response = client.partition_query(session, sql) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = 
spanner_pb2.PartitionQueryRequest(session=session, sql=sql) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partition_query_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - sql = "sql114126" - - with pytest.raises(CustomException): - client.partition_query(session, sql) - - def test_partition_read(self): - # Setup Expected Response - expected_response = {} - expected_response = spanner_pb2.PartitionResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup Request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - key_set = {} - - response = client.partition_read(session, table, key_set) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = spanner_pb2.PartitionReadRequest( - session=session, table=table, key_set=key_set - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partition_read_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = spanner_v1.SpannerClient() - - # Setup request - session = client.session_path( - "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]" - ) - table = "table110115790" - key_set = {} - - with pytest.raises(CustomException): - client.partition_read(session, table, key_set) diff --git a/spanner/tests/unit/streaming-read-acceptance-test.json b/spanner/tests/unit/streaming-read-acceptance-test.json deleted file mode 100644 index 9b44b4077812..000000000000 --- a/spanner/tests/unit/streaming-read-acceptance-test.json +++ /dev/null @@ -1,217 +0,0 @@ -{"tests": [ - { - "result": {"value": [[ - true, - "abc", - "100", - 1.1, - "YWJj", - [ - "abc", - "def", - null, - "ghi" - ], - [ - ["abc"], - ["def"], - ["ghi"] - ] - ]]}, - "chunks": ["{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"BOOL\"\n }\n }, {\n \"name\": \"f2\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f3\",\n \"type\": {\n \"code\": \"INT64\"\n }\n }, {\n \"name\": \"f4\",\n \"type\": {\n \"code\": \"FLOAT64\"\n }\n }, {\n \"name\": \"f5\",\n \"type\": {\n \"code\": \"BYTES\"\n }\n }, {\n \"name\": \"f6\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }, {\n \"name\": \"f7\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f71\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [true, \"abc\", \"100\", 1.1, \"YWJj\", [\"abc\", \"def\", null, \"ghi\"], [[\"abc\"], [\"def\"], [\"ghi\"]]]\n}"], - "name": "Basic Test" - }, - { - "result": {"value": [["abcdefghi"]]}, - "chunks": [ - 
"{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n },\n \"values\": [\"abc\"],\n \"chunkedValue\": true\n}", - "{\n \"values\": [\"def\"],\n \"chunkedValue\": true\n}", - "{\n \"values\": [\"ghi\"]\n}" - ], - "name": "String Chunking Test" - }, - { - "result": {"value": [[[ - "abc", - "def", - "ghi", - "jkl" - ]]]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }]\n }\n },\n \"values\": [[\"abc\", \"d\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"ef\", \"gh\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"i\", \"jkl\"]]\n}" - ], - "name": "String Array Chunking Test" - }, - { - "result": {"value": [[[ - "abc", - "def", - null, - "ghi", - null, - "jkl" - ]]]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }]\n }\n },\n \"values\": [[\"abc\", \"def\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[null, \"ghi\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[null, \"jkl\"]]\n}" - ], - "name": "String Array Chunking Test With Nulls" - }, - { - "result": {"value": [[[ - "abc", - "def", - "ghi", - "jkl" - ]]]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }]\n }\n },\n \"values\": [[\"abc\", \"def\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"\", \"ghi\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"\", \"jkl\"]]\n}" - ], - "name": "String Array Chunking Test With Empty Strings" - }, - { - "result": {"value": [[["abcdefghi"]]]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRING\"\n }\n }\n }]\n }\n },\n \"values\": [[\"abc\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"def\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"ghi\"]]\n}" - ], - "name": "String Array Chunking Test With One Large String" - }, - { - "result": {"value": [[[ - "1", - "23", - "4", - null, - 5 - ]]]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"INT64\"\n }\n }\n }]\n }\n },\n \"values\": [[\"1\", \"2\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"3\", \"4\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"\", null, \"5\"]]\n}" - ], - "name": "INT64 Array Chunking Test" - }, - { - "result": {"value": [[[ - 1, - 2, - "Infinity", - "-Infinity", - "NaN", - null, - 3 - ]]]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"FLOAT64\"\n }\n }\n }]\n }\n },\n \"values\": [[1.0, 2.0]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"Infinity\", \"-Infinity\", \"NaN\"]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[\"\", null, 3.0]]\n}" - ], - "name": "FLOAT64 Array Chunking Test" - }, - { - "result": {"value": [[[ - [ - "abc", - "defghi" - ], - [ - "123", - "456" - ] - ]]]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": 
{\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f11\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f12\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [[[\"abc\", \"def\"]]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[[\"ghi\"], [\"123\", \"456\"]]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[[\"\"]]]\n}" - ], - "name": "Struct Array Chunking Test" - }, - { - "result": {"value": [[[[[["abc"]]]]]]}, - "chunks": ["{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f11\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f12\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [[[[[\"abc\"]]]]]\n}"], - "name": "Nested Struct Array Test" - }, - { - "result": {"value": [[[[[ - ["abc"], - ["def"] - ]]]]]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f11\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f12\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [[[[[\"ab\"]]]]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[[[[\"c\"], [\"def\"]]]]]\n}" - ], - "name": "Nested Struct Array Chunking Test" - }, - { - "result": {"value": [ - [ - "1", - [["ab"]] - ], - [ - "2", - [["c"]] - ] - ]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f2\",\n \"type\": {\n \"code\": \"ARRAY\",\n \"arrayElementType\": {\n \"code\": \"STRUCT\",\n \"structType\": {\n \"fields\": [{\n \"name\": \"f21\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n }\n }\n }]\n }\n },\n \"values\": [\"1\", [[\"a\"]]],\n \"chunkedValue\": true\n}", - "{\n \"values\": [[[\"b\"]], \"2\"],\n \"chunkedValue\": true\n}", - "{\n \"values\": [\"\", [[\"c\"]]]\n}" - ], - "name": "Struct Array And String Chunking Test" - }, - { - "result": {"value": [ - [ - "abc", - "1" - ], - [ - "def", - "2" - ] - ]}, - "chunks": ["{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f2\",\n \"type\": {\n \"code\": \"INT64\"\n }\n }]\n }\n },\n \"values\": [\"abc\", \"1\", \"def\", \"2\"]\n}"], - "name": "Multiple Row Single Chunk" - }, - { - "result": {"value": [ - [ - "abc", - "1" - ], - [ - "def", - "2" - ] - ]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": \"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }, {\n \"name\": \"f2\",\n \"type\": {\n \"code\": \"INT64\"\n }\n }]\n }\n },\n \"values\": [\"ab\"],\n \"chunkedValue\": true\n}", - "{\n \"values\": [\"c\", \"1\", \"de\"],\n \"chunkedValue\": true\n}", - "{\n \"values\": [\"f\", \"2\"]\n}" - ], - "name": "Multiple Row Multiple Chunks" - }, - { - "result": {"value": [ - ["ab"], - ["c"], - ["d"], - ["ef"] - ]}, - "chunks": [ - "{\n \"metadata\": {\n \"rowType\": {\n \"fields\": [{\n \"name\": 
\"f1\",\n \"type\": {\n \"code\": \"STRING\"\n }\n }]\n }\n },\n \"values\": [\"a\"],\n \"chunkedValue\": true\n}", - "{\n \"values\": [\"b\", \"c\"]\n}", - "{\n \"values\": [\"d\", \"e\"],\n \"chunkedValue\": true\n}", - "{\n \"values\": [\"f\"]\n}" - ], - "name": "Multiple Row Chunks/Non Chunks Interleaved" - } -]} diff --git a/spanner/tests/unit/test__helpers.py b/spanner/tests/unit/test__helpers.py deleted file mode 100644 index 86ce78727b46..000000000000 --- a/spanner/tests/unit/test__helpers.py +++ /dev/null @@ -1,453 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - - -class Test_make_value_pb(unittest.TestCase): - def _callFUT(self, *args, **kw): - from google.cloud.spanner_v1._helpers import _make_value_pb - - return _make_value_pb(*args, **kw) - - def test_w_None(self): - value_pb = self._callFUT(None) - self.assertTrue(value_pb.HasField("null_value")) - - def test_w_bytes(self): - from google.protobuf.struct_pb2 import Value - - BYTES = b"BYTES" - expected = Value(string_value=BYTES) - value_pb = self._callFUT(BYTES) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb, expected) - - def test_w_invalid_bytes(self): - BYTES = b"\xff\xfe\x03&" - with self.assertRaises(ValueError): - self._callFUT(BYTES) - - def test_w_explicit_unicode(self): - from google.protobuf.struct_pb2 import Value - - TEXT = u"TEXT" - value_pb = self._callFUT(TEXT) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.string_value, TEXT) - - def test_w_list(self): - from google.protobuf.struct_pb2 import Value - from google.protobuf.struct_pb2 import ListValue - - value_pb = self._callFUT([u"a", u"b", u"c"]) - self.assertIsInstance(value_pb, Value) - self.assertIsInstance(value_pb.list_value, ListValue) - values = value_pb.list_value.values - self.assertEqual([value.string_value for value in values], [u"a", u"b", u"c"]) - - def test_w_tuple(self): - from google.protobuf.struct_pb2 import Value - from google.protobuf.struct_pb2 import ListValue - - value_pb = self._callFUT((u"a", u"b", u"c")) - self.assertIsInstance(value_pb, Value) - self.assertIsInstance(value_pb.list_value, ListValue) - values = value_pb.list_value.values - self.assertEqual([value.string_value for value in values], [u"a", u"b", u"c"]) - - def test_w_bool(self): - from google.protobuf.struct_pb2 import Value - - value_pb = self._callFUT(True) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.bool_value, True) - - def test_w_int(self): - import six - from google.protobuf.struct_pb2 import Value - - for int_type in six.integer_types: # include 'long' on Python 2 - value_pb = self._callFUT(int_type(42)) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.string_value, "42") - - def test_w_float(self): - from google.protobuf.struct_pb2 import Value - - value_pb = self._callFUT(3.14159) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.number_value, 3.14159) - - def 
test_w_float_nan(self): - from google.protobuf.struct_pb2 import Value - - value_pb = self._callFUT(float("nan")) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.string_value, "NaN") - - def test_w_float_neg_inf(self): - from google.protobuf.struct_pb2 import Value - - value_pb = self._callFUT(float("-inf")) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.string_value, "-Infinity") - - def test_w_float_pos_inf(self): - from google.protobuf.struct_pb2 import Value - - value_pb = self._callFUT(float("inf")) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.string_value, "Infinity") - - def test_w_date(self): - import datetime - from google.protobuf.struct_pb2 import Value - - today = datetime.date.today() - value_pb = self._callFUT(today) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.string_value, today.isoformat()) - - def test_w_timestamp_w_nanos(self): - import pytz - from google.protobuf.struct_pb2 import Value - from google.api_core import datetime_helpers - - when = datetime_helpers.DatetimeWithNanoseconds( - 2016, 12, 20, 21, 13, 47, nanosecond=123456789, tzinfo=pytz.UTC - ) - value_pb = self._callFUT(when) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.string_value, when.rfc3339()) - - def test_w_listvalue(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1._helpers import _make_list_value_pb - - list_value = _make_list_value_pb([1, 2, 3]) - value_pb = self._callFUT(list_value) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.list_value, list_value) - - def test_w_datetime(self): - import datetime - import pytz - from google.protobuf.struct_pb2 import Value - from google.api_core import datetime_helpers - - now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) - value_pb = self._callFUT(now) - self.assertIsInstance(value_pb, Value) - self.assertEqual(value_pb.string_value, datetime_helpers.to_rfc3339(now)) - - def test_w_unknown_type(self): - with self.assertRaises(ValueError): - self._callFUT(object()) - - -class Test_make_list_value_pb(unittest.TestCase): - def _callFUT(self, *args, **kw): - from google.cloud.spanner_v1._helpers import _make_list_value_pb - - return _make_list_value_pb(*args, **kw) - - def test_empty(self): - from google.protobuf.struct_pb2 import ListValue - - result = self._callFUT(values=[]) - self.assertIsInstance(result, ListValue) - self.assertEqual(len(result.values), 0) - - def test_w_single_value(self): - from google.protobuf.struct_pb2 import ListValue - - VALUE = u"value" - result = self._callFUT(values=[VALUE]) - self.assertIsInstance(result, ListValue) - self.assertEqual(len(result.values), 1) - self.assertEqual(result.values[0].string_value, VALUE) - - def test_w_multiple_values(self): - from google.protobuf.struct_pb2 import ListValue - - VALUE_1 = u"value" - VALUE_2 = 42 - result = self._callFUT(values=[VALUE_1, VALUE_2]) - self.assertIsInstance(result, ListValue) - self.assertEqual(len(result.values), 2) - self.assertEqual(result.values[0].string_value, VALUE_1) - self.assertEqual(result.values[1].string_value, str(VALUE_2)) - - -class Test_make_list_value_pbs(unittest.TestCase): - def _callFUT(self, *args, **kw): - from google.cloud.spanner_v1._helpers import _make_list_value_pbs - - return _make_list_value_pbs(*args, **kw) - - def test_empty(self): - result = self._callFUT(values=[]) - self.assertEqual(result, []) - - def test_w_single_values(self): - from google.protobuf.struct_pb2 
import ListValue - - values = [[0], [1]] - result = self._callFUT(values=values) - self.assertEqual(len(result), len(values)) - for found, expected in zip(result, values): - self.assertIsInstance(found, ListValue) - self.assertEqual(len(found.values), 1) - self.assertEqual(found.values[0].string_value, str(expected[0])) - - def test_w_multiple_values(self): - from google.protobuf.struct_pb2 import ListValue - - values = [[0, u"A"], [1, u"B"]] - result = self._callFUT(values=values) - self.assertEqual(len(result), len(values)) - for found, expected in zip(result, values): - self.assertIsInstance(found, ListValue) - self.assertEqual(len(found.values), 2) - self.assertEqual(found.values[0].string_value, str(expected[0])) - self.assertEqual(found.values[1].string_value, expected[1]) - - -class Test_parse_value_pb(unittest.TestCase): - def _callFUT(self, *args, **kw): - from google.cloud.spanner_v1._helpers import _parse_value_pb - - return _parse_value_pb(*args, **kw) - - def test_w_null(self): - from google.protobuf.struct_pb2 import Value, NULL_VALUE - from google.cloud.spanner_v1.proto.type_pb2 import Type, STRING - - field_type = Type(code=STRING) - value_pb = Value(null_value=NULL_VALUE) - - self.assertEqual(self._callFUT(value_pb, field_type), None) - - def test_w_string(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, STRING - - VALUE = u"Value" - field_type = Type(code=STRING) - value_pb = Value(string_value=VALUE) - - self.assertEqual(self._callFUT(value_pb, field_type), VALUE) - - def test_w_bytes(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, BYTES - - VALUE = b"Value" - field_type = Type(code=BYTES) - value_pb = Value(string_value=VALUE) - - self.assertEqual(self._callFUT(value_pb, field_type), VALUE) - - def test_w_bool(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, BOOL - - VALUE = True - field_type = Type(code=BOOL) - value_pb = Value(bool_value=VALUE) - - self.assertEqual(self._callFUT(value_pb, field_type), VALUE) - - def test_w_int(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, INT64 - - VALUE = 12345 - field_type = Type(code=INT64) - value_pb = Value(string_value=str(VALUE)) - - self.assertEqual(self._callFUT(value_pb, field_type), VALUE) - - def test_w_float(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, FLOAT64 - - VALUE = 3.14159 - field_type = Type(code=FLOAT64) - value_pb = Value(number_value=VALUE) - - self.assertEqual(self._callFUT(value_pb, field_type), VALUE) - - def test_w_date(self): - import datetime - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, DATE - - VALUE = datetime.date.today() - field_type = Type(code=DATE) - value_pb = Value(string_value=VALUE.isoformat()) - - self.assertEqual(self._callFUT(value_pb, field_type), VALUE) - - def test_w_timestamp_wo_nanos(self): - import pytz - from google.protobuf.struct_pb2 import Value - from google.api_core import datetime_helpers - from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP - from google.cloud.spanner_v1.proto.type_pb2 import Type - - value = datetime_helpers.DatetimeWithNanoseconds( - 2016, 12, 20, 21, 13, 47, microsecond=123456, tzinfo=pytz.UTC - ) - field_type = Type(code=TIMESTAMP) - value_pb = 
Value(string_value=datetime_helpers.to_rfc3339(value)) - - parsed = self._callFUT(value_pb, field_type) - self.assertIsInstance(parsed, datetime_helpers.DatetimeWithNanoseconds) - self.assertEqual(parsed, value) - - def test_w_timestamp_w_nanos(self): - import pytz - from google.protobuf.struct_pb2 import Value - from google.api_core import datetime_helpers - from google.cloud.spanner_v1.proto.type_pb2 import TIMESTAMP - from google.cloud.spanner_v1.proto.type_pb2 import Type - - value = datetime_helpers.DatetimeWithNanoseconds( - 2016, 12, 20, 21, 13, 47, nanosecond=123456789, tzinfo=pytz.UTC - ) - field_type = Type(code=TIMESTAMP) - value_pb = Value(string_value=datetime_helpers.to_rfc3339(value)) - - parsed = self._callFUT(value_pb, field_type) - self.assertIsInstance(parsed, datetime_helpers.DatetimeWithNanoseconds) - self.assertEqual(parsed, value) - - def test_w_array_empty(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, ARRAY, INT64 - - field_type = Type(code=ARRAY, array_element_type=Type(code=INT64)) - value_pb = Value() - - self.assertEqual(self._callFUT(value_pb, field_type), []) - - def test_w_array_non_empty(self): - from google.protobuf.struct_pb2 import Value, ListValue - from google.cloud.spanner_v1.proto.type_pb2 import Type, ARRAY, INT64 - - field_type = Type(code=ARRAY, array_element_type=Type(code=INT64)) - VALUES = [32, 19, 5] - values_pb = ListValue( - values=[Value(string_value=str(value)) for value in VALUES] - ) - value_pb = Value(list_value=values_pb) - - self.assertEqual(self._callFUT(value_pb, field_type), VALUES) - - def test_w_struct(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRUCT, STRING, INT64 - from google.cloud.spanner_v1._helpers import _make_list_value_pb - - VALUES = [u"phred", 32] - struct_type_pb = StructType( - fields=[ - StructType.Field(name="name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), - ] - ) - field_type = Type(code=STRUCT, struct_type=struct_type_pb) - value_pb = Value(list_value=_make_list_value_pb(VALUES)) - - self.assertEqual(self._callFUT(value_pb, field_type), VALUES) - - def test_w_unknown_type(self): - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.type_pb2 import Type - from google.cloud.spanner_v1.proto.type_pb2 import TYPE_CODE_UNSPECIFIED - - field_type = Type(code=TYPE_CODE_UNSPECIFIED) - value_pb = Value(string_value="Borked") - - with self.assertRaises(ValueError): - self._callFUT(value_pb, field_type) - - -class Test_parse_list_value_pbs(unittest.TestCase): - def _callFUT(self, *args, **kw): - from google.cloud.spanner_v1._helpers import _parse_list_value_pbs - - return _parse_list_value_pbs(*args, **kw) - - def test_empty(self): - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 - - struct_type_pb = StructType( - fields=[ - StructType.Field(name="name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), - ] - ) - - self.assertEqual(self._callFUT(rows=[], row_type=struct_type_pb), []) - - def test_non_empty(self): - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 - from google.cloud.spanner_v1._helpers import _make_list_value_pbs - - VALUES = [[u"phred", 32], 
[u"bharney", 31]] - struct_type_pb = StructType( - fields=[ - StructType.Field(name="name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), - ] - ) - values_pbs = _make_list_value_pbs(VALUES) - - self.assertEqual( - self._callFUT(rows=values_pbs, row_type=struct_type_pb), VALUES - ) - - -class Test_SessionWrapper(unittest.TestCase): - def _getTargetClass(self): - from google.cloud.spanner_v1._helpers import _SessionWrapper - - return _SessionWrapper - - def _make_one(self, session): - return self._getTargetClass()(session) - - def test_ctor(self): - session = object() - base = self._make_one(session) - self.assertIs(base._session, session) - - -class Test_metadata_with_prefix(unittest.TestCase): - def _call_fut(self, *args, **kw): - from google.cloud.spanner_v1._helpers import _metadata_with_prefix - - return _metadata_with_prefix(*args, **kw) - - def test(self): - prefix = "prefix" - metadata = self._call_fut(prefix) - self.assertEqual(metadata, [("google-cloud-resource-prefix", prefix)]) diff --git a/spanner/tests/unit/test_batch.py b/spanner/tests/unit/test_batch.py deleted file mode 100644 index f4fd9d12ab3d..000000000000 --- a/spanner/tests/unit/test_batch.py +++ /dev/null @@ -1,340 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import unittest - - -TABLE_NAME = "citizens" -COLUMNS = ["email", "first_name", "last_name", "age"] -VALUES = [ - [u"phred@exammple.com", u"Phred", u"Phlyntstone", 32], - [u"bharney@example.com", u"Bharney", u"Rhubble", 31], -] - - -class _BaseTest(unittest.TestCase): - - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - DATABASE_ID = "database-id" - DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID - SESSION_ID = "session-id" - SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - -class Test_BatchBase(_BaseTest): - def _getTargetClass(self): - from google.cloud.spanner_v1.batch import _BatchBase - - return _BatchBase - - def _compare_values(self, result, source): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - - for found, expected in zip(result, source): - self.assertIsInstance(found, ListValue) - self.assertEqual(len(found.values), len(expected)) - for found_cell, expected_cell in zip(found.values, expected): - self.assertIsInstance(found_cell, Value) - if isinstance(expected_cell, int): - self.assertEqual(int(found_cell.string_value), expected_cell) - else: - self.assertEqual(found_cell.string_value, expected_cell) - - def test_ctor(self): - session = _Session() - base = self._make_one(session) - self.assertIs(base._session, session) - self.assertEqual(len(base._mutations), 0) - - def test__check_state_virtual(self): - session = _Session() - base = self._make_one(session) - with self.assertRaises(NotImplementedError): - base._check_state() - - def test_insert(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation - - session = _Session() - base = self._make_one(session) - - base.insert(TABLE_NAME, columns=COLUMNS, values=VALUES) - - self.assertEqual(len(base._mutations), 1) - mutation = base._mutations[0] - self.assertIsInstance(mutation, Mutation) - write = mutation.insert - self.assertIsInstance(write, Mutation.Write) - self.assertEqual(write.table, TABLE_NAME) - self.assertEqual(write.columns, COLUMNS) - self._compare_values(write.values, VALUES) - - def test_update(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation - - session = _Session() - base = self._make_one(session) - - base.update(TABLE_NAME, columns=COLUMNS, values=VALUES) - - self.assertEqual(len(base._mutations), 1) - mutation = base._mutations[0] - self.assertIsInstance(mutation, Mutation) - write = mutation.update - self.assertIsInstance(write, Mutation.Write) - self.assertEqual(write.table, TABLE_NAME) - self.assertEqual(write.columns, COLUMNS) - self._compare_values(write.values, VALUES) - - def test_insert_or_update(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation - - session = _Session() - base = self._make_one(session) - - base.insert_or_update(TABLE_NAME, columns=COLUMNS, values=VALUES) - - self.assertEqual(len(base._mutations), 1) - mutation = base._mutations[0] - self.assertIsInstance(mutation, Mutation) - write = mutation.insert_or_update - self.assertIsInstance(write, Mutation.Write) - self.assertEqual(write.table, TABLE_NAME) - self.assertEqual(write.columns, COLUMNS) - self._compare_values(write.values, VALUES) - - def test_replace(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation - - session = _Session() - base = self._make_one(session) - - base.replace(TABLE_NAME, 
columns=COLUMNS, values=VALUES) - - self.assertEqual(len(base._mutations), 1) - mutation = base._mutations[0] - self.assertIsInstance(mutation, Mutation) - write = mutation.replace - self.assertIsInstance(write, Mutation.Write) - self.assertEqual(write.table, TABLE_NAME) - self.assertEqual(write.columns, COLUMNS) - self._compare_values(write.values, VALUES) - - def test_delete(self): - from google.cloud.spanner_v1.proto.mutation_pb2 import Mutation - from google.cloud.spanner_v1.keyset import KeySet - - keys = [[0], [1], [2]] - keyset = KeySet(keys=keys) - session = _Session() - base = self._make_one(session) - - base.delete(TABLE_NAME, keyset=keyset) - - self.assertEqual(len(base._mutations), 1) - mutation = base._mutations[0] - self.assertIsInstance(mutation, Mutation) - delete = mutation.delete - self.assertIsInstance(delete, Mutation.Delete) - self.assertEqual(delete.table, TABLE_NAME) - key_set_pb = delete.key_set - self.assertEqual(len(key_set_pb.ranges), 0) - self.assertEqual(len(key_set_pb.keys), len(keys)) - for found, expected in zip(key_set_pb.keys, keys): - self.assertEqual( - [int(value.string_value) for value in found.values], expected - ) - - -class TestBatch(_BaseTest): - def _getTargetClass(self): - from google.cloud.spanner_v1.batch import Batch - - return Batch - - def test_ctor(self): - session = _Session() - batch = self._make_one(session) - self.assertIs(batch._session, session) - - def test_commit_already_committed(self): - from google.cloud.spanner_v1.keyset import KeySet - - keys = [[0], [1], [2]] - keyset = KeySet(keys=keys) - database = _Database() - session = _Session(database) - batch = self._make_one(session) - batch.committed = object() - batch.delete(TABLE_NAME, keyset=keyset) - - with self.assertRaises(ValueError): - batch.commit() - - def test_commit_grpc_error(self): - from google.api_core.exceptions import Unknown - from google.cloud.spanner_v1.keyset import KeySet - - keys = [[0], [1], [2]] - keyset = KeySet(keys=keys) - database = _Database() - database.spanner_api = _FauxSpannerAPI(_rpc_error=True) - session = _Session(database) - batch = self._make_one(session) - batch.delete(TABLE_NAME, keyset=keyset) - - with self.assertRaises(Unknown): - batch.commit() - - def test_commit_ok(self): - import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - database = _Database() - api = database.spanner_api = _FauxSpannerAPI(_commit_response=response) - session = _Session(database) - batch = self._make_one(session) - batch.insert(TABLE_NAME, COLUMNS, VALUES) - - committed = batch.commit() - - self.assertEqual(committed, now) - self.assertEqual(batch.committed, committed) - - (session, mutations, single_use_txn, metadata) = api._committed - self.assertEqual(session, self.SESSION_NAME) - self.assertEqual(mutations, batch._mutations) - self.assertIsInstance(single_use_txn, TransactionOptions) - self.assertTrue(single_use_txn.HasField("read_write")) - self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) - - def test_context_mgr_already_committed(self): - import datetime - from google.cloud._helpers import UTC - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - database = 
_Database() - api = database.spanner_api = _FauxSpannerAPI() - session = _Session(database) - batch = self._make_one(session) - batch.committed = now - - with self.assertRaises(ValueError): - with batch: - pass # pragma: NO COVER - - self.assertEqual(api._committed, None) - - def test_context_mgr_success(self): - import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - database = _Database() - api = database.spanner_api = _FauxSpannerAPI(_commit_response=response) - session = _Session(database) - batch = self._make_one(session) - - with batch: - batch.insert(TABLE_NAME, COLUMNS, VALUES) - - self.assertEqual(batch.committed, now) - - (session, mutations, single_use_txn, metadata) = api._committed - self.assertEqual(session, self.SESSION_NAME) - self.assertEqual(mutations, batch._mutations) - self.assertIsInstance(single_use_txn, TransactionOptions) - self.assertTrue(single_use_txn.HasField("read_write")) - self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) - - def test_context_mgr_failure(self): - import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - database = _Database() - api = database.spanner_api = _FauxSpannerAPI(_commit_response=response) - session = _Session(database) - batch = self._make_one(session) - - class _BailOut(Exception): - pass - - with self.assertRaises(_BailOut): - with batch: - batch.insert(TABLE_NAME, COLUMNS, VALUES) - raise _BailOut() - - self.assertEqual(batch.committed, None) - self.assertEqual(api._committed, None) - self.assertEqual(len(batch._mutations), 1) - - -class _Session(object): - def __init__(self, database=None, name=TestBatch.SESSION_NAME): - self._database = database - self.name = name - - -class _Database(object): - name = "testing" - - -class _FauxSpannerAPI: - - _create_instance_conflict = False - _instance_not_found = False - _committed = None - _rpc_error = False - - def __init__(self, **kwargs): - self.__dict__.update(**kwargs) - - def commit( - self, - session, - mutations, - transaction_id="", - single_use_transaction=None, - metadata=None, - ): - from google.api_core.exceptions import Unknown - - assert transaction_id == "" - self._committed = (session, mutations, single_use_transaction, metadata) - if self._rpc_error: - raise Unknown("error") - return self._commit_response diff --git a/spanner/tests/unit/test_client.py b/spanner/tests/unit/test_client.py deleted file mode 100644 index 35e63bfd68d6..000000000000 --- a/spanner/tests/unit/test_client.py +++ /dev/null @@ -1,452 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import mock - - -def _make_credentials(): - import google.auth.credentials - - class _CredentialsWithScopes( - google.auth.credentials.Credentials, google.auth.credentials.Scoped - ): - pass - - return mock.Mock(spec=_CredentialsWithScopes) - - -class TestClient(unittest.TestCase): - - PROJECT = "PROJECT" - PATH = "projects/%s" % (PROJECT,) - CONFIGURATION_NAME = "config-name" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "%s/instances/%s" % (PATH, INSTANCE_ID) - DISPLAY_NAME = "display-name" - NODE_COUNT = 5 - TIMEOUT_SECONDS = 80 - USER_AGENT = "you-sir-age-int" - - def _get_target_class(self): - from google.cloud import spanner - - return spanner.Client - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def _constructor_test_helper( - self, - expected_scopes, - creds, - expected_creds=None, - client_info=None, - user_agent=None, - client_options=None, - ): - import google.api_core.client_options - from google.cloud.spanner_v1 import client as MUT - - kwargs = {} - - if client_info is not None: - kwargs["client_info"] = expected_client_info = client_info - else: - expected_client_info = MUT._CLIENT_INFO - - kwargs["client_options"] = client_options - if type(client_options) == dict: - expected_client_options = google.api_core.client_options.from_dict( - client_options - ) - else: - expected_client_options = client_options - - client = self._make_one( - project=self.PROJECT, credentials=creds, user_agent=user_agent, **kwargs - ) - - expected_creds = expected_creds or creds.with_scopes.return_value - self.assertIs(client._credentials, expected_creds) - - self.assertIs(client._credentials, expected_creds) - if expected_scopes is not None: - creds.with_scopes.assert_called_once_with(expected_scopes) - - self.assertEqual(client.project, self.PROJECT) - self.assertIs(client._client_info, expected_client_info) - self.assertEqual(client.user_agent, user_agent) - if expected_client_options is not None: - self.assertIsInstance( - client._client_options, google.api_core.client_options.ClientOptions - ) - self.assertEqual( - client._client_options.api_endpoint, - expected_client_options.api_endpoint, - ) - - def test_constructor_default_scopes(self): - from google.cloud.spanner_v1 import client as MUT - - expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) - creds = _make_credentials() - self._constructor_test_helper(expected_scopes, creds) - - @mock.patch("warnings.warn") - def test_constructor_custom_user_agent_and_timeout(self, mock_warn): - from google.cloud.spanner_v1 import client as MUT - - CUSTOM_USER_AGENT = "custom-application" - expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) - creds = _make_credentials() - self._constructor_test_helper( - expected_scopes, creds, user_agent=CUSTOM_USER_AGENT - ) - mock_warn.assert_called_once_with( - MUT._USER_AGENT_DEPRECATED, DeprecationWarning, stacklevel=2 - ) - - def test_constructor_custom_client_info(self): - from google.cloud.spanner_v1 import client as MUT - - client_info = mock.Mock() - expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) - creds = _make_credentials() - 
self._constructor_test_helper(expected_scopes, creds, client_info=client_info) - - def test_constructor_implicit_credentials(self): - creds = _make_credentials() - - patch = mock.patch("google.auth.default", return_value=(creds, None)) - with patch as default: - self._constructor_test_helper( - None, None, expected_creds=creds.with_scopes.return_value - ) - - default.assert_called_once_with() - - def test_constructor_credentials_wo_create_scoped(self): - creds = _make_credentials() - expected_scopes = None - self._constructor_test_helper(expected_scopes, creds) - - def test_constructor_custom_client_options_obj(self): - from google.api_core.client_options import ClientOptions - from google.cloud.spanner_v1 import client as MUT - - expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) - creds = _make_credentials() - self._constructor_test_helper( - expected_scopes, - creds, - client_options=ClientOptions(api_endpoint="endpoint"), - ) - - def test_constructor_custom_client_options_dict(self): - from google.cloud.spanner_v1 import client as MUT - - expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) - creds = _make_credentials() - self._constructor_test_helper( - expected_scopes, creds, client_options={"api_endpoint": "endpoint"} - ) - - def test_instance_admin_api(self): - from google.cloud.spanner_v1.client import SPANNER_ADMIN_SCOPE - - credentials = _make_credentials() - client_info = mock.Mock() - client_options = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - client_info=client_info, - client_options=client_options, - ) - expected_scopes = (SPANNER_ADMIN_SCOPE,) - - inst_module = "google.cloud.spanner_v1.client.InstanceAdminClient" - with mock.patch(inst_module) as instance_admin_client: - api = client.instance_admin_api - - self.assertIs(api, instance_admin_client.return_value) - - # API instance is cached - again = client.instance_admin_api - self.assertIs(again, api) - - instance_admin_client.assert_called_once_with( - credentials=credentials.with_scopes.return_value, - client_info=client_info, - client_options=client_options, - ) - - credentials.with_scopes.assert_called_once_with(expected_scopes) - - def test_database_admin_api(self): - from google.cloud.spanner_v1.client import SPANNER_ADMIN_SCOPE - - credentials = _make_credentials() - client_info = mock.Mock() - client_options = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - client_info=client_info, - client_options=client_options, - ) - expected_scopes = (SPANNER_ADMIN_SCOPE,) - - db_module = "google.cloud.spanner_v1.client.DatabaseAdminClient" - with mock.patch(db_module) as database_admin_client: - api = client.database_admin_api - - self.assertIs(api, database_admin_client.return_value) - - # API instance is cached - again = client.database_admin_api - self.assertIs(again, api) - - database_admin_client.assert_called_once_with( - credentials=credentials.with_scopes.return_value, - client_info=client_info, - client_options=client_options, - ) - - credentials.with_scopes.assert_called_once_with(expected_scopes) - - def test_copy(self): - credentials = _make_credentials() - # Make sure it "already" is scoped. 
- credentials.requires_scopes = False - - client = self._make_one(project=self.PROJECT, credentials=credentials) - - new_client = client.copy() - self.assertIs(new_client._credentials, client._credentials) - self.assertEqual(new_client.project, client.project) - - def test_credentials_property(self): - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - self.assertIs(client.credentials, credentials.with_scopes.return_value) - - def test_project_name_property(self): - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - project_name = "projects/" + self.PROJECT - self.assertEqual(client.project_name, project_name) - - def test_list_instance_configs(self): - from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2, - ) - from google.cloud.spanner_v1.client import InstanceConfig - - api = instance_admin_client.InstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - client._instance_admin_api = api - - instance_config_pbs = spanner_instance_admin_pb2.ListInstanceConfigsResponse( - instance_configs=[ - spanner_instance_admin_pb2.InstanceConfig( - name=self.CONFIGURATION_NAME, display_name=self.DISPLAY_NAME - ) - ] - ) - - lic_api = api._inner_api_calls["list_instance_configs"] = mock.Mock( - return_value=instance_config_pbs - ) - - response = client.list_instance_configs() - instance_configs = list(response) - - instance_config = instance_configs[0] - self.assertIsInstance(instance_config, InstanceConfig) - self.assertEqual(instance_config.name, self.CONFIGURATION_NAME) - self.assertEqual(instance_config.display_name, self.DISPLAY_NAME) - - expected_metadata = [ - ("google-cloud-resource-prefix", client.project_name), - ("x-goog-request-params", "parent={}".format(client.project_name)), - ] - lic_api.assert_called_once_with( - spanner_instance_admin_pb2.ListInstanceConfigsRequest(parent=self.PATH), - metadata=expected_metadata, - retry=mock.ANY, - timeout=mock.ANY, - ) - - def test_list_instance_configs_w_options(self): - from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2, - ) - - api = instance_admin_client.InstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - client._instance_admin_api = api - - instance_config_pbs = spanner_instance_admin_pb2.ListInstanceConfigsResponse( - instance_configs=[ - spanner_instance_admin_pb2.InstanceConfig( - name=self.CONFIGURATION_NAME, display_name=self.DISPLAY_NAME - ) - ] - ) - - lic_api = api._inner_api_calls["list_instance_configs"] = mock.Mock( - return_value=instance_config_pbs - ) - - token = "token" - page_size = 42 - list(client.list_instance_configs(page_token=token, page_size=42)) - - expected_metadata = [ - ("google-cloud-resource-prefix", client.project_name), - ("x-goog-request-params", "parent={}".format(client.project_name)), - ] - lic_api.assert_called_once_with( - spanner_instance_admin_pb2.ListInstanceConfigsRequest( - parent=self.PATH, page_size=page_size, page_token=token - ), - metadata=expected_metadata, - retry=mock.ANY, - timeout=mock.ANY, - ) - - def test_instance_factory_defaults(self): - from 
google.cloud.spanner_v1.instance import DEFAULT_NODE_COUNT - from google.cloud.spanner_v1.instance import Instance - - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - - instance = client.instance(self.INSTANCE_ID) - - self.assertTrue(isinstance(instance, Instance)) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertIsNone(instance.configuration_name) - self.assertEqual(instance.display_name, self.INSTANCE_ID) - self.assertEqual(instance.node_count, DEFAULT_NODE_COUNT) - self.assertIs(instance._client, client) - - def test_instance_factory_explicit(self): - from google.cloud.spanner_v1.instance import Instance - - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - - instance = client.instance( - self.INSTANCE_ID, - self.CONFIGURATION_NAME, - display_name=self.DISPLAY_NAME, - node_count=self.NODE_COUNT, - ) - - self.assertTrue(isinstance(instance, Instance)) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.configuration_name, self.CONFIGURATION_NAME) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.node_count, self.NODE_COUNT) - self.assertIs(instance._client, client) - - def test_list_instances(self): - from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2, - ) - from google.cloud.spanner_v1.client import Instance - - api = instance_admin_client.InstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - client._instance_admin_api = api - - instance_pbs = spanner_instance_admin_pb2.ListInstancesResponse( - instances=[ - spanner_instance_admin_pb2.Instance( - name=self.INSTANCE_NAME, - config=self.CONFIGURATION_NAME, - display_name=self.DISPLAY_NAME, - node_count=self.NODE_COUNT, - ) - ] - ) - - li_api = api._inner_api_calls["list_instances"] = mock.Mock( - return_value=instance_pbs - ) - - response = client.list_instances() - instances = list(response) - - instance = instances[0] - self.assertIsInstance(instance, Instance) - self.assertEqual(instance.name, self.INSTANCE_NAME) - self.assertEqual(instance.configuration_name, self.CONFIGURATION_NAME) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.node_count, self.NODE_COUNT) - - expected_metadata = [ - ("google-cloud-resource-prefix", client.project_name), - ("x-goog-request-params", "parent={}".format(client.project_name)), - ] - li_api.assert_called_once_with( - spanner_instance_admin_pb2.ListInstancesRequest(parent=self.PATH), - metadata=expected_metadata, - retry=mock.ANY, - timeout=mock.ANY, - ) - - def test_list_instances_w_options(self): - from google.cloud.spanner_admin_instance_v1.gapic import instance_admin_client - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2, - ) - - api = instance_admin_client.InstanceAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - client._instance_admin_api = api - - instance_pbs = spanner_instance_admin_pb2.ListInstancesResponse(instances=[]) - - li_api = api._inner_api_calls["list_instances"] = mock.Mock( - return_value=instance_pbs - ) - - token = "token" - page_size = 42 - list(client.list_instances(page_token=token, 
page_size=42)) - - expected_metadata = [ - ("google-cloud-resource-prefix", client.project_name), - ("x-goog-request-params", "parent={}".format(client.project_name)), - ] - li_api.assert_called_once_with( - spanner_instance_admin_pb2.ListInstancesRequest( - parent=self.PATH, page_size=page_size, page_token=token - ), - metadata=expected_metadata, - retry=mock.ANY, - timeout=mock.ANY, - ) diff --git a/spanner/tests/unit/test_database.py b/spanner/tests/unit/test_database.py deleted file mode 100644 index 0f4071d8680b..000000000000 --- a/spanner/tests/unit/test_database.py +++ /dev/null @@ -1,1833 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - -import mock - - -DML_WO_PARAM = """ -DELETE FROM citizens -""" - -DML_W_PARAM = """ -INSERT INTO citizens(first_name, last_name, age) -VALUES ("Phred", "Phlyntstone", @age) -""" -PARAMS = {"age": 30} -PARAM_TYPES = {"age": "INT64"} -MODE = 2 # PROFILE - - -def _make_credentials(): # pragma: NO COVER - import google.auth.credentials - - class _CredentialsWithScopes( - google.auth.credentials.Credentials, google.auth.credentials.Scoped - ): - pass - - return mock.Mock(spec=_CredentialsWithScopes) - - -class _BaseTest(unittest.TestCase): - - PROJECT_ID = "project-id" - PARENT = "projects/" + PROJECT_ID - INSTANCE_ID = "instance-id" - INSTANCE_NAME = PARENT + "/instances/" + INSTANCE_ID - DATABASE_ID = "database_id" - DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID - SESSION_ID = "session_id" - SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID - TRANSACTION_ID = b"transaction_id" - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _make_timestamp(): - import datetime - from google.cloud._helpers import UTC - - return datetime.datetime.utcnow().replace(tzinfo=UTC) - - @staticmethod - def _make_duration(seconds=1, microseconds=0): - import datetime - - return datetime.timedelta(seconds=seconds, microseconds=microseconds) - - -class TestDatabase(_BaseTest): - def _get_target_class(self): - from google.cloud.spanner_v1.database import Database - - return Database - - @staticmethod - def _make_database_admin_api(): - from google.cloud.spanner_v1.client import DatabaseAdminClient - - return mock.create_autospec(DatabaseAdminClient, instance=True) - - @staticmethod - def _make_spanner_api(): - import google.cloud.spanner_v1.gapic.spanner_client - - return mock.create_autospec( - google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True - ) - - def test_ctor_defaults(self): - from google.cloud.spanner_v1.pool import BurstyPool - - instance = _Instance(self.INSTANCE_NAME) - - database = self._make_one(self.DATABASE_ID, instance) - - self.assertEqual(database.database_id, self.DATABASE_ID) - self.assertIs(database._instance, instance) - self.assertEqual(list(database.ddl_statements), []) - self.assertIsInstance(database._pool, BurstyPool) - # BurstyPool does not create sessions during 
'bind()'. - self.assertTrue(database._pool._sessions.empty()) - - def test_ctor_w_explicit_pool(self): - instance = _Instance(self.INSTANCE_NAME) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - self.assertEqual(database.database_id, self.DATABASE_ID) - self.assertIs(database._instance, instance) - self.assertEqual(list(database.ddl_statements), []) - self.assertIs(database._pool, pool) - self.assertIs(pool._bound, database) - - def test_ctor_w_ddl_statements_non_string(self): - - with self.assertRaises(ValueError): - self._make_one( - self.DATABASE_ID, instance=object(), ddl_statements=[object()] - ) - - def test_ctor_w_ddl_statements_w_create_database(self): - - with self.assertRaises(ValueError): - self._make_one( - self.DATABASE_ID, - instance=object(), - ddl_statements=["CREATE DATABASE foo"], - ) - - def test_ctor_w_ddl_statements_ok(self): - from tests._fixtures import DDL_STATEMENTS - - instance = _Instance(self.INSTANCE_NAME) - pool = _Pool() - database = self._make_one( - self.DATABASE_ID, instance, ddl_statements=DDL_STATEMENTS, pool=pool - ) - self.assertEqual(database.database_id, self.DATABASE_ID) - self.assertIs(database._instance, instance) - self.assertEqual(list(database.ddl_statements), DDL_STATEMENTS) - - def test_from_pb_bad_database_name(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) - - database_name = "INCORRECT_FORMAT" - database_pb = admin_v1_pb2.Database(name=database_name) - klass = self._get_target_class() - - with self.assertRaises(ValueError): - klass.from_pb(database_pb, None) - - def test_from_pb_project_mistmatch(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) - - ALT_PROJECT = "ALT_PROJECT" - client = _Client(project=ALT_PROJECT) - instance = _Instance(self.INSTANCE_NAME, client) - database_pb = admin_v1_pb2.Database(name=self.DATABASE_NAME) - klass = self._get_target_class() - - with self.assertRaises(ValueError): - klass.from_pb(database_pb, instance) - - def test_from_pb_instance_mistmatch(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) - - ALT_INSTANCE = "/projects/%s/instances/ALT-INSTANCE" % (self.PROJECT_ID,) - client = _Client() - instance = _Instance(ALT_INSTANCE, client) - database_pb = admin_v1_pb2.Database(name=self.DATABASE_NAME) - klass = self._get_target_class() - - with self.assertRaises(ValueError): - klass.from_pb(database_pb, instance) - - def test_from_pb_success_w_explicit_pool(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client) - database_pb = admin_v1_pb2.Database(name=self.DATABASE_NAME) - klass = self._get_target_class() - pool = _Pool() - - database = klass.from_pb(database_pb, instance, pool=pool) - - self.assertTrue(isinstance(database, klass)) - self.assertEqual(database._instance, instance) - self.assertEqual(database.database_id, self.DATABASE_ID) - self.assertIs(database._pool, pool) - - def test_from_pb_success_w_hyphen_w_default_pool(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) - from google.cloud.spanner_v1.pool import BurstyPool - - DATABASE_ID_HYPHEN = "database-id" - DATABASE_NAME_HYPHEN = self.INSTANCE_NAME + "/databases/" + DATABASE_ID_HYPHEN - client = _Client() 
- instance = _Instance(self.INSTANCE_NAME, client) - database_pb = admin_v1_pb2.Database(name=DATABASE_NAME_HYPHEN) - klass = self._get_target_class() - - database = klass.from_pb(database_pb, instance) - - self.assertTrue(isinstance(database, klass)) - self.assertEqual(database._instance, instance) - self.assertEqual(database.database_id, DATABASE_ID_HYPHEN) - self.assertIsInstance(database._pool, BurstyPool) - # BurstyPool does not create sessions during 'bind()'. - self.assertTrue(database._pool._sessions.empty()) - - def test_name_property(self): - instance = _Instance(self.INSTANCE_NAME) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - expected_name = self.DATABASE_NAME - self.assertEqual(database.name, expected_name) - - def test_spanner_api_property_w_scopeless_creds(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - client = _Client() - client.instance_admin_api.get_instance.return_value = admin_v1_pb2.Instance( - endpoint_uris=[] - ) - client_info = client._client_info = mock.Mock() - client_options = client._client_options = mock.Mock() - credentials = client.credentials = object() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - patch = mock.patch("google.cloud.spanner_v1.database.SpannerClient") - - with mock.patch("os.getenv") as getenv: - getenv.return_value = "true" - with patch as spanner_client: - api = database.spanner_api - - self.assertIs(api, spanner_client.return_value) - - # API instance is cached - again = database.spanner_api - self.assertIs(again, api) - - client.instance_admin_api.get_instance.assert_called_once() - spanner_client.assert_called_once_with( - credentials=credentials, - client_info=client_info, - client_options=client_options, - ) - - def test_spanner_api_w_scoped_creds(self): - import google.auth.credentials - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - from google.cloud.spanner_v1.database import SPANNER_DATA_SCOPE - - class _CredentialsWithScopes(google.auth.credentials.Scoped): - def __init__(self, scopes=(), source=None): - self._scopes = scopes - self._source = source - - def requires_scopes(self): # pragma: NO COVER - return True - - def with_scopes(self, scopes): - return self.__class__(scopes, self) - - expected_scopes = (SPANNER_DATA_SCOPE,) - client = _Client() - client_info = client._client_info = mock.Mock() - client_options = client._client_options = mock.Mock() - credentials = client.credentials = _CredentialsWithScopes() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - patch = mock.patch("google.cloud.spanner_v1.database.SpannerClient") - client.instance_admin_api.get_instance.return_value = admin_v1_pb2.Instance( - endpoint_uris=[] - ) - - with mock.patch("os.getenv") as getenv: - getenv.return_value = "true" - with patch as spanner_client: - api = database.spanner_api - - self.assertNotIn(instance.name, client._endpoint_cache) - - # API instance is cached - again = database.spanner_api - self.assertIs(again, api) - - client.instance_admin_api.get_instance.assert_called_once() - self.assertEqual(len(spanner_client.call_args_list), 1) - called_args, called_kw = spanner_client.call_args - self.assertEqual(called_args, ()) - self.assertEqual(called_kw["client_info"], 
client_info) - self.assertEqual(called_kw["client_options"], client_options) - scoped = called_kw["credentials"] - self.assertEqual(scoped._scopes, expected_scopes) - self.assertIs(scoped._source, credentials) - - def test_spanner_api_property_w_scopeless_creds_and_new_endpoint(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - client = _Client() - client.instance_admin_api.get_instance.return_value = admin_v1_pb2.Instance( - endpoint_uris=["test1", "test2"] - ) - client_info = client._client_info = mock.Mock() - client._client_options = mock.Mock() - credentials = client.credentials = object() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - client_patch = mock.patch("google.cloud.spanner_v1.database.SpannerClient") - options_patch = mock.patch("google.cloud.spanner_v1.database.ClientOptions") - - with mock.patch("os.getenv") as getenv: - getenv.return_value = "true" - with options_patch as options: - with client_patch as spanner_client: - api = database.spanner_api - - self.assertIs(api, spanner_client.return_value) - self.assertIn(instance.name, client._endpoint_cache) - - # API instance is cached - again = database.spanner_api - self.assertIs(again, api) - - self.assertEqual(len(spanner_client.call_args_list), 1) - called_args, called_kw = spanner_client.call_args - self.assertEqual(called_args, ()) - self.assertEqual(called_kw["client_info"], client_info) - self.assertEqual(called_kw["credentials"], credentials) - options.assert_called_with(api_endpoint="test1") - - def test_spanner_api_w_scoped_creds_and_new_endpoint(self): - import google.auth.credentials - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - from google.cloud.spanner_v1.database import SPANNER_DATA_SCOPE - - class _CredentialsWithScopes(google.auth.credentials.Scoped): - def __init__(self, scopes=(), source=None): - self._scopes = scopes - self._source = source - - def requires_scopes(self): # pragma: NO COVER - return True - - def with_scopes(self, scopes): - return self.__class__(scopes, self) - - expected_scopes = (SPANNER_DATA_SCOPE,) - client = _Client() - client_info = client._client_info = mock.Mock() - client._client_options = mock.Mock() - credentials = client.credentials = _CredentialsWithScopes() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - client_patch = mock.patch("google.cloud.spanner_v1.database.SpannerClient") - options_patch = mock.patch("google.cloud.spanner_v1.database.ClientOptions") - client.instance_admin_api.get_instance.return_value = admin_v1_pb2.Instance( - endpoint_uris=["test1", "test2"] - ) - - with mock.patch("os.getenv") as getenv: - getenv.return_value = "true" - with options_patch as options: - with client_patch as spanner_client: - api = database.spanner_api - - self.assertIs(api, spanner_client.return_value) - self.assertIn(instance.name, client._endpoint_cache) - - # API instance is cached - again = database.spanner_api - self.assertIs(again, api) - - self.assertEqual(len(spanner_client.call_args_list), 1) - called_args, called_kw = spanner_client.call_args - self.assertEqual(called_args, ()) - self.assertEqual(called_kw["client_info"], client_info) - scoped = called_kw["credentials"] - self.assertEqual(scoped._scopes, expected_scopes) - 
self.assertIs(scoped._source, credentials) - options.assert_called_with(api_endpoint="test1") - - def test_spanner_api_resource_routing_permissions_error(self): - from google.api_core.exceptions import PermissionDenied - - client = _Client() - client_info = client._client_info = mock.Mock() - client_options = client._client_options = mock.Mock() - client._endpoint_cache = {} - credentials = client.credentials = mock.Mock() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - patch = mock.patch("google.cloud.spanner_v1.database.SpannerClient") - client.instance_admin_api.get_instance.side_effect = PermissionDenied("test") - - with mock.patch("os.getenv") as getenv: - getenv.return_value = "true" - with patch as spanner_client: - api = database.spanner_api - - self.assertIs(api, spanner_client.return_value) - - # API instance is cached - again = database.spanner_api - self.assertIs(again, api) - - client.instance_admin_api.get_instance.assert_called_once() - spanner_client.assert_called_once_with( - credentials=credentials, - client_info=client_info, - client_options=client_options, - ) - - def test_spanner_api_disable_resource_routing(self): - client = _Client() - client_info = client._client_info = mock.Mock() - client_options = client._client_options = mock.Mock() - client._endpoint_cache = {} - credentials = client.credentials = mock.Mock() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - patch = mock.patch("google.cloud.spanner_v1.database.SpannerClient") - - with mock.patch("os.getenv") as getenv: - getenv.return_value = "false" - with patch as spanner_client: - api = database.spanner_api - - self.assertIs(api, spanner_client.return_value) - - # API instance is cached - again = database.spanner_api - self.assertIs(again, api) - - client.instance_admin_api.get_instance.assert_not_called() - spanner_client.assert_called_once_with( - credentials=credentials, - client_info=client_info, - client_options=client_options, - ) - - def test_spanner_api_cached_endpoint(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - client = _Client() - client_info = client._client_info = mock.Mock() - client._client_options = mock.Mock() - client._endpoint_cache = {self.INSTANCE_NAME: "cached"} - credentials = client.credentials = mock.Mock() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - client_patch = mock.patch("google.cloud.spanner_v1.database.SpannerClient") - options_patch = mock.patch("google.cloud.spanner_v1.database.ClientOptions") - client.instance_admin_api.get_instance.return_value = admin_v1_pb2.Instance( - endpoint_uris=["test1", "test2"] - ) - - with mock.patch("os.getenv") as getenv: - getenv.return_value = "true" - with options_patch as options: - with client_patch as spanner_client: - api = database.spanner_api - - self.assertIs(api, spanner_client.return_value) - - # API instance is cached - again = database.spanner_api - self.assertIs(again, api) - - self.assertEqual(len(spanner_client.call_args_list), 1) - called_args, called_kw = spanner_client.call_args - self.assertEqual(called_args, ()) - self.assertEqual(called_kw["client_info"], client_info) - self.assertEqual(called_kw["credentials"], credentials) - 
options.assert_called_with(api_endpoint="cached") - - def test_spanner_api_resource_routing_error(self): - from google.api_core.exceptions import GoogleAPIError - - client = _Client() - client._client_info = mock.Mock() - client._client_options = mock.Mock() - client.credentials = mock.Mock() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - client.instance_admin_api.get_instance.side_effect = GoogleAPIError("test") - - with mock.patch("os.getenv") as getenv: - getenv.return_value = "true" - with self.assertRaises(GoogleAPIError): - database.spanner_api - - client.instance_admin_api.get_instance.assert_called_once() - - def test___eq__(self): - instance = _Instance(self.INSTANCE_NAME) - pool1, pool2 = _Pool(), _Pool() - database1 = self._make_one(self.DATABASE_ID, instance, pool=pool1) - database2 = self._make_one(self.DATABASE_ID, instance, pool=pool2) - self.assertEqual(database1, database2) - - def test___eq__type_differ(self): - pool = _Pool() - database1 = self._make_one(self.DATABASE_ID, None, pool=pool) - database2 = object() - self.assertNotEqual(database1, database2) - - def test___ne__same_value(self): - instance = _Instance(self.INSTANCE_NAME) - pool1, pool2 = _Pool(), _Pool() - database1 = self._make_one(self.DATABASE_ID, instance, pool=pool1) - database2 = self._make_one(self.DATABASE_ID, instance, pool=pool2) - comparison_val = database1 != database2 - self.assertFalse(comparison_val) - - def test___ne__(self): - pool1, pool2 = _Pool(), _Pool() - database1 = self._make_one("database_id1", "instance1", pool=pool1) - database2 = self._make_one("database_id2", "instance2", pool=pool2) - self.assertNotEqual(database1, database2) - - def test_create_grpc_error(self): - from google.api_core.exceptions import GoogleAPICallError - from google.api_core.exceptions import Unknown - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.create_database.side_effect = Unknown("testing") - - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(GoogleAPICallError): - database.create() - - api.create_database.assert_called_once_with( - parent=self.INSTANCE_NAME, - create_statement="CREATE DATABASE {}".format(self.DATABASE_ID), - extra_statements=[], - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_create_already_exists(self): - from google.cloud.exceptions import Conflict - - DATABASE_ID_HYPHEN = "database-id" - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.create_database.side_effect = Conflict("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(DATABASE_ID_HYPHEN, instance, pool=pool) - - with self.assertRaises(Conflict): - database.create() - - api.create_database.assert_called_once_with( - parent=self.INSTANCE_NAME, - create_statement="CREATE DATABASE `{}`".format(DATABASE_ID_HYPHEN), - extra_statements=[], - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_create_instance_not_found(self): - from google.cloud.exceptions import NotFound - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.create_database.side_effect = NotFound("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = 
self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(NotFound): - database.create() - - api.create_database.assert_called_once_with( - parent=self.INSTANCE_NAME, - create_statement="CREATE DATABASE {}".format(self.DATABASE_ID), - extra_statements=[], - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_create_success(self): - from tests._fixtures import DDL_STATEMENTS - - op_future = object() - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.create_database.return_value = op_future - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one( - self.DATABASE_ID, instance, ddl_statements=DDL_STATEMENTS, pool=pool - ) - - future = database.create() - - self.assertIs(future, op_future) - - api.create_database.assert_called_once_with( - parent=self.INSTANCE_NAME, - create_statement="CREATE DATABASE {}".format(self.DATABASE_ID), - extra_statements=DDL_STATEMENTS, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_exists_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.get_database_ddl.side_effect = Unknown("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(Unknown): - database.exists() - - api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_exists_not_found(self): - from google.cloud.exceptions import NotFound - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.get_database_ddl.side_effect = NotFound("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - self.assertFalse(database.exists()) - - api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_exists_success(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) - from tests._fixtures import DDL_STATEMENTS - - client = _Client() - ddl_pb = admin_v1_pb2.GetDatabaseDdlResponse(statements=DDL_STATEMENTS) - api = client.database_admin_api = self._make_database_admin_api() - api.get_database_ddl.return_value = ddl_pb - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - self.assertTrue(database.exists()) - - api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_reload_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.get_database_ddl.side_effect = Unknown("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(Unknown): - database.reload() - - api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_reload_not_found(self): - from 
google.cloud.exceptions import NotFound - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.get_database_ddl.side_effect = NotFound("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(NotFound): - database.reload() - - api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_reload_success(self): - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2 as admin_v1_pb2, - ) - from tests._fixtures import DDL_STATEMENTS - - client = _Client() - ddl_pb = admin_v1_pb2.GetDatabaseDdlResponse(statements=DDL_STATEMENTS) - api = client.database_admin_api = self._make_database_admin_api() - api.get_database_ddl.return_value = ddl_pb - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - database.reload() - - self.assertEqual(database._ddl_statements, tuple(DDL_STATEMENTS)) - - api.get_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_update_ddl_grpc_error(self): - from google.api_core.exceptions import Unknown - from tests._fixtures import DDL_STATEMENTS - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.update_database_ddl.side_effect = Unknown("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(Unknown): - database.update_ddl(DDL_STATEMENTS) - - api.update_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - DDL_STATEMENTS, - "", - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_update_ddl_not_found(self): - from google.cloud.exceptions import NotFound - from tests._fixtures import DDL_STATEMENTS - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.update_database_ddl.side_effect = NotFound("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(NotFound): - database.update_ddl(DDL_STATEMENTS) - - api.update_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - DDL_STATEMENTS, - "", - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_update_ddl(self): - from tests._fixtures import DDL_STATEMENTS - - op_future = object() - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.update_database_ddl.return_value = op_future - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - future = database.update_ddl(DDL_STATEMENTS) - - self.assertIs(future, op_future) - - api.update_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - DDL_STATEMENTS, - "", - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_update_ddl_w_operation_id(self): - from tests._fixtures import DDL_STATEMENTS - - op_future = object() - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.update_database_ddl.return_value = op_future - instance = 
_Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - future = database.update_ddl(DDL_STATEMENTS, operation_id="someOperationId") - - self.assertIs(future, op_future) - - api.update_database_ddl.assert_called_once_with( - self.DATABASE_NAME, - DDL_STATEMENTS, - "someOperationId", - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_drop_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.drop_database.side_effect = Unknown("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(Unknown): - database.drop() - - api.drop_database.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_drop_not_found(self): - from google.cloud.exceptions import NotFound - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.drop_database.side_effect = NotFound("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - with self.assertRaises(NotFound): - database.drop() - - api.drop_database.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_drop_success(self): - from google.protobuf.empty_pb2 import Empty - - client = _Client() - api = client.database_admin_api = self._make_database_admin_api() - api.drop_database.return_value = Empty() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - database.drop() - - api.drop_database.assert_called_once_with( - self.DATABASE_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def _execute_partitioned_dml_helper(self, dml, params=None, param_types=None): - from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ( - PartialResultSet, - ResultSetStats, - ) - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionSelector, - TransactionOptions, - ) - from google.cloud.spanner_v1._helpers import _make_value_pb - - transaction_pb = TransactionPB(id=self.TRANSACTION_ID) - - stats_pb = ResultSetStats(row_count_lower_bound=2) - result_sets = [PartialResultSet(stats=stats_pb)] - iterator = _MockIterator(*result_sets) - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - session = _Session() - pool.put(session) - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - api = database._spanner_api = self._make_spanner_api() - api.begin_transaction.return_value = transaction_pb - api.execute_streaming_sql.return_value = iterator - - row_count = database.execute_partitioned_dml(dml, params, param_types) - - self.assertEqual(row_count, 2) - - txn_options = TransactionOptions( - partitioned_dml=TransactionOptions.PartitionedDml() - ) - - api.begin_transaction.assert_called_once_with( - session.name, - txn_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - if params: - expected_params = Struct( - fields={key: _make_value_pb(value) for (key, value) in params.items()} - ) - 
else: - expected_params = None - - expected_transaction = TransactionSelector(id=self.TRANSACTION_ID) - - api.execute_streaming_sql.assert_called_once_with( - self.SESSION_NAME, - dml, - transaction=expected_transaction, - params=expected_params, - param_types=param_types, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_execute_partitioned_dml_wo_params(self): - self._execute_partitioned_dml_helper(dml=DML_WO_PARAM) - - def test_execute_partitioned_dml_w_params_wo_param_types(self): - with self.assertRaises(ValueError): - self._execute_partitioned_dml_helper(dml=DML_W_PARAM, params=PARAMS) - - def test_execute_partitioned_dml_w_params_and_param_types(self): - self._execute_partitioned_dml_helper( - dml=DML_W_PARAM, params=PARAMS, param_types=PARAM_TYPES - ) - - def test_session_factory_defaults(self): - from google.cloud.spanner_v1.session import Session - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - session = database.session() - - self.assertTrue(isinstance(session, Session)) - self.assertIs(session.session_id, None) - self.assertIs(session._database, database) - self.assertEqual(session.labels, {}) - - def test_session_factory_w_labels(self): - from google.cloud.spanner_v1.session import Session - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - labels = {"foo": "bar"} - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - session = database.session(labels=labels) - - self.assertTrue(isinstance(session, Session)) - self.assertIs(session.session_id, None) - self.assertIs(session._database, database) - self.assertEqual(session.labels, labels) - - def test_snapshot_defaults(self): - from google.cloud.spanner_v1.database import SnapshotCheckout - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - session = _Session() - pool.put(session) - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - checkout = database.snapshot() - self.assertIsInstance(checkout, SnapshotCheckout) - self.assertIs(checkout._database, database) - self.assertEqual(checkout._kw, {}) - - def test_snapshot_w_read_timestamp_and_multi_use(self): - import datetime - from google.cloud._helpers import UTC - from google.cloud.spanner_v1.database import SnapshotCheckout - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - session = _Session() - pool.put(session) - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - checkout = database.snapshot(read_timestamp=now, multi_use=True) - - self.assertIsInstance(checkout, SnapshotCheckout) - self.assertIs(checkout._database, database) - self.assertEqual(checkout._kw, {"read_timestamp": now, "multi_use": True}) - - def test_batch(self): - from google.cloud.spanner_v1.database import BatchCheckout - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - session = _Session() - pool.put(session) - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - checkout = database.batch() - self.assertIsInstance(checkout, BatchCheckout) - self.assertIs(checkout._database, database) - - def test_batch_snapshot(self): - from google.cloud.spanner_v1.database import BatchSnapshot - - database = self._make_one(self.DATABASE_ID, instance=object(), 
pool=_Pool()) - - batch_txn = database.batch_snapshot() - self.assertIsInstance(batch_txn, BatchSnapshot) - self.assertIs(batch_txn._database, database) - self.assertIsNone(batch_txn._read_timestamp) - self.assertIsNone(batch_txn._exact_staleness) - - def test_batch_snapshot_w_read_timestamp(self): - from google.cloud.spanner_v1.database import BatchSnapshot - - database = self._make_one(self.DATABASE_ID, instance=object(), pool=_Pool()) - timestamp = self._make_timestamp() - - batch_txn = database.batch_snapshot(read_timestamp=timestamp) - self.assertIsInstance(batch_txn, BatchSnapshot) - self.assertIs(batch_txn._database, database) - self.assertEqual(batch_txn._read_timestamp, timestamp) - self.assertIsNone(batch_txn._exact_staleness) - - def test_batch_snapshot_w_exact_staleness(self): - from google.cloud.spanner_v1.database import BatchSnapshot - - database = self._make_one(self.DATABASE_ID, instance=object(), pool=_Pool()) - duration = self._make_duration() - - batch_txn = database.batch_snapshot(exact_staleness=duration) - self.assertIsInstance(batch_txn, BatchSnapshot) - self.assertIs(batch_txn._database, database) - self.assertIsNone(batch_txn._read_timestamp) - self.assertEqual(batch_txn._exact_staleness, duration) - - def test_run_in_transaction_wo_args(self): - import datetime - - NOW = datetime.datetime.now() - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - session = _Session() - pool.put(session) - session._committed = NOW - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - _unit_of_work = object() - - committed = database.run_in_transaction(_unit_of_work) - - self.assertEqual(committed, NOW) - self.assertEqual(session._retried, (_unit_of_work, (), {})) - - def test_run_in_transaction_w_args(self): - import datetime - - SINCE = datetime.datetime(2017, 1, 1) - UNTIL = datetime.datetime(2018, 1, 1) - NOW = datetime.datetime.now() - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - pool = _Pool() - session = _Session() - pool.put(session) - session._committed = NOW - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - _unit_of_work = object() - - committed = database.run_in_transaction(_unit_of_work, SINCE, until=UNTIL) - - self.assertEqual(committed, NOW) - self.assertEqual(session._retried, (_unit_of_work, (SINCE,), {"until": UNTIL})) - - def test_run_in_transaction_nested(self): - from datetime import datetime - - # Perform the various setup tasks. - instance = _Instance(self.INSTANCE_NAME, client=_Client()) - pool = _Pool() - session = _Session(run_transaction_function=True) - session._committed = datetime.now() - pool.put(session) - database = self._make_one(self.DATABASE_ID, instance, pool=pool) - - # Define the inner function. - inner = mock.Mock(spec=()) - - # Define the nested transaction. - def nested_unit_of_work(): - return database.run_in_transaction(inner) - - # Attempting to run this transaction should raise RuntimeError. 
- with self.assertRaises(RuntimeError): - database.run_in_transaction(nested_unit_of_work) - self.assertEqual(inner.call_count, 0) - - -class TestBatchCheckout(_BaseTest): - def _get_target_class(self): - from google.cloud.spanner_v1.database import BatchCheckout - - return BatchCheckout - - @staticmethod - def _make_spanner_client(): - from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient - - return mock.create_autospec(SpannerClient) - - def test_ctor(self): - database = _Database(self.DATABASE_NAME) - checkout = self._make_one(database) - self.assertIs(checkout._database, database) - - def test_context_mgr_success(self): - import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.batch import Batch - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - database = _Database(self.DATABASE_NAME) - api = database.spanner_api = self._make_spanner_client() - api.commit.return_value = response - pool = database._pool = _Pool() - session = _Session(database) - pool.put(session) - checkout = self._make_one(database) - - with checkout as batch: - self.assertIsNone(pool._session) - self.assertIsInstance(batch, Batch) - self.assertIs(batch._session, session) - - self.assertIs(pool._session, session) - self.assertEqual(batch.committed, now) - - expected_txn_options = TransactionOptions(read_write={}) - - api.commit.assert_called_once_with( - self.SESSION_NAME, - mutations=[], - single_use_transaction=expected_txn_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_context_mgr_failure(self): - from google.cloud.spanner_v1.batch import Batch - - database = _Database(self.DATABASE_NAME) - pool = database._pool = _Pool() - session = _Session(database) - pool.put(session) - checkout = self._make_one(database) - - class Testing(Exception): - pass - - with self.assertRaises(Testing): - with checkout as batch: - self.assertIsNone(pool._session) - self.assertIsInstance(batch, Batch) - self.assertIs(batch._session, session) - raise Testing() - - self.assertIs(pool._session, session) - self.assertIsNone(batch.committed) - - -class TestSnapshotCheckout(_BaseTest): - def _get_target_class(self): - from google.cloud.spanner_v1.database import SnapshotCheckout - - return SnapshotCheckout - - def test_ctor_defaults(self): - from google.cloud.spanner_v1.snapshot import Snapshot - - database = _Database(self.DATABASE_NAME) - session = _Session(database) - pool = database._pool = _Pool() - pool.put(session) - - checkout = self._make_one(database) - self.assertIs(checkout._database, database) - self.assertEqual(checkout._kw, {}) - - with checkout as snapshot: - self.assertIsNone(pool._session) - self.assertIsInstance(snapshot, Snapshot) - self.assertIs(snapshot._session, session) - self.assertTrue(snapshot._strong) - self.assertFalse(snapshot._multi_use) - - self.assertIs(pool._session, session) - - def test_ctor_w_read_timestamp_and_multi_use(self): - import datetime - from google.cloud._helpers import UTC - from google.cloud.spanner_v1.snapshot import Snapshot - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - database = _Database(self.DATABASE_NAME) - session = _Session(database) - pool = database._pool = _Pool() - 
pool.put(session) - - checkout = self._make_one(database, read_timestamp=now, multi_use=True) - self.assertIs(checkout._database, database) - self.assertEqual(checkout._kw, {"read_timestamp": now, "multi_use": True}) - - with checkout as snapshot: - self.assertIsNone(pool._session) - self.assertIsInstance(snapshot, Snapshot) - self.assertIs(snapshot._session, session) - self.assertEqual(snapshot._read_timestamp, now) - self.assertTrue(snapshot._multi_use) - - self.assertIs(pool._session, session) - - def test_context_mgr_failure(self): - from google.cloud.spanner_v1.snapshot import Snapshot - - database = _Database(self.DATABASE_NAME) - pool = database._pool = _Pool() - session = _Session(database) - pool.put(session) - checkout = self._make_one(database) - - class Testing(Exception): - pass - - with self.assertRaises(Testing): - with checkout as snapshot: - self.assertIsNone(pool._session) - self.assertIsInstance(snapshot, Snapshot) - self.assertIs(snapshot._session, session) - raise Testing() - - self.assertIs(pool._session, session) - - -class TestBatchSnapshot(_BaseTest): - TABLE = "table_name" - COLUMNS = ["column_one", "column_two"] - TOKENS = [b"TOKEN1", b"TOKEN2"] - INDEX = "index" - - def _get_target_class(self): - from google.cloud.spanner_v1.database import BatchSnapshot - - return BatchSnapshot - - @staticmethod - def _make_database(**kwargs): - from google.cloud.spanner_v1.database import Database - - return mock.create_autospec(Database, instance=True, **kwargs) - - @staticmethod - def _make_session(**kwargs): - from google.cloud.spanner_v1.session import Session - - return mock.create_autospec(Session, instance=True, **kwargs) - - @staticmethod - def _make_snapshot(transaction_id=None, **kwargs): - from google.cloud.spanner_v1.snapshot import Snapshot - - snapshot = mock.create_autospec(Snapshot, instance=True, **kwargs) - if transaction_id is not None: - snapshot._transaction_id = transaction_id - - return snapshot - - @staticmethod - def _make_keyset(): - from google.cloud.spanner_v1.keyset import KeySet - - return KeySet(all_=True) - - def test_ctor_no_staleness(self): - database = self._make_database() - - batch_txn = self._make_one(database) - - self.assertIs(batch_txn._database, database) - self.assertIsNone(batch_txn._session) - self.assertIsNone(batch_txn._snapshot) - self.assertIsNone(batch_txn._read_timestamp) - self.assertIsNone(batch_txn._exact_staleness) - - def test_ctor_w_read_timestamp(self): - database = self._make_database() - timestamp = self._make_timestamp() - - batch_txn = self._make_one(database, read_timestamp=timestamp) - - self.assertIs(batch_txn._database, database) - self.assertIsNone(batch_txn._session) - self.assertIsNone(batch_txn._snapshot) - self.assertEqual(batch_txn._read_timestamp, timestamp) - self.assertIsNone(batch_txn._exact_staleness) - - def test_ctor_w_exact_staleness(self): - database = self._make_database() - duration = self._make_duration() - - batch_txn = self._make_one(database, exact_staleness=duration) - - self.assertIs(batch_txn._database, database) - self.assertIsNone(batch_txn._session) - self.assertIsNone(batch_txn._snapshot) - self.assertIsNone(batch_txn._read_timestamp) - self.assertEqual(batch_txn._exact_staleness, duration) - - def test_from_dict(self): - klass = self._get_target_class() - database = self._make_database() - session = database.session.return_value = self._make_session() - snapshot = session.snapshot.return_value = self._make_snapshot() - api_repr = { - "session_id": self.SESSION_ID, - "transaction_id": 
self.TRANSACTION_ID, - } - - batch_txn = klass.from_dict(database, api_repr) - self.assertIs(batch_txn._database, database) - self.assertIs(batch_txn._session, session) - self.assertEqual(session._session_id, self.SESSION_ID) - self.assertEqual(snapshot._transaction_id, self.TRANSACTION_ID) - snapshot.begin.assert_not_called() - self.assertIs(batch_txn._snapshot, snapshot) - - def test_to_dict(self): - database = self._make_database() - batch_txn = self._make_one(database) - batch_txn._session = self._make_session(_session_id=self.SESSION_ID) - batch_txn._snapshot = self._make_snapshot(transaction_id=self.TRANSACTION_ID) - - expected = { - "session_id": self.SESSION_ID, - "transaction_id": self.TRANSACTION_ID, - } - self.assertEqual(batch_txn.to_dict(), expected) - - def test__get_session_already(self): - database = self._make_database() - batch_txn = self._make_one(database) - already = batch_txn._session = object() - self.assertIs(batch_txn._get_session(), already) - - def test__get_session_new(self): - database = self._make_database() - session = database.session.return_value = self._make_session() - batch_txn = self._make_one(database) - self.assertIs(batch_txn._get_session(), session) - session.create.assert_called_once_with() - - def test__get_snapshot_already(self): - database = self._make_database() - batch_txn = self._make_one(database) - already = batch_txn._snapshot = self._make_snapshot() - self.assertIs(batch_txn._get_snapshot(), already) - already.begin.assert_not_called() - - def test__get_snapshot_new_wo_staleness(self): - database = self._make_database() - batch_txn = self._make_one(database) - session = batch_txn._session = self._make_session() - snapshot = session.snapshot.return_value = self._make_snapshot() - self.assertIs(batch_txn._get_snapshot(), snapshot) - session.snapshot.assert_called_once_with( - read_timestamp=None, exact_staleness=None, multi_use=True - ) - snapshot.begin.assert_called_once_with() - - def test__get_snapshot_w_read_timestamp(self): - database = self._make_database() - timestamp = self._make_timestamp() - batch_txn = self._make_one(database, read_timestamp=timestamp) - session = batch_txn._session = self._make_session() - snapshot = session.snapshot.return_value = self._make_snapshot() - self.assertIs(batch_txn._get_snapshot(), snapshot) - session.snapshot.assert_called_once_with( - read_timestamp=timestamp, exact_staleness=None, multi_use=True - ) - snapshot.begin.assert_called_once_with() - - def test__get_snapshot_w_exact_staleness(self): - database = self._make_database() - duration = self._make_duration() - batch_txn = self._make_one(database, exact_staleness=duration) - session = batch_txn._session = self._make_session() - snapshot = session.snapshot.return_value = self._make_snapshot() - self.assertIs(batch_txn._get_snapshot(), snapshot) - session.snapshot.assert_called_once_with( - read_timestamp=None, exact_staleness=duration, multi_use=True - ) - snapshot.begin.assert_called_once_with() - - def test_read(self): - keyset = self._make_keyset() - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - - rows = batch_txn.read(self.TABLE, self.COLUMNS, keyset, self.INDEX) - - self.assertIs(rows, snapshot.read.return_value) - snapshot.read.assert_called_once_with( - self.TABLE, self.COLUMNS, keyset, self.INDEX - ) - - def test_execute_sql(self): - sql = ( - "SELECT first_name, last_name, email FROM citizens " "WHERE age <= @max_age" - ) - params = {"max_age": 30} - 
param_types = {"max_age": "INT64"} - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - - rows = batch_txn.execute_sql(sql, params, param_types) - - self.assertIs(rows, snapshot.execute_sql.return_value) - snapshot.execute_sql.assert_called_once_with(sql, params, param_types) - - def test_generate_read_batches_w_max_partitions(self): - max_partitions = len(self.TOKENS) - keyset = self._make_keyset() - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - snapshot.partition_read.return_value = self.TOKENS - - batches = list( - batch_txn.generate_read_batches( - self.TABLE, self.COLUMNS, keyset, max_partitions=max_partitions - ) - ) - - expected_read = { - "table": self.TABLE, - "columns": self.COLUMNS, - "keyset": {"all": True}, - "index": "", - } - self.assertEqual(len(batches), len(self.TOKENS)) - for batch, token in zip(batches, self.TOKENS): - self.assertEqual(batch["partition"], token) - self.assertEqual(batch["read"], expected_read) - - snapshot.partition_read.assert_called_once_with( - table=self.TABLE, - columns=self.COLUMNS, - keyset=keyset, - index="", - partition_size_bytes=None, - max_partitions=max_partitions, - ) - - def test_generate_read_batches_w_index_w_partition_size_bytes(self): - size = 1 << 20 - keyset = self._make_keyset() - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - snapshot.partition_read.return_value = self.TOKENS - - batches = list( - batch_txn.generate_read_batches( - self.TABLE, - self.COLUMNS, - keyset, - index=self.INDEX, - partition_size_bytes=size, - ) - ) - - expected_read = { - "table": self.TABLE, - "columns": self.COLUMNS, - "keyset": {"all": True}, - "index": self.INDEX, - } - self.assertEqual(len(batches), len(self.TOKENS)) - for batch, token in zip(batches, self.TOKENS): - self.assertEqual(batch["partition"], token) - self.assertEqual(batch["read"], expected_read) - - snapshot.partition_read.assert_called_once_with( - table=self.TABLE, - columns=self.COLUMNS, - keyset=keyset, - index=self.INDEX, - partition_size_bytes=size, - max_partitions=None, - ) - - def test_process_read_batch(self): - keyset = self._make_keyset() - token = b"TOKEN" - batch = { - "partition": token, - "read": { - "table": self.TABLE, - "columns": self.COLUMNS, - "keyset": {"all": True}, - "index": self.INDEX, - }, - } - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - expected = snapshot.read.return_value = object() - - found = batch_txn.process_read_batch(batch) - - self.assertIs(found, expected) - - snapshot.read.assert_called_once_with( - table=self.TABLE, - columns=self.COLUMNS, - keyset=keyset, - index=self.INDEX, - partition=token, - ) - - def test_generate_query_batches_w_max_partitions(self): - sql = "SELECT COUNT(*) FROM table_name" - max_partitions = len(self.TOKENS) - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - snapshot.partition_query.return_value = self.TOKENS - - batches = list( - batch_txn.generate_query_batches(sql, max_partitions=max_partitions) - ) - - expected_query = {"sql": sql} - self.assertEqual(len(batches), len(self.TOKENS)) - for batch, token in zip(batches, self.TOKENS): - self.assertEqual(batch["partition"], token) - 
self.assertEqual(batch["query"], expected_query) - - snapshot.partition_query.assert_called_once_with( - sql=sql, - params=None, - param_types=None, - partition_size_bytes=None, - max_partitions=max_partitions, - ) - - def test_generate_query_batches_w_params_w_partition_size_bytes(self): - sql = ( - "SELECT first_name, last_name, email FROM citizens " "WHERE age <= @max_age" - ) - params = {"max_age": 30} - param_types = {"max_age": "INT64"} - size = 1 << 20 - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - snapshot.partition_query.return_value = self.TOKENS - - batches = list( - batch_txn.generate_query_batches( - sql, params=params, param_types=param_types, partition_size_bytes=size - ) - ) - - expected_query = {"sql": sql, "params": params, "param_types": param_types} - self.assertEqual(len(batches), len(self.TOKENS)) - for batch, token in zip(batches, self.TOKENS): - self.assertEqual(batch["partition"], token) - self.assertEqual(batch["query"], expected_query) - - snapshot.partition_query.assert_called_once_with( - sql=sql, - params=params, - param_types=param_types, - partition_size_bytes=size, - max_partitions=None, - ) - - def test_process_query_batch(self): - sql = ( - "SELECT first_name, last_name, email FROM citizens " "WHERE age <= @max_age" - ) - params = {"max_age": 30} - param_types = {"max_age": "INT64"} - token = b"TOKEN" - batch = { - "partition": token, - "query": {"sql": sql, "params": params, "param_types": param_types}, - } - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - expected = snapshot.execute_sql.return_value = object() - - found = batch_txn.process_query_batch(batch) - - self.assertIs(found, expected) - - snapshot.execute_sql.assert_called_once_with( - sql=sql, params=params, param_types=param_types, partition=token - ) - - def test_close_wo_session(self): - database = self._make_database() - batch_txn = self._make_one(database) - - batch_txn.close() # no raise - - def test_close_w_session(self): - database = self._make_database() - batch_txn = self._make_one(database) - session = batch_txn._session = self._make_session() - - batch_txn.close() - - session.delete.assert_called_once_with() - - def test_process_w_invalid_batch(self): - token = b"TOKEN" - batch = {"partition": token, "bogus": b"BOGUS"} - database = self._make_database() - batch_txn = self._make_one(database) - - with self.assertRaises(ValueError): - batch_txn.process(batch) - - def test_process_w_read_batch(self): - keyset = self._make_keyset() - token = b"TOKEN" - batch = { - "partition": token, - "read": { - "table": self.TABLE, - "columns": self.COLUMNS, - "keyset": {"all": True}, - "index": self.INDEX, - }, - } - database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - expected = snapshot.read.return_value = object() - - found = batch_txn.process(batch) - - self.assertIs(found, expected) - - snapshot.read.assert_called_once_with( - table=self.TABLE, - columns=self.COLUMNS, - keyset=keyset, - index=self.INDEX, - partition=token, - ) - - def test_process_w_query_batch(self): - sql = ( - "SELECT first_name, last_name, email FROM citizens " "WHERE age <= @max_age" - ) - params = {"max_age": 30} - param_types = {"max_age": "INT64"} - token = b"TOKEN" - batch = { - "partition": token, - "query": {"sql": sql, "params": params, "param_types": param_types}, - } - 
database = self._make_database() - batch_txn = self._make_one(database) - snapshot = batch_txn._snapshot = self._make_snapshot() - expected = snapshot.execute_sql.return_value = object() - - found = batch_txn.process(batch) - - self.assertIs(found, expected) - - snapshot.execute_sql.assert_called_once_with( - sql=sql, params=params, param_types=param_types, partition=token - ) - - -def _make_instance_api(): - from google.cloud.spanner_admin_instance_v1.gapic.instance_admin_client import ( - InstanceAdminClient, - ) - - return mock.create_autospec(InstanceAdminClient) - - -class _Client(object): - def __init__(self, project=TestDatabase.PROJECT_ID): - self.project = project - self.project_name = "projects/" + self.project - self._endpoint_cache = {} - self.instance_admin_api = _make_instance_api() - - -class _Instance(object): - def __init__(self, name, client=None): - self.name = name - self.instance_id = name.rsplit("/", 1)[1] - self._client = client - - -class _Database(object): - def __init__(self, name, instance=None): - self.name = name - self.database_id = name.rsplit("/", 1)[1] - self._instance = instance - - -class _Pool(object): - _bound = None - - def bind(self, database): - self._bound = database - - def get(self): - session, self._session = self._session, None - return session - - def put(self, session): - self._session = session - - -class _Session(object): - - _rows = () - _created = False - _transaction = None - _snapshot = None - - def __init__( - self, database=None, name=_BaseTest.SESSION_NAME, run_transaction_function=False - ): - self._database = database - self.name = name - self._run_transaction_function = run_transaction_function - - def run_in_transaction(self, func, *args, **kw): - if self._run_transaction_function: - func(*args, **kw) - self._retried = (func, args, kw) - return self._committed - - -class _MockIterator(object): - def __init__(self, *values, **kw): - self._iter_values = iter(values) - self._fail_after = kw.pop("fail_after", False) - - def __iter__(self): - return self - - def __next__(self): - try: - return next(self._iter_values) - except StopIteration: - raise - - next = __next__ diff --git a/spanner/tests/unit/test_instance.py b/spanner/tests/unit/test_instance.py deleted file mode 100644 index 0e7bc99df479..000000000000 --- a/spanner/tests/unit/test_instance.py +++ /dev/null @@ -1,660 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import mock - - -class TestInstance(unittest.TestCase): - - PROJECT = "project" - PARENT = "projects/" + PROJECT - INSTANCE_ID = "instance-id" - INSTANCE_NAME = PARENT + "/instances/" + INSTANCE_ID - CONFIG_NAME = "configuration-name" - LOCATION = "projects/" + PROJECT + "/locations/" + CONFIG_NAME - DISPLAY_NAME = "display_name" - NODE_COUNT = 5 - OP_ID = 8915 - OP_NAME = "operations/projects/%s/instances/%soperations/%d" % ( - PROJECT, - INSTANCE_ID, - OP_ID, - ) - TABLE_ID = "table_id" - TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID - TIMEOUT_SECONDS = 1 - DATABASE_ID = "database_id" - DATABASE_NAME = "%s/databases/%s" % (INSTANCE_NAME, DATABASE_ID) - - def _getTargetClass(self): - from google.cloud.spanner_v1.instance import Instance - - return Instance - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor_defaults(self): - from google.cloud.spanner_v1.instance import DEFAULT_NODE_COUNT - - client = object() - instance = self._make_one(self.INSTANCE_ID, client) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertIs(instance._client, client) - self.assertIs(instance.configuration_name, None) - self.assertEqual(instance.node_count, DEFAULT_NODE_COUNT) - self.assertEqual(instance.display_name, self.INSTANCE_ID) - - def test_constructor_non_default(self): - DISPLAY_NAME = "display_name" - client = object() - - instance = self._make_one( - self.INSTANCE_ID, - client, - configuration_name=self.CONFIG_NAME, - node_count=self.NODE_COUNT, - display_name=DISPLAY_NAME, - ) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertIs(instance._client, client) - self.assertEqual(instance.configuration_name, self.CONFIG_NAME) - self.assertEqual(instance.node_count, self.NODE_COUNT) - self.assertEqual(instance.display_name, DISPLAY_NAME) - - def test_copy(self): - DISPLAY_NAME = "display_name" - - client = _Client(self.PROJECT) - instance = self._make_one( - self.INSTANCE_ID, client, self.CONFIG_NAME, display_name=DISPLAY_NAME - ) - new_instance = instance.copy() - - # Make sure the client copy succeeded. - self.assertIsNot(new_instance._client, client) - self.assertEqual(new_instance._client, client) - # Make sure the client got copied to a new instance. 
- self.assertIsNot(instance, new_instance) - self.assertEqual(instance, new_instance) - - def test__update_from_pb_success(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - display_name = "display_name" - instance_pb = admin_v1_pb2.Instance(display_name=display_name) - - instance = self._make_one(None, None, None, None) - self.assertEqual(instance.display_name, None) - instance._update_from_pb(instance_pb) - self.assertEqual(instance.display_name, display_name) - - def test__update_from_pb_no_display_name(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - instance_pb = admin_v1_pb2.Instance() - instance = self._make_one(None, None, None, None) - self.assertEqual(instance.display_name, None) - with self.assertRaises(ValueError): - instance._update_from_pb(instance_pb) - self.assertEqual(instance.display_name, None) - - def test_from_pb_bad_instance_name(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - instance_name = "INCORRECT_FORMAT" - instance_pb = admin_v1_pb2.Instance(name=instance_name) - - klass = self._getTargetClass() - with self.assertRaises(ValueError): - klass.from_pb(instance_pb, None) - - def test_from_pb_project_mistmatch(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - ALT_PROJECT = "ALT_PROJECT" - client = _Client(project=ALT_PROJECT) - - self.assertNotEqual(self.PROJECT, ALT_PROJECT) - - instance_pb = admin_v1_pb2.Instance(name=self.INSTANCE_NAME) - - klass = self._getTargetClass() - with self.assertRaises(ValueError): - klass.from_pb(instance_pb, client) - - def test_from_pb_success(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - client = _Client(project=self.PROJECT) - - instance_pb = admin_v1_pb2.Instance( - name=self.INSTANCE_NAME, - config=self.CONFIG_NAME, - display_name=self.INSTANCE_ID, - ) - - klass = self._getTargetClass() - instance = klass.from_pb(instance_pb, client) - self.assertTrue(isinstance(instance, klass)) - self.assertEqual(instance._client, client) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.configuration_name, self.CONFIG_NAME) - - def test_name_property(self): - client = _Client(project=self.PROJECT) - - instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - self.assertEqual(instance.name, self.INSTANCE_NAME) - - def test___eq__(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - instance2 = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - self.assertEqual(instance1, instance2) - - def test___eq__type_differ(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - instance2 = object() - self.assertNotEqual(instance1, instance2) - - def test___ne__same_value(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - instance2 = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - comparison_val = instance1 != instance2 - self.assertFalse(comparison_val) - - def test___ne__(self): - instance1 = self._make_one("instance_id1", "client1", self.CONFIG_NAME) - instance2 = self._make_one("instance_id2", "client2", self.CONFIG_NAME) - self.assertNotEqual(instance1, instance2) - - def 
test_create_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client(self.PROJECT) - client.instance_admin_api = _FauxInstanceAdminAPI(_rpc_error=True) - instance = self._make_one( - self.INSTANCE_ID, client, configuration_name=self.CONFIG_NAME - ) - - with self.assertRaises(Unknown): - instance.create() - - def test_create_already_exists(self): - from google.cloud.exceptions import Conflict - - client = _Client(self.PROJECT) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _create_instance_conflict=True - ) - instance = self._make_one( - self.INSTANCE_ID, client, configuration_name=self.CONFIG_NAME - ) - - with self.assertRaises(Conflict): - instance.create() - - (parent, instance_id, instance, metadata) = api._created_instance - self.assertEqual(parent, self.PARENT) - self.assertEqual(instance_id, self.INSTANCE_ID) - self.assertEqual(instance.name, self.INSTANCE_NAME) - self.assertEqual(instance.config, self.CONFIG_NAME) - self.assertEqual(instance.display_name, self.INSTANCE_ID) - self.assertEqual(instance.node_count, 1) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_create_success(self): - op_future = _FauxOperationFuture() - client = _Client(self.PROJECT) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _create_instance_response=op_future - ) - instance = self._make_one( - self.INSTANCE_ID, - client, - configuration_name=self.CONFIG_NAME, - display_name=self.DISPLAY_NAME, - node_count=self.NODE_COUNT, - ) - - future = instance.create() - - self.assertIs(future, op_future) - - (parent, instance_id, instance, metadata) = api._created_instance - self.assertEqual(parent, self.PARENT) - self.assertEqual(instance_id, self.INSTANCE_ID) - self.assertEqual(instance.name, self.INSTANCE_NAME) - self.assertEqual(instance.config, self.CONFIG_NAME) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.node_count, self.NODE_COUNT) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_exists_instance_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client(self.PROJECT) - client.instance_admin_api = _FauxInstanceAdminAPI(_rpc_error=True) - instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - - with self.assertRaises(Unknown): - instance.exists() - - def test_exists_instance_not_found(self): - client = _Client(self.PROJECT) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _instance_not_found=True - ) - api._instance_not_found = True - instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - - self.assertFalse(instance.exists()) - - name, metadata = api._got_instance - self.assertEqual(name, self.INSTANCE_NAME) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_exists_success(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - client = _Client(self.PROJECT) - instance_pb = admin_v1_pb2.Instance( - name=self.INSTANCE_NAME, - config=self.CONFIG_NAME, - display_name=self.DISPLAY_NAME, - node_count=self.NODE_COUNT, - ) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _get_instance_response=instance_pb - ) - instance = self._make_one(self.INSTANCE_ID, client) - - self.assertTrue(instance.exists()) - - name, metadata = api._got_instance - self.assertEqual(name, self.INSTANCE_NAME) - self.assertEqual(metadata, 
[("google-cloud-resource-prefix", instance.name)]) - - def test_reload_instance_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client(self.PROJECT) - client.instance_admin_api = _FauxInstanceAdminAPI(_rpc_error=True) - instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - - with self.assertRaises(Unknown): - instance.reload() - - def test_reload_instance_not_found(self): - from google.cloud.exceptions import NotFound - - client = _Client(self.PROJECT) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _instance_not_found=True - ) - api._instance_not_found = True - instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - - with self.assertRaises(NotFound): - instance.reload() - - name, metadata = api._got_instance - self.assertEqual(name, self.INSTANCE_NAME) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_reload_success(self): - from google.cloud.spanner_admin_instance_v1.proto import ( - spanner_instance_admin_pb2 as admin_v1_pb2, - ) - - client = _Client(self.PROJECT) - instance_pb = admin_v1_pb2.Instance( - name=self.INSTANCE_NAME, - config=self.CONFIG_NAME, - display_name=self.DISPLAY_NAME, - node_count=self.NODE_COUNT, - ) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _get_instance_response=instance_pb - ) - instance = self._make_one(self.INSTANCE_ID, client) - - instance.reload() - - self.assertEqual(instance.configuration_name, self.CONFIG_NAME) - self.assertEqual(instance.node_count, self.NODE_COUNT) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - - name, metadata = api._got_instance - self.assertEqual(name, self.INSTANCE_NAME) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_update_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client(self.PROJECT) - client.instance_admin_api = _FauxInstanceAdminAPI(_rpc_error=True) - instance = self._make_one( - self.INSTANCE_ID, client, configuration_name=self.CONFIG_NAME - ) - - with self.assertRaises(Unknown): - instance.update() - - def test_update_not_found(self): - from google.cloud.exceptions import NotFound - from google.cloud.spanner_v1.instance import DEFAULT_NODE_COUNT - - client = _Client(self.PROJECT) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _instance_not_found=True - ) - instance = self._make_one( - self.INSTANCE_ID, client, configuration_name=self.CONFIG_NAME - ) - - with self.assertRaises(NotFound): - instance.update() - - instance, field_mask, metadata = api._updated_instance - self.assertEqual(field_mask.paths, ["config", "display_name", "node_count"]) - self.assertEqual(instance.name, self.INSTANCE_NAME) - self.assertEqual(instance.config, self.CONFIG_NAME) - self.assertEqual(instance.display_name, self.INSTANCE_ID) - self.assertEqual(instance.node_count, DEFAULT_NODE_COUNT) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_update_success(self): - op_future = _FauxOperationFuture() - client = _Client(self.PROJECT) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _update_instance_response=op_future - ) - instance = self._make_one( - self.INSTANCE_ID, - client, - configuration_name=self.CONFIG_NAME, - node_count=self.NODE_COUNT, - display_name=self.DISPLAY_NAME, - ) - - future = instance.update() - - self.assertIs(future, op_future) - - instance, field_mask, metadata = api._updated_instance - self.assertEqual(field_mask.paths, 
["config", "display_name", "node_count"]) - self.assertEqual(instance.name, self.INSTANCE_NAME) - self.assertEqual(instance.config, self.CONFIG_NAME) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.node_count, self.NODE_COUNT) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_delete_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client(self.PROJECT) - client.instance_admin_api = _FauxInstanceAdminAPI(_rpc_error=True) - instance = self._make_one(self.INSTANCE_ID, client) - - with self.assertRaises(Unknown): - instance.delete() - - def test_delete_not_found(self): - from google.cloud.exceptions import NotFound - - client = _Client(self.PROJECT) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _instance_not_found=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - with self.assertRaises(NotFound): - instance.delete() - - name, metadata = api._deleted_instance - self.assertEqual(name, self.INSTANCE_NAME) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_delete_success(self): - from google.protobuf.empty_pb2 import Empty - - client = _Client(self.PROJECT) - api = client.instance_admin_api = _FauxInstanceAdminAPI( - _delete_instance_response=Empty() - ) - instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - - instance.delete() - - name, metadata = api._deleted_instance - self.assertEqual(name, self.INSTANCE_NAME) - self.assertEqual(metadata, [("google-cloud-resource-prefix", instance.name)]) - - def test_database_factory_defaults(self): - from google.cloud.spanner_v1.database import Database - from google.cloud.spanner_v1.pool import BurstyPool - - client = _Client(self.PROJECT) - instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - DATABASE_ID = "database-id" - - database = instance.database(DATABASE_ID) - - self.assertTrue(isinstance(database, Database)) - self.assertEqual(database.database_id, DATABASE_ID) - self.assertIs(database._instance, instance) - self.assertEqual(list(database.ddl_statements), []) - self.assertIsInstance(database._pool, BurstyPool) - pool = database._pool - self.assertIs(pool._database, database) - - def test_database_factory_explicit(self): - from google.cloud.spanner_v1.database import Database - from tests._fixtures import DDL_STATEMENTS - - client = _Client(self.PROJECT) - instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) - DATABASE_ID = "database-id" - pool = _Pool() - - database = instance.database( - DATABASE_ID, ddl_statements=DDL_STATEMENTS, pool=pool - ) - - self.assertTrue(isinstance(database, Database)) - self.assertEqual(database.database_id, DATABASE_ID) - self.assertIs(database._instance, instance) - self.assertEqual(list(database.ddl_statements), DDL_STATEMENTS) - self.assertIs(database._pool, pool) - self.assertIs(pool._bound, database) - - def test_list_databases(self): - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2, - ) - from google.cloud.spanner_v1.database import Database - - api = database_admin_client.DatabaseAdminClient(mock.Mock()) - client = _Client(self.PROJECT) - client.database_admin_api = api - instance = self._make_one(self.INSTANCE_ID, client) - - databases_pb = spanner_database_admin_pb2.ListDatabasesResponse( - databases=[ - spanner_database_admin_pb2.Database( - 
name="{}/databases/aa".format(self.INSTANCE_NAME) - ), - spanner_database_admin_pb2.Database( - name="{}/databases/bb".format(self.INSTANCE_NAME) - ), - ] - ) - - ld_api = api._inner_api_calls["list_databases"] = mock.Mock( - return_value=databases_pb - ) - - response = instance.list_databases() - databases = list(response) - - self.assertIsInstance(databases[0], Database) - self.assertTrue(databases[0].name.endswith("/aa")) - self.assertTrue(databases[1].name.endswith("/bb")) - - expected_metadata = [ - ("google-cloud-resource-prefix", instance.name), - ("x-goog-request-params", "parent={}".format(instance.name)), - ] - ld_api.assert_called_once_with( - spanner_database_admin_pb2.ListDatabasesRequest(parent=self.INSTANCE_NAME), - metadata=expected_metadata, - retry=mock.ANY, - timeout=mock.ANY, - ) - - def test_list_databases_w_options(self): - from google.cloud.spanner_admin_database_v1.gapic import database_admin_client - from google.cloud.spanner_admin_database_v1.proto import ( - spanner_database_admin_pb2, - ) - - api = database_admin_client.DatabaseAdminClient(mock.Mock()) - client = _Client(self.PROJECT) - client.database_admin_api = api - instance = self._make_one(self.INSTANCE_ID, client) - - databases_pb = spanner_database_admin_pb2.ListDatabasesResponse(databases=[]) - - ld_api = api._inner_api_calls["list_databases"] = mock.Mock( - return_value=databases_pb - ) - - page_size = 42 - page_token = "token" - response = instance.list_databases(page_size=page_size, page_token=page_token) - databases = list(response) - - self.assertEqual(databases, []) - - expected_metadata = [ - ("google-cloud-resource-prefix", instance.name), - ("x-goog-request-params", "parent={}".format(instance.name)), - ] - ld_api.assert_called_once_with( - spanner_database_admin_pb2.ListDatabasesRequest( - parent=self.INSTANCE_NAME, page_size=page_size, page_token=page_token - ), - metadata=expected_metadata, - retry=mock.ANY, - timeout=mock.ANY, - ) - - -class _Client(object): - def __init__(self, project, timeout_seconds=None): - self.project = project - self.project_name = "projects/" + self.project - self.timeout_seconds = timeout_seconds - - def copy(self): - from copy import deepcopy - - return deepcopy(self) - - def __eq__(self, other): - return ( - other.project == self.project - and other.project_name == self.project_name - and other.timeout_seconds == self.timeout_seconds - ) - - -class _FauxInstanceAdminAPI(object): - - _create_instance_conflict = False - _instance_not_found = False - _rpc_error = False - - def __init__(self, **kwargs): - self.__dict__.update(**kwargs) - - def create_instance(self, parent, instance_id, instance, metadata=None): - from google.api_core.exceptions import AlreadyExists, Unknown - - self._created_instance = (parent, instance_id, instance, metadata) - if self._rpc_error: - raise Unknown("error") - if self._create_instance_conflict: - raise AlreadyExists("conflict") - return self._create_instance_response - - def get_instance(self, name, metadata=None): - from google.api_core.exceptions import NotFound, Unknown - - self._got_instance = (name, metadata) - if self._rpc_error: - raise Unknown("error") - if self._instance_not_found: - raise NotFound("error") - return self._get_instance_response - - def update_instance(self, instance, field_mask, metadata=None): - from google.api_core.exceptions import NotFound, Unknown - - self._updated_instance = (instance, field_mask, metadata) - if self._rpc_error: - raise Unknown("error") - if self._instance_not_found: - raise 
NotFound("error") - return self._update_instance_response - - def delete_instance(self, name, metadata=None): - from google.api_core.exceptions import NotFound, Unknown - - self._deleted_instance = name, metadata - if self._rpc_error: - raise Unknown("error") - if self._instance_not_found: - raise NotFound("error") - return self._delete_instance_response - - -class _FauxOperationFuture(object): - pass - - -class _Pool(object): - _bound = None - - def bind(self, database): - self._bound = database diff --git a/spanner/tests/unit/test_keyset.py b/spanner/tests/unit/test_keyset.py deleted file mode 100644 index ed1473bf01e3..000000000000 --- a/spanner/tests/unit/test_keyset.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - - -class TestKeyRange(unittest.TestCase): - def _get_target_class(self): - from google.cloud.spanner_v1.keyset import KeyRange - - return KeyRange - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_ctor_no_start_no_end(self): - with self.assertRaises(ValueError): - self._make_one() - - def test_ctor_w_start_open_and_start_closed(self): - KEY_1 = [u"key_1"] - KEY_2 = [u"key_2"] - with self.assertRaises(ValueError): - self._make_one(start_open=KEY_1, start_closed=KEY_2) - - def test_ctor_w_end_open_and_end_closed(self): - KEY_1 = [u"key_1"] - KEY_2 = [u"key_2"] - with self.assertRaises(ValueError): - self._make_one(end_open=KEY_1, end_closed=KEY_2) - - def test_ctor_w_only_start_open(self): - KEY_1 = [u"key_1"] - krange = self._make_one(start_open=KEY_1) - self.assertEqual(krange.start_open, KEY_1) - self.assertEqual(krange.start_closed, None) - self.assertEqual(krange.end_open, None) - self.assertEqual(krange.end_closed, []) - - def test_ctor_w_only_start_closed(self): - KEY_1 = [u"key_1"] - krange = self._make_one(start_closed=KEY_1) - self.assertEqual(krange.start_open, None) - self.assertEqual(krange.start_closed, KEY_1) - self.assertEqual(krange.end_open, None) - self.assertEqual(krange.end_closed, []) - - def test_ctor_w_only_end_open(self): - KEY_1 = [u"key_1"] - krange = self._make_one(end_open=KEY_1) - self.assertEqual(krange.start_open, None) - self.assertEqual(krange.start_closed, []) - self.assertEqual(krange.end_open, KEY_1) - self.assertEqual(krange.end_closed, None) - - def test_ctor_w_only_end_closed(self): - KEY_1 = [u"key_1"] - krange = self._make_one(end_closed=KEY_1) - self.assertEqual(krange.start_open, None) - self.assertEqual(krange.start_closed, []) - self.assertEqual(krange.end_open, None) - self.assertEqual(krange.end_closed, KEY_1) - - def test_ctor_w_start_open_and_end_closed(self): - KEY_1 = [u"key_1"] - KEY_2 = [u"key_2"] - krange = self._make_one(start_open=KEY_1, end_closed=KEY_2) - self.assertEqual(krange.start_open, KEY_1) - self.assertEqual(krange.start_closed, None) - self.assertEqual(krange.end_open, None) - self.assertEqual(krange.end_closed, KEY_2) - - def 
test_ctor_w_start_closed_and_end_open(self): - KEY_1 = [u"key_1"] - KEY_2 = [u"key_2"] - krange = self._make_one(start_closed=KEY_1, end_open=KEY_2) - self.assertEqual(krange.start_open, None) - self.assertEqual(krange.start_closed, KEY_1) - self.assertEqual(krange.end_open, KEY_2) - self.assertEqual(krange.end_closed, None) - - def test___eq___self(self): - key_1 = [u"key_1"] - krange = self._make_one(end_open=key_1) - self.assertEqual(krange, krange) - - def test___eq___other_type(self): - key_1 = [u"key_1"] - krange = self._make_one(end_open=key_1) - self.assertNotEqual(krange, object()) - - def test___eq___other_hit(self): - key_1 = [u"key_1"] - krange = self._make_one(end_open=key_1) - other = self._make_one(end_open=key_1) - self.assertEqual(krange, other) - - def test___eq___other(self): - key_1 = [u"key_1"] - key_2 = [u"key_2"] - krange = self._make_one(end_open=key_1) - other = self._make_one(start_closed=key_2, end_open=key_1) - self.assertNotEqual(krange, other) - - def test_to_pb_w_start_closed_and_end_open(self): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange - - key1 = u"key_1" - key2 = u"key_2" - key_range = self._make_one(start_closed=[key1], end_open=[key2]) - key_range_pb = key_range._to_pb() - expected = KeyRange( - start_closed=ListValue(values=[Value(string_value=key1)]), - end_open=ListValue(values=[Value(string_value=key2)]), - ) - self.assertEqual(key_range_pb, expected) - - def test_to_pb_w_start_open_and_end_closed(self): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange - - key1 = u"key_1" - key2 = u"key_2" - key_range = self._make_one(start_open=[key1], end_closed=[key2]) - key_range_pb = key_range._to_pb() - expected = KeyRange( - start_open=ListValue(values=[Value(string_value=key1)]), - end_closed=ListValue(values=[Value(string_value=key2)]), - ) - self.assertEqual(key_range_pb, expected) - - def test_to_pb_w_empty_list(self): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange - - key = u"key" - key_range = self._make_one(start_closed=[], end_closed=[key]) - key_range_pb = key_range._to_pb() - expected = KeyRange( - start_closed=ListValue(values=[]), - end_closed=ListValue(values=[Value(string_value=key)]), - ) - self.assertEqual(key_range_pb, expected) - - def test_to_dict_w_start_closed_and_end_open(self): - key1 = u"key_1" - key2 = u"key_2" - key_range = self._make_one(start_closed=[key1], end_open=[key2]) - expected = {"start_closed": [key1], "end_open": [key2]} - self.assertEqual(key_range._to_dict(), expected) - - def test_to_dict_w_start_open_and_end_closed(self): - key1 = u"key_1" - key2 = u"key_2" - key_range = self._make_one(start_open=[key1], end_closed=[key2]) - expected = {"start_open": [key1], "end_closed": [key2]} - self.assertEqual(key_range._to_dict(), expected) - - def test_to_dict_w_end_closed(self): - key = u"key" - key_range = self._make_one(end_closed=[key]) - expected = {"end_closed": [key]} - self.assertEqual(key_range._to_dict(), expected) - - -class TestKeySet(unittest.TestCase): - def _get_target_class(self): - from google.cloud.spanner_v1.keyset import KeySet - - return KeySet - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_ctor_w_all(self): - 
keyset = self._make_one(all_=True) - - self.assertTrue(keyset.all_) - self.assertEqual(keyset.keys, []) - self.assertEqual(keyset.ranges, []) - - def test_ctor_w_keys(self): - KEYS = [[u"key1"], [u"key2"]] - - keyset = self._make_one(keys=KEYS) - - self.assertFalse(keyset.all_) - self.assertEqual(keyset.keys, KEYS) - self.assertEqual(keyset.ranges, []) - - def test_ctor_w_ranges(self): - from google.cloud.spanner_v1.keyset import KeyRange - - range_1 = KeyRange(start_closed=[u"key1"], end_open=[u"key3"]) - range_2 = KeyRange(start_open=[u"key5"], end_closed=[u"key6"]) - - keyset = self._make_one(ranges=[range_1, range_2]) - - self.assertFalse(keyset.all_) - self.assertEqual(keyset.keys, []) - self.assertEqual(keyset.ranges, [range_1, range_2]) - - def test_ctor_w_all_and_keys(self): - - with self.assertRaises(ValueError): - self._make_one(all_=True, keys=[["key1"], ["key2"]]) - - def test_ctor_w_all_and_ranges(self): - from google.cloud.spanner_v1.keyset import KeyRange - - range_1 = KeyRange(start_closed=[u"key1"], end_open=[u"key3"]) - range_2 = KeyRange(start_open=[u"key5"], end_closed=[u"key6"]) - - with self.assertRaises(ValueError): - self._make_one(all_=True, ranges=[range_1, range_2]) - - def test___eq___w_self(self): - keyset = self._make_one(all_=True) - self.assertEqual(keyset, keyset) - - def test___eq___w_other_type(self): - keyset = self._make_one(all_=True) - self.assertNotEqual(keyset, object()) - - def test___eq___w_all_hit(self): - keyset = self._make_one(all_=True) - other = self._make_one(all_=True) - self.assertEqual(keyset, other) - - def test___eq___w_all_miss(self): - keys = [[u"key1"], [u"key2"]] - keyset = self._make_one(all_=True) - other = self._make_one(keys=keys) - self.assertNotEqual(keyset, other) - - def test___eq___w_keys_hit(self): - keys = [[u"key1"], [u"key2"]] - - keyset = self._make_one(keys=keys) - other = self._make_one(keys=keys) - - self.assertEqual(keyset, other) - - def test___eq___w_keys_miss(self): - keys = [[u"key1"], [u"key2"]] - - keyset = self._make_one(keys=keys[:1]) - other = self._make_one(keys=keys[1:]) - - self.assertNotEqual(keyset, other) - - def test___eq___w_ranges_hit(self): - from google.cloud.spanner_v1.keyset import KeyRange - - range_1 = KeyRange(start_closed=[u"key1"], end_open=[u"key3"]) - range_2 = KeyRange(start_open=[u"key5"], end_closed=[u"key6"]) - - keyset = self._make_one(ranges=[range_1, range_2]) - other = self._make_one(ranges=[range_1, range_2]) - - self.assertEqual(keyset, other) - - def test___eq___w_ranges_miss(self): - from google.cloud.spanner_v1.keyset import KeyRange - - range_1 = KeyRange(start_closed=[u"key1"], end_open=[u"key3"]) - range_2 = KeyRange(start_open=[u"key5"], end_closed=[u"key6"]) - - keyset = self._make_one(ranges=[range_1]) - other = self._make_one(ranges=[range_2]) - - self.assertNotEqual(keyset, other) - - def test_to_pb_w_all(self): - from google.cloud.spanner_v1.proto.keys_pb2 import KeySet - - keyset = self._make_one(all_=True) - - result = keyset._to_pb() - - self.assertIsInstance(result, KeySet) - self.assertTrue(result.all) - self.assertEqual(len(result.keys), 0) - self.assertEqual(len(result.ranges), 0) - - def test_to_pb_w_only_keys(self): - from google.cloud.spanner_v1.proto.keys_pb2 import KeySet - - KEYS = [[u"key1"], [u"key2"]] - keyset = self._make_one(keys=KEYS) - - result = keyset._to_pb() - - self.assertIsInstance(result, KeySet) - self.assertFalse(result.all) - self.assertEqual(len(result.keys), len(KEYS)) - - for found, expected in zip(result.keys, KEYS): - 
self.assertEqual(len(found), len(expected)) - self.assertEqual(found.values[0].string_value, expected[0]) - - self.assertEqual(len(result.ranges), 0) - - def test_to_pb_w_only_ranges(self): - from google.cloud.spanner_v1.proto.keys_pb2 import KeySet - from google.cloud.spanner_v1.keyset import KeyRange - - KEY_1 = u"KEY_1" - KEY_2 = u"KEY_2" - KEY_3 = u"KEY_3" - KEY_4 = u"KEY_4" - RANGES = [ - KeyRange(start_open=KEY_1, end_closed=KEY_2), - KeyRange(start_closed=KEY_3, end_open=KEY_4), - ] - keyset = self._make_one(ranges=RANGES) - - result = keyset._to_pb() - - self.assertIsInstance(result, KeySet) - self.assertFalse(result.all) - self.assertEqual(len(result.keys), 0) - self.assertEqual(len(result.ranges), len(RANGES)) - - for found, expected in zip(result.ranges, RANGES): - self.assertEqual(found, expected._to_pb()) - - def test_to_dict_w_all(self): - keyset = self._make_one(all_=True) - expected = {"all": True} - self.assertEqual(keyset._to_dict(), expected) - - def test_to_dict_w_only_keys(self): - KEYS = [[u"key1"], [u"key2"]] - keyset = self._make_one(keys=KEYS) - - expected = {"keys": KEYS, "ranges": []} - self.assertEqual(keyset._to_dict(), expected) - - def test_to_dict_w_only_ranges(self): - from google.cloud.spanner_v1.keyset import KeyRange - - key_1 = u"KEY_1" - key_2 = u"KEY_2" - key_3 = u"KEY_3" - key_4 = u"KEY_4" - ranges = [ - KeyRange(start_open=[key_1], end_closed=[key_2]), - KeyRange(start_closed=[key_3], end_open=[key_4]), - ] - keyset = self._make_one(ranges=ranges) - - expected = { - "keys": [], - "ranges": [ - {"start_open": [key_1], "end_closed": [key_2]}, - {"start_closed": [key_3], "end_open": [key_4]}, - ], - } - self.assertEqual(keyset._to_dict(), expected) - - def test_from_dict_w_all(self): - klass = self._get_target_class() - mapping = {"all": True} - - keyset = klass._from_dict(mapping) - - self.assertTrue(keyset.all_) - self.assertEqual(keyset.keys, []) - self.assertEqual(keyset.ranges, []) - - def test_from_dict_w_keys(self): - klass = self._get_target_class() - keys = [[u"key1"], [u"key2"]] - mapping = {"keys": keys} - - keyset = klass._from_dict(mapping) - - self.assertFalse(keyset.all_) - self.assertEqual(keyset.keys, keys) - self.assertEqual(keyset.ranges, []) - - def test_from_dict_w_ranges(self): - from google.cloud.spanner_v1.keyset import KeyRange - - klass = self._get_target_class() - key_1 = u"KEY_1" - key_2 = u"KEY_2" - key_3 = u"KEY_3" - key_4 = u"KEY_4" - mapping = { - "ranges": [ - {"start_open": [key_1], "end_closed": [key_2]}, - {"start_closed": [key_3], "end_open": [key_4]}, - ] - } - - keyset = klass._from_dict(mapping) - - range_1 = KeyRange(start_open=[key_1], end_closed=[key_2]) - range_2 = KeyRange(start_closed=[key_3], end_open=[key_4]) - - self.assertFalse(keyset.all_) - self.assertEqual(keyset.keys, []) - self.assertEqual(keyset.ranges, [range_1, range_2]) diff --git a/spanner/tests/unit/test_param_types.py b/spanner/tests/unit/test_param_types.py deleted file mode 100644 index cb1c548af9e7..000000000000 --- a/spanner/tests/unit/test_param_types.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2017 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - - -class Test_ArrayParamType(unittest.TestCase): - def test_it(self): - from google.cloud.spanner_v1.proto import type_pb2 - from google.cloud.spanner_v1 import param_types - - expected = type_pb2.Type( - code=type_pb2.ARRAY, array_element_type=type_pb2.Type(code=type_pb2.INT64) - ) - - found = param_types.Array(param_types.INT64) - - self.assertEqual(found, expected) - - -class Test_Struct(unittest.TestCase): - def test_it(self): - from google.cloud.spanner_v1.proto import type_pb2 - from google.cloud.spanner_v1 import param_types - - struct_type = type_pb2.StructType( - fields=[ - type_pb2.StructType.Field( - name="name", type=type_pb2.Type(code=type_pb2.STRING) - ), - type_pb2.StructType.Field( - name="count", type=type_pb2.Type(code=type_pb2.INT64) - ), - ] - ) - expected = type_pb2.Type(code=type_pb2.STRUCT, struct_type=struct_type) - - found = param_types.Struct( - [ - param_types.StructField("name", param_types.STRING), - param_types.StructField("count", param_types.INT64), - ] - ) - - self.assertEqual(found, expected) diff --git a/spanner/tests/unit/test_pool.py b/spanner/tests/unit/test_pool.py deleted file mode 100644 index 2d4a9d882291..000000000000 --- a/spanner/tests/unit/test_pool.py +++ /dev/null @@ -1,937 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from functools import total_ordering -import unittest - -import mock - - -def _make_database(name="name"): - from google.cloud.spanner_v1.database import Database - - return mock.create_autospec(Database, instance=True) - - -def _make_session(): - from google.cloud.spanner_v1.database import Session - - return mock.create_autospec(Session, instance=True) - - -class TestAbstractSessionPool(unittest.TestCase): - def _getTargetClass(self): - from google.cloud.spanner_v1.pool import AbstractSessionPool - - return AbstractSessionPool - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_defaults(self): - pool = self._make_one() - self.assertIsNone(pool._database) - self.assertEqual(pool.labels, {}) - - def test_ctor_explicit(self): - labels = {"foo": "bar"} - pool = self._make_one(labels=labels) - self.assertIsNone(pool._database) - self.assertEqual(pool.labels, labels) - - def test_bind_abstract(self): - pool = self._make_one() - database = _make_database("name") - with self.assertRaises(NotImplementedError): - pool.bind(database) - - def test_get_abstract(self): - pool = self._make_one() - with self.assertRaises(NotImplementedError): - pool.get() - - def test_put_abstract(self): - pool = self._make_one() - session = object() - with self.assertRaises(NotImplementedError): - pool.put(session) - - def test_clear_abstract(self): - pool = self._make_one() - with self.assertRaises(NotImplementedError): - pool.clear() - - def test__new_session_wo_labels(self): - pool = self._make_one() - database = pool._database = _make_database("name") - session = _make_session() - database.session.return_value = session - - new_session = pool._new_session() - - self.assertIs(new_session, session) - database.session.assert_called_once_with() - - def test__new_session_w_labels(self): - labels = {"foo": "bar"} - pool = self._make_one(labels=labels) - database = pool._database = _make_database("name") - session = _make_session() - database.session.return_value = session - - new_session = pool._new_session() - - self.assertIs(new_session, session) - database.session.assert_called_once_with(labels=labels) - - def test_session_wo_kwargs(self): - from google.cloud.spanner_v1.pool import SessionCheckout - - pool = self._make_one() - checkout = pool.session() - self.assertIsInstance(checkout, SessionCheckout) - self.assertIs(checkout._pool, pool) - self.assertIsNone(checkout._session) - self.assertEqual(checkout._kwargs, {}) - - def test_session_w_kwargs(self): - from google.cloud.spanner_v1.pool import SessionCheckout - - pool = self._make_one() - checkout = pool.session(foo="bar") - self.assertIsInstance(checkout, SessionCheckout) - self.assertIs(checkout._pool, pool) - self.assertIsNone(checkout._session) - self.assertEqual(checkout._kwargs, {"foo": "bar"}) - - -class TestFixedSizePool(unittest.TestCase): - def _getTargetClass(self): - from google.cloud.spanner_v1.pool import FixedSizePool - - return FixedSizePool - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_defaults(self): - pool = self._make_one() - self.assertIsNone(pool._database) - self.assertEqual(pool.size, 10) - self.assertEqual(pool.default_timeout, 10) - self.assertTrue(pool._sessions.empty()) - self.assertEqual(pool.labels, {}) - - def test_ctor_explicit(self): - labels = {"foo": "bar"} - pool = self._make_one(size=4, default_timeout=30, labels=labels) - self.assertIsNone(pool._database) - self.assertEqual(pool.size, 4) - 
self.assertEqual(pool.default_timeout, 30) - self.assertTrue(pool._sessions.empty()) - self.assertEqual(pool.labels, labels) - - def test_bind(self): - pool = self._make_one() - database = _Database("name") - SESSIONS = [_Session(database)] * 10 - database._sessions.extend(SESSIONS) - - pool.bind(database) - - self.assertIs(pool._database, database) - self.assertEqual(pool.size, 10) - self.assertEqual(pool.default_timeout, 10) - self.assertTrue(pool._sessions.full()) - - api = database.spanner_api - self.assertEqual(api.batch_create_sessions.call_count, 5) - for session in SESSIONS: - session.create.assert_not_called() - - def test_get_non_expired(self): - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = sorted([_Session(database) for i in range(0, 4)]) - database._sessions.extend(SESSIONS) - pool.bind(database) - - # check if sessions returned in LIFO order - for i in (3, 2, 1, 0): - session = pool.get() - self.assertIs(session, SESSIONS[i]) - self.assertTrue(session._exists_checked) - self.assertFalse(pool._sessions.full()) - - def test_get_expired(self): - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = [_Session(database)] * 5 - SESSIONS[0]._exists = False - database._sessions.extend(SESSIONS) - pool.bind(database) - - session = pool.get() - - self.assertIs(session, SESSIONS[4]) - session.create.assert_called() - self.assertTrue(SESSIONS[0]._exists_checked) - self.assertFalse(pool._sessions.full()) - - def test_get_empty_default_timeout(self): - from six.moves.queue import Empty - - pool = self._make_one(size=1) - queue = pool._sessions = _Queue() - - with self.assertRaises(Empty): - pool.get() - - self.assertEqual(queue._got, {"block": True, "timeout": 10}) - - def test_get_empty_explicit_timeout(self): - from six.moves.queue import Empty - - pool = self._make_one(size=1, default_timeout=0.1) - queue = pool._sessions = _Queue() - - with self.assertRaises(Empty): - pool.get(timeout=1) - - self.assertEqual(queue._got, {"block": True, "timeout": 1}) - - def test_put_full(self): - from six.moves.queue import Full - - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = [_Session(database)] * 4 - database._sessions.extend(SESSIONS) - pool.bind(database) - - with self.assertRaises(Full): - pool.put(_Session(database)) - - self.assertTrue(pool._sessions.full()) - - def test_put_non_full(self): - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = [_Session(database)] * 4 - database._sessions.extend(SESSIONS) - pool.bind(database) - pool._sessions.get() - - pool.put(_Session(database)) - - self.assertTrue(pool._sessions.full()) - - def test_clear(self): - pool = self._make_one() - database = _Database("name") - SESSIONS = [_Session(database)] * 10 - database._sessions.extend(SESSIONS) - pool.bind(database) - self.assertTrue(pool._sessions.full()) - - api = database.spanner_api - self.assertEqual(api.batch_create_sessions.call_count, 5) - for session in SESSIONS: - session.create.assert_not_called() - - pool.clear() - - for session in SESSIONS: - self.assertTrue(session._deleted) - - -class TestBurstyPool(unittest.TestCase): - def _getTargetClass(self): - from google.cloud.spanner_v1.pool import BurstyPool - - return BurstyPool - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_defaults(self): - pool = self._make_one() - self.assertIsNone(pool._database) - self.assertEqual(pool.target_size, 10) - self.assertTrue(pool._sessions.empty()) - 
self.assertEqual(pool.labels, {}) - - def test_ctor_explicit(self): - labels = {"foo": "bar"} - pool = self._make_one(target_size=4, labels=labels) - self.assertIsNone(pool._database) - self.assertEqual(pool.target_size, 4) - self.assertTrue(pool._sessions.empty()) - self.assertEqual(pool.labels, labels) - - def test_get_empty(self): - pool = self._make_one() - database = _Database("name") - database._sessions.append(_Session(database)) - pool.bind(database) - - session = pool.get() - - self.assertIsInstance(session, _Session) - self.assertIs(session._database, database) - session.create.assert_called() - self.assertTrue(pool._sessions.empty()) - - def test_get_non_empty_session_exists(self): - pool = self._make_one() - database = _Database("name") - previous = _Session(database) - pool.bind(database) - pool.put(previous) - - session = pool.get() - - self.assertIs(session, previous) - session.create.assert_not_called() - self.assertTrue(session._exists_checked) - self.assertTrue(pool._sessions.empty()) - - def test_get_non_empty_session_expired(self): - pool = self._make_one() - database = _Database("name") - previous = _Session(database, exists=False) - newborn = _Session(database) - database._sessions.append(newborn) - pool.bind(database) - pool.put(previous) - - session = pool.get() - - self.assertTrue(previous._exists_checked) - self.assertIs(session, newborn) - session.create.assert_called() - self.assertFalse(session._exists_checked) - self.assertTrue(pool._sessions.empty()) - - def test_put_empty(self): - pool = self._make_one() - database = _Database("name") - pool.bind(database) - session = _Session(database) - - pool.put(session) - - self.assertFalse(pool._sessions.empty()) - - def test_put_full(self): - pool = self._make_one(target_size=1) - database = _Database("name") - pool.bind(database) - older = _Session(database) - pool.put(older) - self.assertFalse(pool._sessions.empty()) - - younger = _Session(database) - pool.put(younger) # discarded silently - - self.assertTrue(younger._deleted) - self.assertIs(pool.get(), older) - - def test_put_full_expired(self): - pool = self._make_one(target_size=1) - database = _Database("name") - pool.bind(database) - older = _Session(database) - pool.put(older) - self.assertFalse(pool._sessions.empty()) - - younger = _Session(database, exists=False) - pool.put(younger) # discarded silently - - self.assertTrue(younger._deleted) - self.assertIs(pool.get(), older) - - def test_clear(self): - pool = self._make_one() - database = _Database("name") - pool.bind(database) - previous = _Session(database) - pool.put(previous) - - pool.clear() - - self.assertTrue(previous._deleted) - - -class TestPingingPool(unittest.TestCase): - def _getTargetClass(self): - from google.cloud.spanner_v1.pool import PingingPool - - return PingingPool - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_defaults(self): - pool = self._make_one() - self.assertIsNone(pool._database) - self.assertEqual(pool.size, 10) - self.assertEqual(pool.default_timeout, 10) - self.assertEqual(pool._delta.seconds, 3000) - self.assertTrue(pool._sessions.empty()) - self.assertEqual(pool.labels, {}) - - def test_ctor_explicit(self): - labels = {"foo": "bar"} - pool = self._make_one( - size=4, default_timeout=30, ping_interval=1800, labels=labels - ) - self.assertIsNone(pool._database) - self.assertEqual(pool.size, 4) - self.assertEqual(pool.default_timeout, 30) - self.assertEqual(pool._delta.seconds, 1800) - 
self.assertTrue(pool._sessions.empty()) - self.assertEqual(pool.labels, labels) - - def test_bind(self): - pool = self._make_one() - database = _Database("name") - SESSIONS = [_Session(database)] * 10 - database._sessions.extend(SESSIONS) - pool.bind(database) - - self.assertIs(pool._database, database) - self.assertEqual(pool.size, 10) - self.assertEqual(pool.default_timeout, 10) - self.assertEqual(pool._delta.seconds, 3000) - self.assertTrue(pool._sessions.full()) - - api = database.spanner_api - self.assertEqual(api.batch_create_sessions.call_count, 5) - for session in SESSIONS: - session.create.assert_not_called() - - def test_get_hit_no_ping(self): - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = [_Session(database)] * 4 - database._sessions.extend(SESSIONS) - pool.bind(database) - - session = pool.get() - - self.assertIs(session, SESSIONS[0]) - self.assertFalse(session._exists_checked) - self.assertFalse(pool._sessions.full()) - - def test_get_hit_w_ping(self): - import datetime - from google.cloud._testing import _Monkey - from google.cloud.spanner_v1 import pool as MUT - - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = [_Session(database)] * 4 - database._sessions.extend(SESSIONS) - - sessions_created = datetime.datetime.utcnow() - datetime.timedelta(seconds=4000) - - with _Monkey(MUT, _NOW=lambda: sessions_created): - pool.bind(database) - - session = pool.get() - - self.assertIs(session, SESSIONS[0]) - self.assertTrue(session._exists_checked) - self.assertFalse(pool._sessions.full()) - - def test_get_hit_w_ping_expired(self): - import datetime - from google.cloud._testing import _Monkey - from google.cloud.spanner_v1 import pool as MUT - - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = [_Session(database)] * 5 - SESSIONS[0]._exists = False - database._sessions.extend(SESSIONS) - - sessions_created = datetime.datetime.utcnow() - datetime.timedelta(seconds=4000) - - with _Monkey(MUT, _NOW=lambda: sessions_created): - pool.bind(database) - - session = pool.get() - - self.assertIs(session, SESSIONS[4]) - session.create.assert_called() - self.assertTrue(SESSIONS[0]._exists_checked) - self.assertFalse(pool._sessions.full()) - - def test_get_empty_default_timeout(self): - from six.moves.queue import Empty - - pool = self._make_one(size=1) - queue = pool._sessions = _Queue() - - with self.assertRaises(Empty): - pool.get() - - self.assertEqual(queue._got, {"block": True, "timeout": 10}) - - def test_get_empty_explicit_timeout(self): - from six.moves.queue import Empty - - pool = self._make_one(size=1, default_timeout=0.1) - queue = pool._sessions = _Queue() - - with self.assertRaises(Empty): - pool.get(timeout=1) - - self.assertEqual(queue._got, {"block": True, "timeout": 1}) - - def test_put_full(self): - from six.moves.queue import Full - - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = [_Session(database)] * 4 - database._sessions.extend(SESSIONS) - pool.bind(database) - - with self.assertRaises(Full): - pool.put(_Session(database)) - - self.assertTrue(pool._sessions.full()) - - def test_put_non_full(self): - import datetime - from google.cloud._testing import _Monkey - from google.cloud.spanner_v1 import pool as MUT - - pool = self._make_one(size=1) - queue = pool._sessions = _Queue() - - now = datetime.datetime.utcnow() - database = _Database("name") - session = _Session(database) - - with _Monkey(MUT, _NOW=lambda: now): - pool.put(session) - - 
self.assertEqual(len(queue._items), 1) - ping_after, queued = queue._items[0] - self.assertEqual(ping_after, now + datetime.timedelta(seconds=3000)) - self.assertIs(queued, session) - - def test_clear(self): - pool = self._make_one() - database = _Database("name") - SESSIONS = [_Session(database)] * 10 - database._sessions.extend(SESSIONS) - pool.bind(database) - self.assertTrue(pool._sessions.full()) - - api = database.spanner_api - self.assertEqual(api.batch_create_sessions.call_count, 5) - for session in SESSIONS: - session.create.assert_not_called() - - pool.clear() - - for session in SESSIONS: - self.assertTrue(session._deleted) - - def test_ping_empty(self): - pool = self._make_one(size=1) - pool.ping() # Does not raise 'Empty' - - def test_ping_oldest_fresh(self): - pool = self._make_one(size=1) - database = _Database("name") - SESSIONS = [_Session(database)] * 1 - database._sessions.extend(SESSIONS) - pool.bind(database) - - pool.ping() - - self.assertFalse(SESSIONS[0]._exists_checked) - - def test_ping_oldest_stale_but_exists(self): - import datetime - from google.cloud._testing import _Monkey - from google.cloud.spanner_v1 import pool as MUT - - pool = self._make_one(size=1) - database = _Database("name") - SESSIONS = [_Session(database)] * 1 - database._sessions.extend(SESSIONS) - pool.bind(database) - - later = datetime.datetime.utcnow() + datetime.timedelta(seconds=4000) - with _Monkey(MUT, _NOW=lambda: later): - pool.ping() - - self.assertTrue(SESSIONS[0]._exists_checked) - - def test_ping_oldest_stale_and_not_exists(self): - import datetime - from google.cloud._testing import _Monkey - from google.cloud.spanner_v1 import pool as MUT - - pool = self._make_one(size=1) - database = _Database("name") - SESSIONS = [_Session(database)] * 2 - SESSIONS[0]._exists = False - database._sessions.extend(SESSIONS) - pool.bind(database) - - later = datetime.datetime.utcnow() + datetime.timedelta(seconds=4000) - with _Monkey(MUT, _NOW=lambda: later): - pool.ping() - - self.assertTrue(SESSIONS[0]._exists_checked) - SESSIONS[1].create.assert_called() - - -class TestTransactionPingingPool(unittest.TestCase): - def _getTargetClass(self): - from google.cloud.spanner_v1.pool import TransactionPingingPool - - return TransactionPingingPool - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_defaults(self): - pool = self._make_one() - self.assertIsNone(pool._database) - self.assertEqual(pool.size, 10) - self.assertEqual(pool.default_timeout, 10) - self.assertEqual(pool._delta.seconds, 3000) - self.assertTrue(pool._sessions.empty()) - self.assertTrue(pool._pending_sessions.empty()) - self.assertEqual(pool.labels, {}) - - def test_ctor_explicit(self): - labels = {"foo": "bar"} - pool = self._make_one( - size=4, default_timeout=30, ping_interval=1800, labels=labels - ) - self.assertIsNone(pool._database) - self.assertEqual(pool.size, 4) - self.assertEqual(pool.default_timeout, 30) - self.assertEqual(pool._delta.seconds, 1800) - self.assertTrue(pool._sessions.empty()) - self.assertTrue(pool._pending_sessions.empty()) - self.assertEqual(pool.labels, labels) - - def test_bind(self): - pool = self._make_one() - database = _Database("name") - SESSIONS = [_Session(database) for _ in range(10)] - database._sessions.extend(SESSIONS) - pool.bind(database) - - self.assertIs(pool._database, database) - self.assertEqual(pool.size, 10) - self.assertEqual(pool.default_timeout, 10) - self.assertEqual(pool._delta.seconds, 3000) - 
self.assertTrue(pool._sessions.full()) - - api = database.spanner_api - self.assertEqual(api.batch_create_sessions.call_count, 5) - for session in SESSIONS: - session.create.assert_not_called() - txn = session._transaction - txn.begin.assert_called_once_with() - - self.assertTrue(pool._pending_sessions.empty()) - - def test_bind_w_timestamp_race(self): - import datetime - from google.cloud._testing import _Monkey - from google.cloud.spanner_v1 import pool as MUT - - NOW = datetime.datetime.utcnow() - pool = self._make_one() - database = _Database("name") - SESSIONS = [_Session(database) for _ in range(10)] - database._sessions.extend(SESSIONS) - - with _Monkey(MUT, _NOW=lambda: NOW): - pool.bind(database) - - self.assertIs(pool._database, database) - self.assertEqual(pool.size, 10) - self.assertEqual(pool.default_timeout, 10) - self.assertEqual(pool._delta.seconds, 3000) - self.assertTrue(pool._sessions.full()) - - api = database.spanner_api - self.assertEqual(api.batch_create_sessions.call_count, 5) - for session in SESSIONS: - session.create.assert_not_called() - txn = session._transaction - txn.begin.assert_called_once_with() - - self.assertTrue(pool._pending_sessions.empty()) - - def test_put_full(self): - from six.moves.queue import Full - - pool = self._make_one(size=4) - database = _Database("name") - SESSIONS = [_Session(database) for _ in range(4)] - database._sessions.extend(SESSIONS) - pool.bind(database) - - with self.assertRaises(Full): - pool.put(_Session(database)) - - self.assertTrue(pool._sessions.full()) - - def test_put_non_full_w_active_txn(self): - pool = self._make_one(size=1) - queue = pool._sessions = _Queue() - pending = pool._pending_sessions = _Queue() - database = _Database("name") - session = _Session(database) - txn = session.transaction() - - pool.put(session) - - self.assertEqual(len(queue._items), 1) - _, queued = queue._items[0] - self.assertIs(queued, session) - - self.assertEqual(len(pending._items), 0) - txn.begin.assert_not_called() - - def test_put_non_full_w_committed_txn(self): - pool = self._make_one(size=1) - queue = pool._sessions = _Queue() - pending = pool._pending_sessions = _Queue() - database = _Database("name") - session = _Session(database) - committed = session.transaction() - committed.committed = True - - pool.put(session) - - self.assertEqual(len(queue._items), 0) - - self.assertEqual(len(pending._items), 1) - self.assertIs(pending._items[0], session) - self.assertIsNot(session._transaction, committed) - session._transaction.begin.assert_not_called() - - def test_put_non_full(self): - pool = self._make_one(size=1) - queue = pool._sessions = _Queue() - pending = pool._pending_sessions = _Queue() - database = _Database("name") - session = _Session(database) - - pool.put(session) - - self.assertEqual(len(queue._items), 0) - self.assertEqual(len(pending._items), 1) - self.assertIs(pending._items[0], session) - - self.assertFalse(pending.empty()) - - def test_begin_pending_transactions_empty(self): - pool = self._make_one(size=1) - pool.begin_pending_transactions() # no raise - - def test_begin_pending_transactions_non_empty(self): - pool = self._make_one(size=1) - pool._sessions = _Queue() - - database = _Database("name") - TRANSACTIONS = [_make_transaction(object())] - PENDING_SESSIONS = [_Session(database, transaction=txn) for txn in TRANSACTIONS] - - pending = pool._pending_sessions = _Queue(*PENDING_SESSIONS) - self.assertFalse(pending.empty()) - - pool.begin_pending_transactions() # no raise - - for txn in TRANSACTIONS: - 
txn.begin.assert_called_once_with() - - self.assertTrue(pending.empty()) - - -class TestSessionCheckout(unittest.TestCase): - def _getTargetClass(self): - from google.cloud.spanner_v1.pool import SessionCheckout - - return SessionCheckout - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_wo_kwargs(self): - pool = _Pool() - checkout = self._make_one(pool) - self.assertIs(checkout._pool, pool) - self.assertIsNone(checkout._session) - self.assertEqual(checkout._kwargs, {}) - - def test_ctor_w_kwargs(self): - pool = _Pool() - checkout = self._make_one(pool, foo="bar") - self.assertIs(checkout._pool, pool) - self.assertIsNone(checkout._session) - self.assertEqual(checkout._kwargs, {"foo": "bar"}) - - def test_context_manager_wo_kwargs(self): - session = object() - pool = _Pool(session) - checkout = self._make_one(pool) - - self.assertEqual(len(pool._items), 1) - self.assertIs(pool._items[0], session) - - with checkout as borrowed: - self.assertIs(borrowed, session) - self.assertEqual(len(pool._items), 0) - - self.assertEqual(len(pool._items), 1) - self.assertIs(pool._items[0], session) - self.assertEqual(pool._got, {}) - - def test_context_manager_w_kwargs(self): - session = object() - pool = _Pool(session) - checkout = self._make_one(pool, foo="bar") - - self.assertEqual(len(pool._items), 1) - self.assertIs(pool._items[0], session) - - with checkout as borrowed: - self.assertIs(borrowed, session) - self.assertEqual(len(pool._items), 0) - - self.assertEqual(len(pool._items), 1) - self.assertIs(pool._items[0], session) - self.assertEqual(pool._got, {"foo": "bar"}) - - -def _make_transaction(*args, **kw): - from google.cloud.spanner_v1.transaction import Transaction - - txn = mock.create_autospec(Transaction)(*args, **kw) - txn.committed = None - txn._rolled_back = False - return txn - - -@total_ordering -class _Session(object): - - _transaction = None - - def __init__(self, database, exists=True, transaction=None): - self._database = database - self._exists = exists - self._exists_checked = False - self.create = mock.Mock() - self._deleted = False - self._transaction = transaction - - def __lt__(self, other): - return id(self) < id(other) - - def exists(self): - self._exists_checked = True - return self._exists - - def delete(self): - from google.cloud.exceptions import NotFound - - self._deleted = True - if not self._exists: - raise NotFound("unknown session") - - def transaction(self): - txn = self._transaction = _make_transaction(self) - return txn - - -class _Database(object): - def __init__(self, name): - self.name = name - self._sessions = [] - - def mock_batch_create_sessions(db, session_count=10, timeout=10, metadata=[]): - from google.cloud.spanner_v1.proto import spanner_pb2 - - response = spanner_pb2.BatchCreateSessionsResponse() - if session_count < 2: - response.session.add() - else: - response.session.add() - response.session.add() - return response - - from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient - - self.spanner_api = mock.create_autospec(SpannerClient, instance=True) - self.spanner_api.batch_create_sessions.side_effect = mock_batch_create_sessions - - def session(self): - # always return first session in the list - # to avoid reversing the order of putting - # sessions into pool (important for order tests) - return self._sessions.pop(0) - - -class _Queue(object): - - _size = 1 - - def __init__(self, *items): - self._items = list(items) - - def empty(self): - return len(self._items) == 0 - - def 
full(self): - return len(self._items) >= self._size - - def get(self, **kwargs): - from six.moves.queue import Empty - - self._got = kwargs - try: - return self._items.pop() - except IndexError: - raise Empty() - - def put(self, item, **kwargs): - self._put = kwargs - self._items.append(item) - - def put_nowait(self, item, **kwargs): - self._put_nowait = kwargs - self._items.append(item) - - -class _Pool(_Queue): - - _database = None diff --git a/spanner/tests/unit/test_session.py b/spanner/tests/unit/test_session.py deleted file mode 100644 index 98d98deaba82..000000000000 --- a/spanner/tests/unit/test_session.py +++ /dev/null @@ -1,1107 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest -import google.api_core.gapic_v1.method -import mock - - -def _make_rpc_error(error_cls, trailing_metadata=None): - import grpc - - grpc_error = mock.create_autospec(grpc.Call, instance=True) - grpc_error.trailing_metadata.return_value = trailing_metadata - return error_cls("error", errors=(grpc_error,)) - - -class TestSession(unittest.TestCase): - - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - DATABASE_ID = "database-id" - DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID - SESSION_ID = "session-id" - SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID - - def _getTargetClass(self): - from google.cloud.spanner_v1.session import Session - - return Session - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - @staticmethod - def _make_database(name=DATABASE_NAME): - from google.cloud.spanner_v1.database import Database - - database = mock.create_autospec(Database, instance=True) - database.name = name - return database - - @staticmethod - def _make_session_pb(name, labels=None): - from google.cloud.spanner_v1.proto.spanner_pb2 import Session - - return Session(name=name, labels=labels) - - def _make_spanner_api(self): - from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient - - return mock.Mock(autospec=SpannerClient, instance=True) - - def test_constructor_wo_labels(self): - database = self._make_database() - session = self._make_one(database) - self.assertIs(session.session_id, None) - self.assertIs(session._database, database) - self.assertEqual(session.labels, {}) - - def test_constructor_w_labels(self): - database = self._make_database() - labels = {"foo": "bar"} - session = self._make_one(database, labels=labels) - self.assertIs(session.session_id, None) - self.assertIs(session._database, database) - self.assertEqual(session.labels, labels) - - def test___lt___(self): - database = self._make_database() - lhs = self._make_one(database) - lhs._session_id = b"123" - rhs = self._make_one(database) - rhs._session_id = b"234" - self.assertTrue(lhs < rhs) - - def test_name_property_wo_session_id(self): - database = self._make_database() - session = self._make_one(database) - - with 
self.assertRaises(ValueError): - (session.name) - - def test_name_property_w_session_id(self): - database = self._make_database() - session = self._make_one(database) - session._session_id = self.SESSION_ID - self.assertEqual(session.name, self.SESSION_NAME) - - def test_create_w_session_id(self): - database = self._make_database() - session = self._make_one(database) - session._session_id = self.SESSION_ID - - with self.assertRaises(ValueError): - session.create() - - def test_create_ok(self): - session_pb = self._make_session_pb(self.SESSION_NAME) - gax_api = self._make_spanner_api() - gax_api.create_session.return_value = session_pb - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - - session.create() - - self.assertEqual(session.session_id, self.SESSION_ID) - - gax_api.create_session.assert_called_once_with( - database.name, metadata=[("google-cloud-resource-prefix", database.name)] - ) - - def test_create_w_labels(self): - labels = {"foo": "bar"} - session_pb = self._make_session_pb(self.SESSION_NAME, labels=labels) - gax_api = self._make_spanner_api() - gax_api.create_session.return_value = session_pb - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database, labels=labels) - - session.create() - - self.assertEqual(session.session_id, self.SESSION_ID) - - gax_api.create_session.assert_called_once_with( - database.name, - session={"labels": labels}, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_create_error(self): - from google.api_core.exceptions import Unknown - - gax_api = self._make_spanner_api() - gax_api.create_session.side_effect = Unknown("error") - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - - with self.assertRaises(Unknown): - session.create() - - def test_exists_wo_session_id(self): - database = self._make_database() - session = self._make_one(database) - self.assertFalse(session.exists()) - - def test_exists_hit(self): - session_pb = self._make_session_pb(self.SESSION_NAME) - gax_api = self._make_spanner_api() - gax_api.get_session.return_value = session_pb - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - self.assertTrue(session.exists()) - - gax_api.get_session.assert_called_once_with( - self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_exists_miss(self): - from google.api_core.exceptions import NotFound - - gax_api = self._make_spanner_api() - gax_api.get_session.side_effect = NotFound("testing") - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - self.assertFalse(session.exists()) - - gax_api.get_session.assert_called_once_with( - self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_exists_error(self): - from google.api_core.exceptions import Unknown - - gax_api = self._make_spanner_api() - gax_api.get_session.side_effect = Unknown("testing") - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - with self.assertRaises(Unknown): - session.exists() - - gax_api.get_session.assert_called_once_with( - self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def 
test_delete_wo_session_id(self): - database = self._make_database() - session = self._make_one(database) - - with self.assertRaises(ValueError): - session.delete() - - def test_delete_hit(self): - gax_api = self._make_spanner_api() - gax_api.delete_session.return_value = None - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - session.delete() - - gax_api.delete_session.assert_called_once_with( - self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_delete_miss(self): - from google.cloud.exceptions import NotFound - - gax_api = self._make_spanner_api() - gax_api.delete_session.side_effect = NotFound("testing") - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - with self.assertRaises(NotFound): - session.delete() - - gax_api.delete_session.assert_called_once_with( - self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_delete_error(self): - from google.api_core.exceptions import Unknown - - gax_api = self._make_spanner_api() - gax_api.delete_session.side_effect = Unknown("testing") - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - with self.assertRaises(Unknown): - session.delete() - - gax_api.delete_session.assert_called_once_with( - self.SESSION_NAME, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_snapshot_not_created(self): - database = self._make_database() - session = self._make_one(database) - - with self.assertRaises(ValueError): - session.snapshot() - - def test_snapshot_created(self): - from google.cloud.spanner_v1.snapshot import Snapshot - - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" # emulate 'session.create()' - - snapshot = session.snapshot() - - self.assertIsInstance(snapshot, Snapshot) - self.assertIs(snapshot._session, session) - self.assertTrue(snapshot._strong) - self.assertFalse(snapshot._multi_use) - - def test_snapshot_created_w_multi_use(self): - from google.cloud.spanner_v1.snapshot import Snapshot - - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" # emulate 'session.create()' - - snapshot = session.snapshot(multi_use=True) - - self.assertIsInstance(snapshot, Snapshot) - self.assertTrue(snapshot._session is session) - self.assertTrue(snapshot._strong) - self.assertTrue(snapshot._multi_use) - - def test_read_not_created(self): - from google.cloud.spanner_v1.keyset import KeySet - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - KEYS = ["bharney@example.com", "phred@example.com"] - KEYSET = KeySet(keys=KEYS) - database = self._make_database() - session = self._make_one(database) - - with self.assertRaises(ValueError): - session.read(TABLE_NAME, COLUMNS, KEYSET) - - def test_read(self): - from google.cloud.spanner_v1.keyset import KeySet - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - KEYS = ["bharney@example.com", "phred@example.com"] - KEYSET = KeySet(keys=KEYS) - INDEX = "email-address-index" - LIMIT = 20 - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" - - with 
mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot: - found = session.read(TABLE_NAME, COLUMNS, KEYSET, index=INDEX, limit=LIMIT) - - self.assertIs(found, snapshot().read.return_value) - - snapshot().read.assert_called_once_with( - TABLE_NAME, COLUMNS, KEYSET, INDEX, LIMIT - ) - - def test_execute_sql_not_created(self): - SQL = "SELECT first_name, age FROM citizens" - database = self._make_database() - session = self._make_one(database) - - with self.assertRaises(ValueError): - session.execute_sql(SQL) - - def test_execute_sql_defaults(self): - SQL = "SELECT first_name, age FROM citizens" - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" - - with mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot: - found = session.execute_sql(SQL) - - self.assertIs(found, snapshot().execute_sql.return_value) - - snapshot().execute_sql.assert_called_once_with( - SQL, - None, - None, - None, - timeout=google.api_core.gapic_v1.method.DEFAULT, - retry=google.api_core.gapic_v1.method.DEFAULT, - ) - - def test_execute_sql_non_default_retry(self): - from google.protobuf.struct_pb2 import Struct, Value - from google.cloud.spanner_v1.proto.type_pb2 import STRING - - SQL = "SELECT first_name, age FROM citizens" - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" - - params = Struct(fields={"foo": Value(string_value="bar")}) - param_types = {"foo": STRING} - - with mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot: - found = session.execute_sql( - SQL, params, param_types, "PLAN", retry=None, timeout=None - ) - - self.assertIs(found, snapshot().execute_sql.return_value) - - snapshot().execute_sql.assert_called_once_with( - SQL, params, param_types, "PLAN", timeout=None, retry=None - ) - - def test_execute_sql_explicit(self): - from google.protobuf.struct_pb2 import Struct, Value - from google.cloud.spanner_v1.proto.type_pb2 import STRING - - SQL = "SELECT first_name, age FROM citizens" - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" - - params = Struct(fields={"foo": Value(string_value="bar")}) - param_types = {"foo": STRING} - - with mock.patch("google.cloud.spanner_v1.session.Snapshot") as snapshot: - found = session.execute_sql(SQL, params, param_types, "PLAN") - - self.assertIs(found, snapshot().execute_sql.return_value) - - snapshot().execute_sql.assert_called_once_with( - SQL, - params, - param_types, - "PLAN", - timeout=google.api_core.gapic_v1.method.DEFAULT, - retry=google.api_core.gapic_v1.method.DEFAULT, - ) - - def test_batch_not_created(self): - database = self._make_database() - session = self._make_one(database) - - with self.assertRaises(ValueError): - session.batch() - - def test_batch_created(self): - from google.cloud.spanner_v1.batch import Batch - - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" - - batch = session.batch() - - self.assertIsInstance(batch, Batch) - self.assertIs(batch._session, session) - - def test_transaction_not_created(self): - database = self._make_database() - session = self._make_one(database) - - with self.assertRaises(ValueError): - session.transaction() - - def test_transaction_created(self): - from google.cloud.spanner_v1.transaction import Transaction - - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" - - transaction = 
session.transaction() - - self.assertIsInstance(transaction, Transaction) - self.assertIs(transaction._session, session) - self.assertIs(session._transaction, transaction) - - def test_transaction_w_existing_txn(self): - database = self._make_database() - session = self._make_one(database) - session._session_id = "DEADBEEF" - - existing = session.transaction() - another = session.transaction() # invalidates existing txn - - self.assertIs(session._transaction, another) - self.assertTrue(existing._rolled_back) - - def test_run_in_transaction_callback_raises_non_gax_error(self): - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud.spanner_v1.transaction import Transaction - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - transaction_pb = TransactionPB(id=TRANSACTION_ID) - gax_api = self._make_spanner_api() - gax_api.begin_transaction.return_value = transaction_pb - gax_api.rollback.return_value = None - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - called_with = [] - - class Testing(Exception): - pass - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - raise Testing() - - with self.assertRaises(Testing): - session.run_in_transaction(unit_of_work) - - self.assertIsNone(session._transaction) - self.assertEqual(len(called_with), 1) - txn, args, kw = called_with[0] - self.assertIsInstance(txn, Transaction) - self.assertIsNone(txn.committed) - self.assertTrue(txn._rolled_back) - self.assertEqual(args, ()) - self.assertEqual(kw, {}) - - expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - gax_api.begin_transaction.assert_called_once_with( - self.SESSION_NAME, - expected_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - gax_api.rollback.assert_called_once_with( - self.SESSION_NAME, - TRANSACTION_ID, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_run_in_transaction_callback_raises_non_abort_rpc_error(self): - from google.api_core.exceptions import Cancelled - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud.spanner_v1.transaction import Transaction - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - transaction_pb = TransactionPB(id=TRANSACTION_ID) - gax_api = self._make_spanner_api() - gax_api.begin_transaction.return_value = transaction_pb - gax_api.rollback.return_value = None - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - called_with = [] - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - raise Cancelled("error") - - with self.assertRaises(Cancelled): - session.run_in_transaction(unit_of_work) - - self.assertIsNone(session._transaction) - self.assertEqual(len(called_with), 1) - txn, args, kw = called_with[0] - 
self.assertIsInstance(txn, Transaction) - self.assertIsNone(txn.committed) - self.assertFalse(txn._rolled_back) - self.assertEqual(args, ()) - self.assertEqual(kw, {}) - - expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - gax_api.begin_transaction.assert_called_once_with( - self.SESSION_NAME, - expected_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - gax_api.rollback.assert_not_called() - - def test_run_in_transaction_w_args_w_kwargs_wo_abort(self): - import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - transaction_pb = TransactionPB(id=TRANSACTION_ID) - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - gax_api = self._make_spanner_api() - gax_api.begin_transaction.return_value = transaction_pb - gax_api.commit.return_value = response - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - called_with = [] - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - return 42 - - return_value = session.run_in_transaction(unit_of_work, "abc", some_arg="def") - - self.assertIsNone(session._transaction) - self.assertEqual(len(called_with), 1) - txn, args, kw = called_with[0] - self.assertIsInstance(txn, Transaction) - self.assertEqual(return_value, 42) - self.assertEqual(args, ("abc",)) - self.assertEqual(kw, {"some_arg": "def"}) - - expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - gax_api.begin_transaction.assert_called_once_with( - self.SESSION_NAME, - expected_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - gax_api.commit.assert_called_once_with( - self.SESSION_NAME, - mutations=txn._mutations, - transaction_id=TRANSACTION_ID, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_run_in_transaction_w_commit_error(self): - from google.api_core.exceptions import Unknown - from google.cloud.spanner_v1.transaction import Transaction - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - gax_api = self._make_spanner_api() - gax_api.commit.side_effect = Unknown("error") - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - begun_txn = session._transaction = Transaction(session) - begun_txn._transaction_id = TRANSACTION_ID - - assert session._transaction._transaction_id - - called_with = [] - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - - with self.assertRaises(Unknown): - 
session.run_in_transaction(unit_of_work) - - self.assertIsNone(session._transaction) - self.assertEqual(len(called_with), 1) - txn, args, kw = called_with[0] - self.assertIs(txn, begun_txn) - self.assertEqual(txn.committed, None) - self.assertEqual(args, ()) - self.assertEqual(kw, {}) - - gax_api.begin_transaction.assert_not_called() - gax_api.commit.assert_called_once_with( - self.SESSION_NAME, - mutations=txn._mutations, - transaction_id=TRANSACTION_ID, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_run_in_transaction_w_abort_no_retry_metadata(self): - import datetime - from google.api_core.exceptions import Aborted - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - transaction_pb = TransactionPB(id=TRANSACTION_ID) - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - aborted = _make_rpc_error(Aborted, trailing_metadata=[]) - response = CommitResponse(commit_timestamp=now_pb) - gax_api = self._make_spanner_api() - gax_api.begin_transaction.return_value = transaction_pb - gax_api.commit.side_effect = [aborted, response] - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - called_with = [] - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - return "answer" - - return_value = session.run_in_transaction(unit_of_work, "abc", some_arg="def") - - self.assertEqual(len(called_with), 2) - for index, (txn, args, kw) in enumerate(called_with): - self.assertIsInstance(txn, Transaction) - self.assertEqual(return_value, "answer") - self.assertEqual(args, ("abc",)) - self.assertEqual(kw, {"some_arg": "def"}) - - expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - self.assertEqual( - gax_api.begin_transaction.call_args_list, - [ - mock.call( - self.SESSION_NAME, - expected_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - ] - * 2, - ) - self.assertEqual( - gax_api.commit.call_args_list, - [ - mock.call( - self.SESSION_NAME, - mutations=txn._mutations, - transaction_id=TRANSACTION_ID, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - ] - * 2, - ) - - def test_run_in_transaction_w_abort_w_retry_metadata(self): - import datetime - from google.api_core.exceptions import Aborted - from google.protobuf.duration_pb2 import Duration - from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", 
"Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - RETRY_SECONDS = 12 - RETRY_NANOS = 3456 - retry_info = RetryInfo( - retry_delay=Duration(seconds=RETRY_SECONDS, nanos=RETRY_NANOS) - ) - trailing_metadata = [ - ("google.rpc.retryinfo-bin", retry_info.SerializeToString()) - ] - aborted = _make_rpc_error(Aborted, trailing_metadata=trailing_metadata) - transaction_pb = TransactionPB(id=TRANSACTION_ID) - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - gax_api = self._make_spanner_api() - gax_api.begin_transaction.return_value = transaction_pb - gax_api.commit.side_effect = [aborted, response] - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - called_with = [] - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - - with mock.patch("time.sleep") as sleep_mock: - session.run_in_transaction(unit_of_work, "abc", some_arg="def") - - sleep_mock.assert_called_once_with(RETRY_SECONDS + RETRY_NANOS / 1.0e9) - self.assertEqual(len(called_with), 2) - - for index, (txn, args, kw) in enumerate(called_with): - self.assertIsInstance(txn, Transaction) - if index == 1: - self.assertEqual(txn.committed, now) - else: - self.assertIsNone(txn.committed) - self.assertEqual(args, ("abc",)) - self.assertEqual(kw, {"some_arg": "def"}) - - expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - self.assertEqual( - gax_api.begin_transaction.call_args_list, - [ - mock.call( - self.SESSION_NAME, - expected_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - ] - * 2, - ) - self.assertEqual( - gax_api.commit.call_args_list, - [ - mock.call( - self.SESSION_NAME, - mutations=txn._mutations, - transaction_id=TRANSACTION_ID, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - ] - * 2, - ) - - def test_run_in_transaction_w_callback_raises_abort_wo_metadata(self): - import datetime - from google.api_core.exceptions import Aborted - from google.protobuf.duration_pb2 import Duration - from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - RETRY_SECONDS = 1 - RETRY_NANOS = 3456 - transaction_pb = TransactionPB(id=TRANSACTION_ID) - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - retry_info = RetryInfo( - retry_delay=Duration(seconds=RETRY_SECONDS, nanos=RETRY_NANOS) - ) - trailing_metadata = [ - ("google.rpc.retryinfo-bin", retry_info.SerializeToString()) - ] - gax_api = self._make_spanner_api() - gax_api.begin_transaction.return_value = transaction_pb - gax_api.commit.side_effect = [response] - database = self._make_database() - database.spanner_api = gax_api - 
session = self._make_one(database) - session._session_id = self.SESSION_ID - - called_with = [] - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - if len(called_with) < 2: - raise _make_rpc_error(Aborted, trailing_metadata) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - - with mock.patch("time.sleep") as sleep_mock: - session.run_in_transaction(unit_of_work) - - sleep_mock.assert_called_once_with(RETRY_SECONDS + RETRY_NANOS / 1.0e9) - self.assertEqual(len(called_with), 2) - for index, (txn, args, kw) in enumerate(called_with): - self.assertIsInstance(txn, Transaction) - if index == 0: - self.assertIsNone(txn.committed) - else: - self.assertEqual(txn.committed, now) - self.assertEqual(args, ()) - self.assertEqual(kw, {}) - - expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - self.assertEqual( - gax_api.begin_transaction.call_args_list, - [ - mock.call( - self.SESSION_NAME, - expected_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - ] - * 2, - ) - gax_api.commit.assert_called_once_with( - self.SESSION_NAME, - mutations=txn._mutations, - transaction_id=TRANSACTION_ID, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_run_in_transaction_w_abort_w_retry_metadata_deadline(self): - import datetime - from google.api_core.exceptions import Aborted - from google.protobuf.duration_pb2 import Duration - from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud.spanner_v1.transaction import Transaction - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - RETRY_SECONDS = 1 - RETRY_NANOS = 3456 - transaction_pb = TransactionPB(id=TRANSACTION_ID) - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - retry_info = RetryInfo( - retry_delay=Duration(seconds=RETRY_SECONDS, nanos=RETRY_NANOS) - ) - trailing_metadata = [ - ("google.rpc.retryinfo-bin", retry_info.SerializeToString()) - ] - aborted = _make_rpc_error(Aborted, trailing_metadata=trailing_metadata) - gax_api = self._make_spanner_api() - gax_api.begin_transaction.return_value = transaction_pb - gax_api.commit.side_effect = [aborted, response] - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - called_with = [] - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - - # retry once w/ timeout_secs=1 - def _time(_results=[1, 1.5]): - return _results.pop(0) - - with mock.patch("time.time", _time): - with mock.patch("time.sleep") as sleep_mock: - with self.assertRaises(Aborted): - session.run_in_transaction(unit_of_work, "abc", timeout_secs=1) - - sleep_mock.assert_not_called() - - self.assertEqual(len(called_with), 1) - txn, args, kw = called_with[0] - self.assertIsInstance(txn, Transaction) - self.assertIsNone(txn.committed) - self.assertEqual(args, ("abc",)) - self.assertEqual(kw, {}) - - 
expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - gax_api.begin_transaction.assert_called_once_with( - self.SESSION_NAME, - expected_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - gax_api.commit.assert_called_once_with( - self.SESSION_NAME, - mutations=txn._mutations, - transaction_id=TRANSACTION_ID, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_run_in_transaction_w_timeout(self): - from google.api_core.exceptions import Aborted - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud.spanner_v1.transaction import Transaction - - TABLE_NAME = "citizens" - COLUMNS = ["email", "first_name", "last_name", "age"] - VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], - ] - TRANSACTION_ID = b"FACEDACE" - transaction_pb = TransactionPB(id=TRANSACTION_ID) - aborted = _make_rpc_error(Aborted, trailing_metadata=[]) - gax_api = self._make_spanner_api() - gax_api.begin_transaction.return_value = transaction_pb - gax_api.commit.side_effect = aborted - database = self._make_database() - database.spanner_api = gax_api - session = self._make_one(database) - session._session_id = self.SESSION_ID - - called_with = [] - - def unit_of_work(txn, *args, **kw): - called_with.append((txn, args, kw)) - txn.insert(TABLE_NAME, COLUMNS, VALUES) - - # retry several times to check backoff - def _time(_results=[1, 2, 4, 8]): - return _results.pop(0) - - with mock.patch("time.time", _time): - with mock.patch("time.sleep") as sleep_mock: - with self.assertRaises(Aborted): - session.run_in_transaction(unit_of_work, timeout_secs=8) - - # unpacking call args into list - call_args = [call_[0][0] for call_ in sleep_mock.call_args_list] - call_args = list(map(int, call_args)) - assert call_args == [2, 4] - assert sleep_mock.call_count == 2 - - self.assertEqual(len(called_with), 3) - for txn, args, kw in called_with: - self.assertIsInstance(txn, Transaction) - self.assertIsNone(txn.committed) - self.assertEqual(args, ()) - self.assertEqual(kw, {}) - - expected_options = TransactionOptions(read_write=TransactionOptions.ReadWrite()) - self.assertEqual( - gax_api.begin_transaction.call_args_list, - [ - mock.call( - self.SESSION_NAME, - expected_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - ] - * 3, - ) - self.assertEqual( - gax_api.commit.call_args_list, - [ - mock.call( - self.SESSION_NAME, - mutations=txn._mutations, - transaction_id=TRANSACTION_ID, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - ] - * 3, - ) - - def test_delay_helper_w_no_delay(self): - from google.cloud.spanner_v1.session import _delay_until_retry - - metadata_mock = mock.Mock() - metadata_mock.trailing_metadata.return_value = {} - - exc_mock = mock.Mock(errors=[metadata_mock]) - - def _time_func(): - return 3 - - # check if current time > deadline - with mock.patch("time.time", _time_func): - with self.assertRaises(Exception): - _delay_until_retry(exc_mock, 2, 1) - - with mock.patch("time.time", _time_func): - with mock.patch( - "google.cloud.spanner_v1.session._get_retry_delay" - ) as get_retry_delay_mock: - with mock.patch("time.sleep") as sleep_mock: - get_retry_delay_mock.return_value = None - - _delay_until_retry(exc_mock, 6, 1) - sleep_mock.assert_not_called() diff --git a/spanner/tests/unit/test_snapshot.py b/spanner/tests/unit/test_snapshot.py deleted file mode 
100644 index 883ab7325835..000000000000 --- a/spanner/tests/unit/test_snapshot.py +++ /dev/null @@ -1,1002 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest -import google.api_core.gapic_v1.method -import mock - - -TABLE_NAME = "citizens" -COLUMNS = ["email", "first_name", "last_name", "age"] -SQL_QUERY = """\ -SELECT first_name, last_name, age FROM citizens ORDER BY age""" -SQL_QUERY_WITH_PARAM = """ -SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age""" -PARAMS = {"max_age": 30} -PARAM_TYPES = {"max_age": "INT64"} -SQL_QUERY_WITH_BYTES_PARAM = """\ -SELECT image_name FROM images WHERE @bytes IN image_data""" -PARAMS_WITH_BYTES = {"bytes": b"FACEDACE"} -RESUME_TOKEN = b"DEADBEEF" -TXN_ID = b"DEAFBEAD" -SECONDS = 3 -MICROS = 123456 - - -class Test_restart_on_unavailable(unittest.TestCase): - def _call_fut(self, restart): - from google.cloud.spanner_v1.snapshot import _restart_on_unavailable - - return _restart_on_unavailable(restart) - - def _make_item(self, value, resume_token=b""): - return mock.Mock( - value=value, resume_token=resume_token, spec=["value", "resume_token"] - ) - - def test_iteration_w_empty_raw(self): - raw = _MockIterator() - restart = mock.Mock(spec=[], return_value=raw) - resumable = self._call_fut(restart) - self.assertEqual(list(resumable), []) - - def test_iteration_w_non_empty_raw(self): - ITEMS = (self._make_item(0), self._make_item(1)) - raw = _MockIterator(*ITEMS) - restart = mock.Mock(spec=[], return_value=raw) - resumable = self._call_fut(restart) - self.assertEqual(list(resumable), list(ITEMS)) - restart.assert_called_once_with() - - def test_iteration_w_raw_w_resume_tken(self): - ITEMS = ( - self._make_item(0), - self._make_item(1, resume_token=RESUME_TOKEN), - self._make_item(2), - self._make_item(3), - ) - raw = _MockIterator(*ITEMS) - restart = mock.Mock(spec=[], return_value=raw) - resumable = self._call_fut(restart) - self.assertEqual(list(resumable), list(ITEMS)) - restart.assert_called_once_with() - - def test_iteration_w_raw_raising_unavailable_no_token(self): - ITEMS = ( - self._make_item(0), - self._make_item(1, resume_token=RESUME_TOKEN), - self._make_item(2), - ) - before = _MockIterator(fail_after=True) - after = _MockIterator(*ITEMS) - restart = mock.Mock(spec=[], side_effect=[before, after]) - resumable = self._call_fut(restart) - self.assertEqual(list(resumable), list(ITEMS)) - self.assertEqual(restart.mock_calls, [mock.call(), mock.call(resume_token=b"")]) - - def test_iteration_w_raw_raising_unavailable(self): - FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN)) - SECOND = (self._make_item(2),) # discarded after 503 - LAST = (self._make_item(3),) - before = _MockIterator(*(FIRST + SECOND), fail_after=True) - after = _MockIterator(*LAST) - restart = mock.Mock(spec=[], side_effect=[before, after]) - resumable = self._call_fut(restart) - self.assertEqual(list(resumable), list(FIRST + LAST)) - self.assertEqual( - 
restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)] - ) - - def test_iteration_w_raw_raising_unavailable_after_token(self): - FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN)) - SECOND = (self._make_item(2), self._make_item(3)) - before = _MockIterator(*FIRST, fail_after=True) - after = _MockIterator(*SECOND) - restart = mock.Mock(spec=[], side_effect=[before, after]) - resumable = self._call_fut(restart) - self.assertEqual(list(resumable), list(FIRST + SECOND)) - self.assertEqual( - restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)] - ) - - -class Test_SnapshotBase(unittest.TestCase): - - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - DATABASE_ID = "database-id" - DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID - SESSION_ID = "session-id" - SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID - - def _getTargetClass(self): - from google.cloud.spanner_v1.snapshot import _SnapshotBase - - return _SnapshotBase - - def _make_one(self, session): - return self._getTargetClass()(session) - - def _makeDerived(self, session): - class _Derived(self._getTargetClass()): - - _transaction_id = None - _multi_use = False - - def _make_txn_selector(self): - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - TransactionOptions, - TransactionSelector, - ) - - if self._transaction_id: - return TransactionSelector(id=self._transaction_id) - options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(strong=True) - ) - if self._multi_use: - return TransactionSelector(begin=options) - return TransactionSelector(single_use=options) - - return _Derived(session) - - def _make_spanner_api(self): - import google.cloud.spanner_v1.gapic.spanner_client - - return mock.create_autospec( - google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True - ) - - def test_ctor(self): - session = _Session() - base = self._make_one(session) - self.assertIs(base._session, session) - self.assertEqual(base._execute_sql_count, 0) - - def test__make_txn_selector_virtual(self): - session = _Session() - base = self._make_one(session) - with self.assertRaises(NotImplementedError): - base._make_txn_selector() - - def test_read_other_error(self): - from google.cloud.spanner_v1.keyset import KeySet - - keyset = KeySet(all_=True) - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.streaming_read.side_effect = RuntimeError() - session = _Session(database) - derived = self._makeDerived(session) - - with self.assertRaises(RuntimeError): - list(derived.read(TABLE_NAME, COLUMNS, keyset)) - - def _read_helper(self, multi_use, first=True, count=0, partition=None): - from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ( - PartialResultSet, - ResultSetMetadata, - ResultSetStats, - ) - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - TransactionSelector, - TransactionOptions, - ) - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 - from google.cloud.spanner_v1.keyset import KeySet - from google.cloud.spanner_v1._helpers import _make_value_pb - - VALUES = [[u"bharney", 31], [u"phred", 32]] - VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES] - struct_type_pb = StructType( - fields=[ - StructType.Field(name="name", type=Type(code=STRING)), - 
StructType.Field(name="age", type=Type(code=INT64)), - ] - ) - metadata_pb = ResultSetMetadata(row_type=struct_type_pb) - stats_pb = ResultSetStats( - query_stats=Struct(fields={"rows_returned": _make_value_pb(2)}) - ) - result_sets = [ - PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb), - PartialResultSet(values=VALUE_PBS[1], stats=stats_pb), - ] - KEYS = [["bharney@example.com"], ["phred@example.com"]] - keyset = KeySet(keys=KEYS) - INDEX = "email-address-index" - LIMIT = 20 - database = _Database() - api = database.spanner_api = self._make_spanner_api() - api.streaming_read.return_value = _MockIterator(*result_sets) - session = _Session(database) - derived = self._makeDerived(session) - derived._multi_use = multi_use - derived._read_request_count = count - if not first: - derived._transaction_id = TXN_ID - - if partition is not None: # 'limit' and 'partition' incompatible - result_set = derived.read( - TABLE_NAME, COLUMNS, keyset, index=INDEX, partition=partition - ) - else: - result_set = derived.read( - TABLE_NAME, COLUMNS, keyset, index=INDEX, limit=LIMIT - ) - - self.assertEqual(derived._read_request_count, count + 1) - - if multi_use: - self.assertIs(result_set._source, derived) - else: - self.assertIsNone(result_set._source) - - self.assertEqual(list(result_set), VALUES) - self.assertEqual(result_set.metadata, metadata_pb) - self.assertEqual(result_set.stats, stats_pb) - - txn_options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(strong=True) - ) - - if multi_use: - if first: - expected_transaction = TransactionSelector(begin=txn_options) - else: - expected_transaction = TransactionSelector(id=TXN_ID) - else: - expected_transaction = TransactionSelector(single_use=txn_options) - - if partition is not None: - expected_limit = 0 - else: - expected_limit = LIMIT - - api.streaming_read.assert_called_once_with( - self.SESSION_NAME, - TABLE_NAME, - COLUMNS, - keyset._to_pb(), - transaction=expected_transaction, - index=INDEX, - limit=expected_limit, - partition_token=partition, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_read_wo_multi_use(self): - self._read_helper(multi_use=False) - - def test_read_wo_multi_use_w_read_request_count_gt_0(self): - with self.assertRaises(ValueError): - self._read_helper(multi_use=False, count=1) - - def test_read_w_multi_use_wo_first(self): - self._read_helper(multi_use=True, first=False) - - def test_read_w_multi_use_wo_first_w_count_gt_0(self): - self._read_helper(multi_use=True, first=False, count=1) - - def test_read_w_multi_use_w_first_w_partition(self): - PARTITION = b"FADEABED" - self._read_helper(multi_use=True, first=True, partition=PARTITION) - - def test_read_w_multi_use_w_first_w_count_gt_0(self): - with self.assertRaises(ValueError): - self._read_helper(multi_use=True, first=True, count=1) - - def test_execute_sql_other_error(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.execute_streaming_sql.side_effect = RuntimeError() - session = _Session(database) - derived = self._makeDerived(session) - - with self.assertRaises(RuntimeError): - list(derived.execute_sql(SQL_QUERY)) - - def test_execute_sql_w_params_wo_param_types(self): - database = _Database() - session = _Session(database) - derived = self._makeDerived(session) - - with self.assertRaises(ValueError): - derived.execute_sql(SQL_QUERY_WITH_PARAM, PARAMS) - - def _execute_sql_helper( - self, - multi_use, - first=True, - count=0, - partition=None, - sql_count=0, - 
timeout=google.api_core.gapic_v1.method.DEFAULT, - retry=google.api_core.gapic_v1.method.DEFAULT, - ): - from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ( - PartialResultSet, - ResultSetMetadata, - ResultSetStats, - ) - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - TransactionSelector, - TransactionOptions, - ) - from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType - from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 - from google.cloud.spanner_v1._helpers import _make_value_pb - - VALUES = [[u"bharney", u"rhubbyl", 31], [u"phred", u"phlyntstone", 32]] - VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES] - MODE = 2 # PROFILE - struct_type_pb = StructType( - fields=[ - StructType.Field(name="first_name", type=Type(code=STRING)), - StructType.Field(name="last_name", type=Type(code=STRING)), - StructType.Field(name="age", type=Type(code=INT64)), - ] - ) - metadata_pb = ResultSetMetadata(row_type=struct_type_pb) - stats_pb = ResultSetStats( - query_stats=Struct(fields={"rows_returned": _make_value_pb(2)}) - ) - result_sets = [ - PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb), - PartialResultSet(values=VALUE_PBS[1], stats=stats_pb), - ] - iterator = _MockIterator(*result_sets) - database = _Database() - api = database.spanner_api = self._make_spanner_api() - api.execute_streaming_sql.return_value = iterator - session = _Session(database) - derived = self._makeDerived(session) - derived._multi_use = multi_use - derived._read_request_count = count - derived._execute_sql_count = sql_count - if not first: - derived._transaction_id = TXN_ID - - result_set = derived.execute_sql( - SQL_QUERY_WITH_PARAM, - PARAMS, - PARAM_TYPES, - query_mode=MODE, - partition=partition, - retry=retry, - timeout=timeout, - ) - - self.assertEqual(derived._read_request_count, count + 1) - - if multi_use: - self.assertIs(result_set._source, derived) - else: - self.assertIsNone(result_set._source) - - self.assertEqual(list(result_set), VALUES) - self.assertEqual(result_set.metadata, metadata_pb) - self.assertEqual(result_set.stats, stats_pb) - - txn_options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(strong=True) - ) - - if multi_use: - if first: - expected_transaction = TransactionSelector(begin=txn_options) - else: - expected_transaction = TransactionSelector(id=TXN_ID) - else: - expected_transaction = TransactionSelector(single_use=txn_options) - - expected_params = Struct( - fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()} - ) - - api.execute_streaming_sql.assert_called_once_with( - self.SESSION_NAME, - SQL_QUERY_WITH_PARAM, - transaction=expected_transaction, - params=expected_params, - param_types=PARAM_TYPES, - query_mode=MODE, - partition_token=partition, - seqno=sql_count, - metadata=[("google-cloud-resource-prefix", database.name)], - timeout=timeout, - retry=retry, - ) - - self.assertEqual(derived._execute_sql_count, sql_count + 1) - - def test_execute_sql_wo_multi_use(self): - self._execute_sql_helper(multi_use=False) - - def test_execute_sql_wo_multi_use_w_read_request_count_gt_0(self): - with self.assertRaises(ValueError): - self._execute_sql_helper(multi_use=False, count=1) - - def test_execute_sql_w_multi_use_wo_first(self): - self._execute_sql_helper(multi_use=True, first=False, sql_count=1) - - def test_execute_sql_w_multi_use_wo_first_w_count_gt_0(self): - self._execute_sql_helper(multi_use=True, first=False, count=1) - - def 
test_execute_sql_w_multi_use_w_first(self): - self._execute_sql_helper(multi_use=True, first=True) - - def test_execute_sql_w_multi_use_w_first_w_count_gt_0(self): - with self.assertRaises(ValueError): - self._execute_sql_helper(multi_use=True, first=True, count=1) - - def test_execute_sql_w_retry(self): - self._execute_sql_helper(multi_use=False, retry=None) - - def test_execute_sql_w_timeout(self): - self._execute_sql_helper(multi_use=False, timeout=None) - - def _partition_read_helper( - self, multi_use, w_txn, size=None, max_partitions=None, index=None - ): - from google.cloud.spanner_v1.keyset import KeySet - from google.cloud.spanner_v1.types import Partition - from google.cloud.spanner_v1.types import PartitionOptions - from google.cloud.spanner_v1.types import PartitionResponse - from google.cloud.spanner_v1.types import Transaction - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector - - keyset = KeySet(all_=True) - new_txn_id = b"ABECAB91" - token_1 = b"FACE0FFF" - token_2 = b"BADE8CAF" - response = PartitionResponse( - partitions=[ - Partition(partition_token=token_1), - Partition(partition_token=token_2), - ], - transaction=Transaction(id=new_txn_id), - ) - database = _Database() - api = database.spanner_api = self._make_spanner_api() - api.partition_read.return_value = response - session = _Session(database) - derived = self._makeDerived(session) - derived._multi_use = multi_use - if w_txn: - derived._transaction_id = TXN_ID - - tokens = list( - derived.partition_read( - TABLE_NAME, - COLUMNS, - keyset, - index=index, - partition_size_bytes=size, - max_partitions=max_partitions, - ) - ) - - self.assertEqual(tokens, [token_1, token_2]) - - expected_txn_selector = TransactionSelector(id=TXN_ID) - - expected_partition_options = PartitionOptions( - partition_size_bytes=size, max_partitions=max_partitions - ) - - api.partition_read.assert_called_once_with( - session=self.SESSION_NAME, - table=TABLE_NAME, - columns=COLUMNS, - key_set=keyset._to_pb(), - transaction=expected_txn_selector, - index=index, - partition_options=expected_partition_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_partition_read_single_use_raises(self): - with self.assertRaises(ValueError): - self._partition_read_helper(multi_use=False, w_txn=True) - - def test_partition_read_wo_existing_transaction_raises(self): - with self.assertRaises(ValueError): - self._partition_read_helper(multi_use=True, w_txn=False) - - def test_partition_read_other_error(self): - from google.cloud.spanner_v1.keyset import KeySet - - keyset = KeySet(all_=True) - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.partition_read.side_effect = RuntimeError() - session = _Session(database) - derived = self._makeDerived(session) - derived._multi_use = True - derived._transaction_id = TXN_ID - - with self.assertRaises(RuntimeError): - list(derived.partition_read(TABLE_NAME, COLUMNS, keyset)) - - def test_partition_read_ok_w_index_no_options(self): - self._partition_read_helper(multi_use=True, w_txn=True, index="index") - - def test_partition_read_ok_w_size(self): - self._partition_read_helper(multi_use=True, w_txn=True, size=2000) - - def test_partition_read_ok_w_max_partitions(self): - self._partition_read_helper(multi_use=True, w_txn=True, max_partitions=4) - - def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=None): - from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.types 
import Partition - from google.cloud.spanner_v1.types import PartitionOptions - from google.cloud.spanner_v1.types import PartitionResponse - from google.cloud.spanner_v1.types import Transaction - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector - from google.cloud.spanner_v1._helpers import _make_value_pb - - new_txn_id = b"ABECAB91" - token_1 = b"FACE0FFF" - token_2 = b"BADE8CAF" - response = PartitionResponse( - partitions=[ - Partition(partition_token=token_1), - Partition(partition_token=token_2), - ], - transaction=Transaction(id=new_txn_id), - ) - database = _Database() - api = database.spanner_api = self._make_spanner_api() - api.partition_query.return_value = response - session = _Session(database) - derived = self._makeDerived(session) - derived._multi_use = multi_use - if w_txn: - derived._transaction_id = TXN_ID - - tokens = list( - derived.partition_query( - SQL_QUERY_WITH_PARAM, - PARAMS, - PARAM_TYPES, - partition_size_bytes=size, - max_partitions=max_partitions, - ) - ) - - self.assertEqual(tokens, [token_1, token_2]) - - expected_params = Struct( - fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()} - ) - - expected_txn_selector = TransactionSelector(id=TXN_ID) - - expected_partition_options = PartitionOptions( - partition_size_bytes=size, max_partitions=max_partitions - ) - - api.partition_query.assert_called_once_with( - session=self.SESSION_NAME, - sql=SQL_QUERY_WITH_PARAM, - transaction=expected_txn_selector, - params=expected_params, - param_types=PARAM_TYPES, - partition_options=expected_partition_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_partition_query_other_error(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.partition_query.side_effect = RuntimeError() - session = _Session(database) - derived = self._makeDerived(session) - derived._multi_use = True - derived._transaction_id = TXN_ID - - with self.assertRaises(RuntimeError): - list(derived.partition_query(SQL_QUERY)) - - def test_partition_query_w_params_wo_param_types(self): - database = _Database() - session = _Session(database) - derived = self._makeDerived(session) - derived._multi_use = True - derived._transaction_id = TXN_ID - - with self.assertRaises(ValueError): - list(derived.partition_query(SQL_QUERY_WITH_PARAM, PARAMS)) - - def test_partition_query_single_use_raises(self): - with self.assertRaises(ValueError): - self._partition_query_helper(multi_use=False, w_txn=True) - - def test_partition_query_wo_transaction_raises(self): - with self.assertRaises(ValueError): - self._partition_query_helper(multi_use=True, w_txn=False) - - def test_partition_query_ok_w_index_no_options(self): - self._partition_query_helper(multi_use=True, w_txn=True) - - def test_partition_query_ok_w_size(self): - self._partition_query_helper(multi_use=True, w_txn=True, size=2000) - - def test_partition_query_ok_w_max_partitions(self): - self._partition_query_helper(multi_use=True, w_txn=True, max_partitions=4) - - -class TestSnapshot(unittest.TestCase): - - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - DATABASE_ID = "database-id" - DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID - SESSION_ID = "session-id" - SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID - - def _getTargetClass(self): - from google.cloud.spanner_v1.snapshot import Snapshot - - return Snapshot - - def 
_make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def _make_spanner_api(self): - import google.cloud.spanner_v1.gapic.spanner_client - - return mock.create_autospec( - google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True - ) - - def _makeTimestamp(self): - import datetime - from google.cloud._helpers import UTC - - return datetime.datetime.utcnow().replace(tzinfo=UTC) - - def _makeDuration(self, seconds=1, microseconds=0): - import datetime - - return datetime.timedelta(seconds=seconds, microseconds=microseconds) - - def test_ctor_defaults(self): - session = _Session() - snapshot = self._make_one(session) - self.assertIs(snapshot._session, session) - self.assertTrue(snapshot._strong) - self.assertIsNone(snapshot._read_timestamp) - self.assertIsNone(snapshot._min_read_timestamp) - self.assertIsNone(snapshot._max_staleness) - self.assertIsNone(snapshot._exact_staleness) - self.assertFalse(snapshot._multi_use) - - def test_ctor_w_multiple_options(self): - timestamp = self._makeTimestamp() - duration = self._makeDuration() - session = _Session() - - with self.assertRaises(ValueError): - self._make_one(session, read_timestamp=timestamp, max_staleness=duration) - - def test_ctor_w_read_timestamp(self): - timestamp = self._makeTimestamp() - session = _Session() - snapshot = self._make_one(session, read_timestamp=timestamp) - self.assertIs(snapshot._session, session) - self.assertFalse(snapshot._strong) - self.assertEqual(snapshot._read_timestamp, timestamp) - self.assertIsNone(snapshot._min_read_timestamp) - self.assertIsNone(snapshot._max_staleness) - self.assertIsNone(snapshot._exact_staleness) - self.assertFalse(snapshot._multi_use) - - def test_ctor_w_min_read_timestamp(self): - timestamp = self._makeTimestamp() - session = _Session() - snapshot = self._make_one(session, min_read_timestamp=timestamp) - self.assertIs(snapshot._session, session) - self.assertFalse(snapshot._strong) - self.assertIsNone(snapshot._read_timestamp) - self.assertEqual(snapshot._min_read_timestamp, timestamp) - self.assertIsNone(snapshot._max_staleness) - self.assertIsNone(snapshot._exact_staleness) - self.assertFalse(snapshot._multi_use) - - def test_ctor_w_max_staleness(self): - duration = self._makeDuration() - session = _Session() - snapshot = self._make_one(session, max_staleness=duration) - self.assertIs(snapshot._session, session) - self.assertFalse(snapshot._strong) - self.assertIsNone(snapshot._read_timestamp) - self.assertIsNone(snapshot._min_read_timestamp) - self.assertEqual(snapshot._max_staleness, duration) - self.assertIsNone(snapshot._exact_staleness) - self.assertFalse(snapshot._multi_use) - - def test_ctor_w_exact_staleness(self): - duration = self._makeDuration() - session = _Session() - snapshot = self._make_one(session, exact_staleness=duration) - self.assertIs(snapshot._session, session) - self.assertFalse(snapshot._strong) - self.assertIsNone(snapshot._read_timestamp) - self.assertIsNone(snapshot._min_read_timestamp) - self.assertIsNone(snapshot._max_staleness) - self.assertEqual(snapshot._exact_staleness, duration) - self.assertFalse(snapshot._multi_use) - - def test_ctor_w_multi_use(self): - session = _Session() - snapshot = self._make_one(session, multi_use=True) - self.assertTrue(snapshot._session is session) - self.assertTrue(snapshot._strong) - self.assertIsNone(snapshot._read_timestamp) - self.assertIsNone(snapshot._min_read_timestamp) - self.assertIsNone(snapshot._max_staleness) - self.assertIsNone(snapshot._exact_staleness) - 
self.assertTrue(snapshot._multi_use) - - def test_ctor_w_multi_use_and_read_timestamp(self): - timestamp = self._makeTimestamp() - session = _Session() - snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True) - self.assertTrue(snapshot._session is session) - self.assertFalse(snapshot._strong) - self.assertEqual(snapshot._read_timestamp, timestamp) - self.assertIsNone(snapshot._min_read_timestamp) - self.assertIsNone(snapshot._max_staleness) - self.assertIsNone(snapshot._exact_staleness) - self.assertTrue(snapshot._multi_use) - - def test_ctor_w_multi_use_and_min_read_timestamp(self): - timestamp = self._makeTimestamp() - session = _Session() - - with self.assertRaises(ValueError): - self._make_one(session, min_read_timestamp=timestamp, multi_use=True) - - def test_ctor_w_multi_use_and_max_staleness(self): - duration = self._makeDuration() - session = _Session() - - with self.assertRaises(ValueError): - self._make_one(session, max_staleness=duration, multi_use=True) - - def test_ctor_w_multi_use_and_exact_staleness(self): - duration = self._makeDuration() - session = _Session() - snapshot = self._make_one(session, exact_staleness=duration, multi_use=True) - self.assertTrue(snapshot._session is session) - self.assertFalse(snapshot._strong) - self.assertIsNone(snapshot._read_timestamp) - self.assertIsNone(snapshot._min_read_timestamp) - self.assertIsNone(snapshot._max_staleness) - self.assertEqual(snapshot._exact_staleness, duration) - self.assertTrue(snapshot._multi_use) - - def test__make_txn_selector_w_transaction_id(self): - session = _Session() - snapshot = self._make_one(session) - snapshot._transaction_id = TXN_ID - selector = snapshot._make_txn_selector() - self.assertEqual(selector.id, TXN_ID) - - def test__make_txn_selector_strong(self): - session = _Session() - snapshot = self._make_one(session) - selector = snapshot._make_txn_selector() - options = selector.single_use - self.assertTrue(options.read_only.strong) - - def test__make_txn_selector_w_read_timestamp(self): - from google.cloud._helpers import _pb_timestamp_to_datetime - - timestamp = self._makeTimestamp() - session = _Session() - snapshot = self._make_one(session, read_timestamp=timestamp) - selector = snapshot._make_txn_selector() - options = selector.single_use - self.assertEqual( - _pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp - ) - - def test__make_txn_selector_w_min_read_timestamp(self): - from google.cloud._helpers import _pb_timestamp_to_datetime - - timestamp = self._makeTimestamp() - session = _Session() - snapshot = self._make_one(session, min_read_timestamp=timestamp) - selector = snapshot._make_txn_selector() - options = selector.single_use - self.assertEqual( - _pb_timestamp_to_datetime(options.read_only.min_read_timestamp), timestamp - ) - - def test__make_txn_selector_w_max_staleness(self): - duration = self._makeDuration(seconds=3, microseconds=123456) - session = _Session() - snapshot = self._make_one(session, max_staleness=duration) - selector = snapshot._make_txn_selector() - options = selector.single_use - self.assertEqual(options.read_only.max_staleness.seconds, 3) - self.assertEqual(options.read_only.max_staleness.nanos, 123456000) - - def test__make_txn_selector_w_exact_staleness(self): - duration = self._makeDuration(seconds=3, microseconds=123456) - session = _Session() - snapshot = self._make_one(session, exact_staleness=duration) - selector = snapshot._make_txn_selector() - options = selector.single_use - 
self.assertEqual(options.read_only.exact_staleness.seconds, 3) - self.assertEqual(options.read_only.exact_staleness.nanos, 123456000) - - def test__make_txn_selector_strong_w_multi_use(self): - session = _Session() - snapshot = self._make_one(session, multi_use=True) - selector = snapshot._make_txn_selector() - options = selector.begin - self.assertTrue(options.read_only.strong) - - def test__make_txn_selector_w_read_timestamp_w_multi_use(self): - from google.cloud._helpers import _pb_timestamp_to_datetime - - timestamp = self._makeTimestamp() - session = _Session() - snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True) - selector = snapshot._make_txn_selector() - options = selector.begin - self.assertEqual( - _pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp - ) - - def test__make_txn_selector_w_exact_staleness_w_multi_use(self): - duration = self._makeDuration(seconds=3, microseconds=123456) - session = _Session() - snapshot = self._make_one(session, exact_staleness=duration, multi_use=True) - selector = snapshot._make_txn_selector() - options = selector.begin - self.assertEqual(options.read_only.exact_staleness.seconds, 3) - self.assertEqual(options.read_only.exact_staleness.nanos, 123456000) - - def test_begin_wo_multi_use(self): - session = _Session() - snapshot = self._make_one(session) - with self.assertRaises(ValueError): - snapshot.begin() - - def test_begin_w_read_request_count_gt_0(self): - session = _Session() - snapshot = self._make_one(session, multi_use=True) - snapshot._read_request_count = 1 - with self.assertRaises(ValueError): - snapshot.begin() - - def test_begin_w_existing_txn_id(self): - session = _Session() - snapshot = self._make_one(session, multi_use=True) - snapshot._transaction_id = TXN_ID - with self.assertRaises(ValueError): - snapshot.begin() - - def test_begin_w_other_error(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.begin_transaction.side_effect = RuntimeError() - timestamp = self._makeTimestamp() - session = _Session(database) - snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True) - - with self.assertRaises(RuntimeError): - snapshot.begin() - - def test_begin_ok_exact_staleness(self): - from google.protobuf.duration_pb2 import Duration - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - - transaction_pb = TransactionPB(id=TXN_ID) - database = _Database() - api = database.spanner_api = self._make_spanner_api() - api.begin_transaction.return_value = transaction_pb - duration = self._makeDuration(seconds=SECONDS, microseconds=MICROS) - session = _Session(database) - snapshot = self._make_one(session, exact_staleness=duration, multi_use=True) - - txn_id = snapshot.begin() - - self.assertEqual(txn_id, TXN_ID) - self.assertEqual(snapshot._transaction_id, TXN_ID) - - expected_duration = Duration(seconds=SECONDS, nanos=MICROS * 1000) - expected_txn_options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(exact_staleness=expected_duration) - ) - - api.begin_transaction.assert_called_once_with( - session.name, - expected_txn_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - def test_begin_ok_exact_strong(self): - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - - transaction_pb = TransactionPB(id=TXN_ID) - database = _Database() - api = database.spanner_api = 
self._make_spanner_api() - api.begin_transaction.return_value = transaction_pb - session = _Session(database) - snapshot = self._make_one(session, multi_use=True) - - txn_id = snapshot.begin() - - self.assertEqual(txn_id, TXN_ID) - self.assertEqual(snapshot._transaction_id, TXN_ID) - - expected_txn_options = TransactionOptions( - read_only=TransactionOptions.ReadOnly(strong=True) - ) - - api.begin_transaction.assert_called_once_with( - session.name, - expected_txn_options, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - -class _Session(object): - def __init__(self, database=None, name=TestSnapshot.SESSION_NAME): - self._database = database - self.name = name - - -class _Database(object): - name = "testing" - - -class _MockIterator(object): - def __init__(self, *values, **kw): - self._iter_values = iter(values) - self._fail_after = kw.pop("fail_after", False) - - def __iter__(self): - return self - - def __next__(self): - from google.api_core.exceptions import ServiceUnavailable - - try: - return next(self._iter_values) - except StopIteration: - if self._fail_after: - raise ServiceUnavailable("testing") - raise - - next = __next__ diff --git a/spanner/tests/unit/test_streamed.py b/spanner/tests/unit/test_streamed.py deleted file mode 100644 index 3f3a90108d99..000000000000 --- a/spanner/tests/unit/test_streamed.py +++ /dev/null @@ -1,1034 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import unittest - -import mock - - -class TestStreamedResultSet(unittest.TestCase): - def _getTargetClass(self): - from google.cloud.spanner_v1.streamed import StreamedResultSet - - return StreamedResultSet - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_defaults(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - self.assertIs(streamed._response_iterator, iterator) - self.assertIsNone(streamed._source) - self.assertEqual(list(streamed), []) - self.assertIsNone(streamed.metadata) - self.assertIsNone(streamed.stats) - - def test_ctor_w_source(self): - iterator = _MockCancellableIterator() - source = object() - streamed = self._make_one(iterator, source=source) - self.assertIs(streamed._response_iterator, iterator) - self.assertIs(streamed._source, source) - self.assertEqual(list(streamed), []) - self.assertIsNone(streamed.metadata) - self.assertIsNone(streamed.stats) - - def test_fields_unset(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - with self.assertRaises(AttributeError): - streamed.fields - - @staticmethod - def _make_scalar_field(name, type_): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type - - return StructType.Field(name=name, type=Type(code=type_)) - - @staticmethod - def _make_array_field(name, element_type_code=None, element_type=None): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type - - if element_type is None: - element_type = Type(code=element_type_code) - array_type = Type(code="ARRAY", array_element_type=element_type) - return StructType.Field(name=name, type=array_type) - - @staticmethod - def _make_struct_type(struct_type_fields): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type - - fields = [ - StructType.Field(name=key, type=Type(code=value)) - for key, value in struct_type_fields - ] - struct_type = StructType(fields=fields) - return Type(code="STRUCT", struct_type=struct_type) - - @staticmethod - def _make_value(value): - from google.cloud.spanner_v1._helpers import _make_value_pb - - return _make_value_pb(value) - - @staticmethod - def _make_list_value(values=(), value_pbs=None): - from google.protobuf.struct_pb2 import ListValue - from google.protobuf.struct_pb2 import Value - from google.cloud.spanner_v1._helpers import _make_list_value_pb - - if value_pbs is not None: - return Value(list_value=ListValue(values=value_pbs)) - return Value(list_value=_make_list_value_pb(values)) - - @staticmethod - def _make_result_set_metadata(fields=(), transaction_id=None): - from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSetMetadata - - metadata = ResultSetMetadata() - for field in fields: - metadata.row_type.fields.add().CopyFrom(field) - if transaction_id is not None: - metadata.transaction.id = transaction_id - return metadata - - @staticmethod - def _make_result_set_stats(query_plan=None, **kw): - from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSetStats - from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1._helpers import _make_value_pb - - query_stats = Struct( - fields={key: _make_value_pb(value) for key, value in kw.items()} - ) - return ResultSetStats(query_plan=query_plan, query_stats=query_stats) - - @staticmethod - def _make_partial_result_set( - values, 
metadata=None, stats=None, chunked_value=False - ): - from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet - - return PartialResultSet( - values=values, metadata=metadata, stats=stats, chunked_value=chunked_value - ) - - def test_properties_set(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - ] - metadata = streamed._metadata = self._make_result_set_metadata(FIELDS) - stats = streamed._stats = self._make_result_set_stats() - self.assertEqual(list(streamed.fields), FIELDS) - self.assertIs(streamed.metadata, metadata) - self.assertIs(streamed.stats, stats) - - def test__merge_chunk_bool(self): - from google.cloud.spanner_v1.streamed import Unmergeable - - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("registered_voter", "BOOL")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(True) - chunk = self._make_value(False) - - with self.assertRaises(Unmergeable): - streamed._merge_chunk(chunk) - - def test__merge_chunk_int64(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("age", "INT64")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(42) - chunk = self._make_value(13) - - merged = streamed._merge_chunk(chunk) - self.assertEqual(merged.string_value, "4213") - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_float64_nan_string(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("weight", "FLOAT64")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(u"Na") - chunk = self._make_value(u"N") - - merged = streamed._merge_chunk(chunk) - self.assertEqual(merged.string_value, u"NaN") - - def test__merge_chunk_float64_w_empty(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("weight", "FLOAT64")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(3.14159) - chunk = self._make_value("") - - merged = streamed._merge_chunk(chunk) - self.assertEqual(merged.number_value, 3.14159) - - def test__merge_chunk_float64_w_float64(self): - from google.cloud.spanner_v1.streamed import Unmergeable - - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("weight", "FLOAT64")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(3.14159) - chunk = self._make_value(2.71828) - - with self.assertRaises(Unmergeable): - streamed._merge_chunk(chunk) - - def test__merge_chunk_string(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_scalar_field("name", "STRING")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(u"phred") - chunk = self._make_value(u"wylma") - - merged = streamed._merge_chunk(chunk) - - self.assertEqual(merged.string_value, u"phredwylma") - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_string_w_bytes(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - 
FIELDS = [self._make_scalar_field("image", "BYTES")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value( - u"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA" - u"6fptVAAAACXBIWXMAAAsTAAALEwEAmpwYAAAA\n" - ) - chunk = self._make_value( - u"B3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0FNUExF" - u"MG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n" - ) - - merged = streamed._merge_chunk(chunk) - - self.assertEqual( - merged.string_value, - u"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAAAAAA6fptVAAAACXBIWXMAAAsTAAAL" - u"EwEAmpwYAAAA\nB3RJTUUH4QQGFwsBTL3HMwAAABJpVFh0Q29tbWVudAAAAAAAU0" - u"FNUExFMG3E+AAAAApJREFUCNdj\nYAAAAAIAAeIhvDMAAAAASUVORK5CYII=\n", - ) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_bool(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="BOOL")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([True, True]) - chunk = self._make_list_value([False, False, False]) - - merged = streamed._merge_chunk(chunk) - - expected = self._make_list_value([True, True, False, False, False]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_int(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="INT64")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([0, 1, 2]) - chunk = self._make_list_value([3, 4, 5]) - - merged = streamed._merge_chunk(chunk) - - expected = self._make_list_value([0, 1, 23, 4, 5]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_float(self): - import math - - PI = math.pi - EULER = math.e - SQRT_2 = math.sqrt(2.0) - LOG_10 = math.log(10) - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="FLOAT64")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([PI, SQRT_2]) - chunk = self._make_list_value(["", EULER, LOG_10]) - - merged = streamed._merge_chunk(chunk) - - expected = self._make_list_value([PI, SQRT_2, EULER, LOG_10]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_string_with_empty(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="STRING")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C"]) - chunk = self._make_list_value([]) - - merged = streamed._merge_chunk(chunk) - - expected = self._make_list_value([u"A", u"B", u"C"]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_string(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="STRING")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C"]) - chunk = self._make_list_value([None, u"D", u"E"]) - - merged = streamed._merge_chunk(chunk) - - expected = 
self._make_list_value([u"A", u"B", u"C", None, u"D", u"E"]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_string_with_null(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [self._make_array_field("name", element_type_code="STRING")] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value([u"A", u"B", u"C"]) - chunk = self._make_list_value([u"D", u"E"]) - - merged = streamed._merge_chunk(chunk) - - expected = self._make_list_value([u"A", u"B", u"CD", u"E"]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_array_of_int(self): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type - - subarray_type = Type(code="ARRAY", array_element_type=Type(code="INT64")) - array_type = Type(code="ARRAY", array_element_type=subarray_type) - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [StructType.Field(name="loloi", type=array_type)] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value( - value_pbs=[self._make_list_value([0, 1]), self._make_list_value([2])] - ) - chunk = self._make_list_value( - value_pbs=[self._make_list_value([3]), self._make_list_value([4, 5])] - ) - - merged = streamed._merge_chunk(chunk) - - expected = self._make_list_value( - value_pbs=[ - self._make_list_value([0, 1]), - self._make_list_value([23]), - self._make_list_value([4, 5]), - ] - ) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_array_of_string(self): - from google.cloud.spanner_v1.proto.type_pb2 import StructType - from google.cloud.spanner_v1.proto.type_pb2 import Type - - subarray_type = Type(code="ARRAY", array_element_type=Type(code="STRING")) - array_type = Type(code="ARRAY", array_element_type=subarray_type) - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [StructType.Field(name="lolos", type=array_type)] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_list_value( - value_pbs=[ - self._make_list_value([u"A", u"B"]), - self._make_list_value([u"C"]), - ] - ) - chunk = self._make_list_value( - value_pbs=[ - self._make_list_value([u"D"]), - self._make_list_value([u"E", u"F"]), - ] - ) - - merged = streamed._merge_chunk(chunk) - - expected = self._make_list_value( - value_pbs=[ - self._make_list_value([u"A", u"B"]), - self._make_list_value([u"CD"]), - self._make_list_value([u"E", u"F"]), - ] - ) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_struct(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - struct_type = self._make_struct_type([("name", "STRING"), ("age", "INT64")]) - FIELDS = [self._make_array_field("test", element_type=struct_type)] - streamed._metadata = self._make_result_set_metadata(FIELDS) - partial = self._make_list_value([u"Phred "]) - streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) - rest = self._make_list_value([u"Phlyntstone", 31]) - chunk = self._make_list_value(value_pbs=[rest]) - - merged = streamed._merge_chunk(chunk) - - struct = self._make_list_value([u"Phred Phlyntstone", 31]) - expected = 
self._make_list_value(value_pbs=[struct]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_struct_with_empty(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - struct_type = self._make_struct_type([("name", "STRING"), ("age", "INT64")]) - FIELDS = [self._make_array_field("test", element_type=struct_type)] - streamed._metadata = self._make_result_set_metadata(FIELDS) - partial = self._make_list_value([u"Phred "]) - streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) - rest = self._make_list_value([]) - chunk = self._make_list_value(value_pbs=[rest]) - - merged = streamed._merge_chunk(chunk) - - expected = self._make_list_value(value_pbs=[partial]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test__merge_chunk_array_of_struct_unmergeable(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - struct_type = self._make_struct_type( - [("name", "STRING"), ("registered", "BOOL"), ("voted", "BOOL")] - ) - FIELDS = [self._make_array_field("test", element_type=struct_type)] - streamed._metadata = self._make_result_set_metadata(FIELDS) - partial = self._make_list_value([u"Phred Phlyntstone", True]) - streamed._pending_chunk = self._make_list_value(value_pbs=[partial]) - rest = self._make_list_value([True]) - chunk = self._make_list_value(value_pbs=[rest]) - - merged = streamed._merge_chunk(chunk) - - struct = self._make_list_value([u"Phred Phlyntstone", True, True]) - expected = self._make_list_value(value_pbs=[struct]) - self.assertEqual(merged, expected) - self.assertIsNone(streamed._pending_chunk) - - def test_merge_values_empty_and_empty(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._current_row = [] - streamed._merge_values([]) - self.assertEqual(list(streamed), []) - self.assertEqual(streamed._current_row, []) - - def test_merge_values_empty_and_partial(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - streamed._metadata = self._make_result_set_metadata(FIELDS) - BARE = [u"Phred Phlyntstone", 42] - VALUES = [self._make_value(bare) for bare in BARE] - streamed._current_row = [] - streamed._merge_values(VALUES) - self.assertEqual(list(streamed), []) - self.assertEqual(streamed._current_row, BARE) - - def test_merge_values_empty_and_filled(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - streamed._metadata = self._make_result_set_metadata(FIELDS) - BARE = [u"Phred Phlyntstone", 42, True] - VALUES = [self._make_value(bare) for bare in BARE] - streamed._current_row = [] - streamed._merge_values(VALUES) - self.assertEqual(list(streamed), [BARE]) - self.assertEqual(streamed._current_row, []) - - def test_merge_values_empty_and_filled_plus(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - 
self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - streamed._metadata = self._make_result_set_metadata(FIELDS) - BARE = [ - u"Phred Phlyntstone", - 42, - True, - u"Bharney Rhubble", - 39, - True, - u"Wylma Phlyntstone", - ] - VALUES = [self._make_value(bare) for bare in BARE] - streamed._current_row = [] - streamed._merge_values(VALUES) - self.assertEqual(list(streamed), [BARE[0:3], BARE[3:6]]) - self.assertEqual(streamed._current_row, BARE[6:]) - - def test_merge_values_partial_and_empty(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - streamed._metadata = self._make_result_set_metadata(FIELDS) - BEFORE = [u"Phred Phlyntstone"] - streamed._current_row[:] = BEFORE - streamed._merge_values([]) - self.assertEqual(list(streamed), []) - self.assertEqual(streamed._current_row, BEFORE) - - def test_merge_values_partial_and_partial(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - streamed._metadata = self._make_result_set_metadata(FIELDS) - BEFORE = [u"Phred Phlyntstone"] - streamed._current_row[:] = BEFORE - MERGED = [42] - TO_MERGE = [self._make_value(item) for item in MERGED] - streamed._merge_values(TO_MERGE) - self.assertEqual(list(streamed), []) - self.assertEqual(streamed._current_row, BEFORE + MERGED) - - def test_merge_values_partial_and_filled(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - streamed._metadata = self._make_result_set_metadata(FIELDS) - BEFORE = [u"Phred Phlyntstone"] - streamed._current_row[:] = BEFORE - MERGED = [42, True] - TO_MERGE = [self._make_value(item) for item in MERGED] - streamed._merge_values(TO_MERGE) - self.assertEqual(list(streamed), [BEFORE + MERGED]) - self.assertEqual(streamed._current_row, []) - - def test_merge_values_partial_and_filled_plus(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - streamed._metadata = self._make_result_set_metadata(FIELDS) - BEFORE = [self._make_value(u"Phred Phlyntstone")] - streamed._current_row[:] = BEFORE - MERGED = [42, True, u"Bharney Rhubble", 39, True, u"Wylma Phlyntstone"] - TO_MERGE = [self._make_value(item) for item in MERGED] - VALUES = BEFORE + MERGED - streamed._merge_values(TO_MERGE) - self.assertEqual(list(streamed), [VALUES[0:3], VALUES[3:6]]) - self.assertEqual(streamed._current_row, VALUES[6:]) - - def test_one_or_none_no_value(self): - streamed = self._make_one(_MockCancellableIterator()) - with mock.patch.object(streamed, "_consume_next") as consume_next: - consume_next.side_effect = StopIteration - self.assertIsNone(streamed.one_or_none()) - - def test_one_or_none_single_value(self): - streamed = self._make_one(_MockCancellableIterator()) - streamed._rows = ["foo"] - with mock.patch.object(streamed, "_consume_next") as consume_next: - 
consume_next.side_effect = StopIteration - self.assertEqual(streamed.one_or_none(), "foo") - - def test_one_or_none_multiple_values(self): - streamed = self._make_one(_MockCancellableIterator()) - streamed._rows = ["foo", "bar"] - with self.assertRaises(ValueError): - streamed.one_or_none() - - def test_one_or_none_consumed_stream(self): - streamed = self._make_one(_MockCancellableIterator()) - streamed._metadata = object() - with self.assertRaises(RuntimeError): - streamed.one_or_none() - - def test_one_single_value(self): - streamed = self._make_one(_MockCancellableIterator()) - streamed._rows = ["foo"] - with mock.patch.object(streamed, "_consume_next") as consume_next: - consume_next.side_effect = StopIteration - self.assertEqual(streamed.one(), "foo") - - def test_one_no_value(self): - from google.cloud import exceptions - - iterator = _MockCancellableIterator(["foo"]) - streamed = self._make_one(iterator) - with mock.patch.object(streamed, "_consume_next") as consume_next: - consume_next.side_effect = StopIteration - with self.assertRaises(exceptions.NotFound): - streamed.one() - - def test_consume_next_empty(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - with self.assertRaises(StopIteration): - streamed._consume_next() - - def test_consume_next_first_set_partial(self): - TXN_ID = b"DEADBEEF" - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - metadata = self._make_result_set_metadata(FIELDS, transaction_id=TXN_ID) - BARE = [u"Phred Phlyntstone", 42] - VALUES = [self._make_value(bare) for bare in BARE] - result_set = self._make_partial_result_set(VALUES, metadata=metadata) - iterator = _MockCancellableIterator(result_set) - source = mock.Mock(_transaction_id=None, spec=["_transaction_id"]) - streamed = self._make_one(iterator, source=source) - streamed._consume_next() - self.assertEqual(list(streamed), []) - self.assertEqual(streamed._current_row, BARE) - self.assertEqual(streamed.metadata, metadata) - self.assertEqual(source._transaction_id, TXN_ID) - - def test_consume_next_first_set_partial_existing_txn_id(self): - TXN_ID = b"DEADBEEF" - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - metadata = self._make_result_set_metadata(FIELDS, transaction_id=b"") - BARE = [u"Phred Phlyntstone", 42] - VALUES = [self._make_value(bare) for bare in BARE] - result_set = self._make_partial_result_set(VALUES, metadata=metadata) - iterator = _MockCancellableIterator(result_set) - source = mock.Mock(_transaction_id=TXN_ID, spec=["_transaction_id"]) - streamed = self._make_one(iterator, source=source) - streamed._consume_next() - self.assertEqual(list(streamed), []) - self.assertEqual(streamed._current_row, BARE) - self.assertEqual(streamed.metadata, metadata) - self.assertEqual(source._transaction_id, TXN_ID) - - def test_consume_next_w_partial_result(self): - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - VALUES = [self._make_value(u"Phred ")] - result_set = self._make_partial_result_set(VALUES, chunked_value=True) - iterator = _MockCancellableIterator(result_set) - streamed = self._make_one(iterator) - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._consume_next() - self.assertEqual(list(streamed), []) - 
self.assertEqual(streamed._current_row, []) - self.assertEqual(streamed._pending_chunk, VALUES[0]) - - def test_consume_next_w_pending_chunk(self): - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - BARE = [ - u"Phlyntstone", - 42, - True, - u"Bharney Rhubble", - 39, - True, - u"Wylma Phlyntstone", - ] - VALUES = [self._make_value(bare) for bare in BARE] - result_set = self._make_partial_result_set(VALUES) - iterator = _MockCancellableIterator(result_set) - streamed = self._make_one(iterator) - streamed._metadata = self._make_result_set_metadata(FIELDS) - streamed._pending_chunk = self._make_value(u"Phred ") - streamed._consume_next() - self.assertEqual( - list(streamed), - [[u"Phred Phlyntstone", BARE[1], BARE[2]], [BARE[3], BARE[4], BARE[5]]], - ) - self.assertEqual(streamed._current_row, [BARE[6]]) - self.assertIsNone(streamed._pending_chunk) - - def test_consume_next_last_set(self): - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - metadata = self._make_result_set_metadata(FIELDS) - stats = self._make_result_set_stats( - rows_returned="1", elapsed_time="1.23 secs", cpu_time="0.98 secs" - ) - BARE = [u"Phred Phlyntstone", 42, True] - VALUES = [self._make_value(bare) for bare in BARE] - result_set = self._make_partial_result_set(VALUES, stats=stats) - iterator = _MockCancellableIterator(result_set) - streamed = self._make_one(iterator) - streamed._metadata = metadata - streamed._consume_next() - self.assertEqual(list(streamed), [BARE]) - self.assertEqual(streamed._current_row, []) - self.assertEqual(streamed._stats, stats) - - def test___iter___empty(self): - iterator = _MockCancellableIterator() - streamed = self._make_one(iterator) - found = list(streamed) - self.assertEqual(found, []) - - def test___iter___one_result_set_partial(self): - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - metadata = self._make_result_set_metadata(FIELDS) - BARE = [u"Phred Phlyntstone", 42] - VALUES = [self._make_value(bare) for bare in BARE] - result_set = self._make_partial_result_set(VALUES, metadata=metadata) - iterator = _MockCancellableIterator(result_set) - streamed = self._make_one(iterator) - found = list(streamed) - self.assertEqual(found, []) - self.assertEqual(list(streamed), []) - self.assertEqual(streamed._current_row, BARE) - self.assertEqual(streamed.metadata, metadata) - - def test___iter___multiple_result_sets_filled(self): - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - metadata = self._make_result_set_metadata(FIELDS) - BARE = [ - u"Phred Phlyntstone", - 42, - True, - u"Bharney Rhubble", - 39, - True, - u"Wylma Phlyntstone", - 41, - True, - ] - VALUES = [self._make_value(bare) for bare in BARE] - result_set1 = self._make_partial_result_set(VALUES[:4], metadata=metadata) - result_set2 = self._make_partial_result_set(VALUES[4:]) - iterator = _MockCancellableIterator(result_set1, result_set2) - streamed = self._make_one(iterator) - found = list(streamed) - self.assertEqual( - found, - [ - [BARE[0], BARE[1], BARE[2]], - [BARE[3], BARE[4], BARE[5]], - [BARE[6], BARE[7], BARE[8]], - ], - ) - self.assertEqual(list(streamed), []) - 
self.assertEqual(streamed._current_row, []) - self.assertIsNone(streamed._pending_chunk) - - def test___iter___w_existing_rows_read(self): - FIELDS = [ - self._make_scalar_field("full_name", "STRING"), - self._make_scalar_field("age", "INT64"), - self._make_scalar_field("married", "BOOL"), - ] - metadata = self._make_result_set_metadata(FIELDS) - ALREADY = [[u"Pebbylz Phlyntstone", 4, False], [u"Dino Rhubble", 4, False]] - BARE = [ - u"Phred Phlyntstone", - 42, - True, - u"Bharney Rhubble", - 39, - True, - u"Wylma Phlyntstone", - 41, - True, - ] - VALUES = [self._make_value(bare) for bare in BARE] - result_set1 = self._make_partial_result_set(VALUES[:4], metadata=metadata) - result_set2 = self._make_partial_result_set(VALUES[4:]) - iterator = _MockCancellableIterator(result_set1, result_set2) - streamed = self._make_one(iterator) - streamed._rows[:] = ALREADY - found = list(streamed) - self.assertEqual( - found, - ALREADY - + [ - [BARE[0], BARE[1], BARE[2]], - [BARE[3], BARE[4], BARE[5]], - [BARE[6], BARE[7], BARE[8]], - ], - ) - self.assertEqual(list(streamed), []) - self.assertEqual(streamed._current_row, []) - self.assertIsNone(streamed._pending_chunk) - - -class _MockCancellableIterator(object): - - cancel_calls = 0 - - def __init__(self, *values): - self.iter_values = iter(values) - - def next(self): - return next(self.iter_values) - - def __next__(self): # pragma: NO COVER Py3k - return self.next() - - -class TestStreamedResultSet_JSON_acceptance_tests(unittest.TestCase): - - _json_tests = None - - def _getTargetClass(self): - from google.cloud.spanner_v1.streamed import StreamedResultSet - - return StreamedResultSet - - def _make_one(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def _load_json_test(self, test_name): - import os - - if self.__class__._json_tests is None: - dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, "streaming-read-acceptance-test.json") - raw = _parse_streaming_read_acceptance_tests(filename) - tests = self.__class__._json_tests = {} - for (name, partial_result_sets, results) in raw: - tests[name] = partial_result_sets, results - return self.__class__._json_tests[test_name] - - # Non-error cases - - def _match_results(self, testcase_name, assert_equality=None): - partial_result_sets, expected = self._load_json_test(testcase_name) - iterator = _MockCancellableIterator(*partial_result_sets) - partial = self._make_one(iterator) - if assert_equality is not None: - assert_equality(list(partial), expected) - else: - self.assertEqual(list(partial), expected) - - def test_basic(self): - self._match_results("Basic Test") - - def test_string_chunking(self): - self._match_results("String Chunking Test") - - def test_string_array_chunking(self): - self._match_results("String Array Chunking Test") - - def test_string_array_chunking_with_nulls(self): - self._match_results("String Array Chunking Test With Nulls") - - def test_string_array_chunking_with_empty_strings(self): - self._match_results("String Array Chunking Test With Empty Strings") - - def test_string_array_chunking_with_one_large_string(self): - self._match_results("String Array Chunking Test With One Large String") - - def test_int64_array_chunking(self): - self._match_results("INT64 Array Chunking Test") - - def test_float64_array_chunking(self): - import math - - def assert_float_equality(lhs, rhs): - # NaN, +Inf, and -Inf can't be tested for equality - if lhs is None: - self.assertIsNone(rhs) - elif math.isnan(lhs): - self.assertTrue(math.isnan(rhs)) - elif 
math.isinf(lhs): - self.assertTrue(math.isinf(rhs)) - # but +Inf and -Inf can be tested for magnitude - self.assertTrue((lhs > 0) == (rhs > 0)) - else: - self.assertEqual(lhs, rhs) - - def assert_rows_equality(lhs, rhs): - self.assertEqual(len(lhs), len(rhs)) - for l_rows, r_rows in zip(lhs, rhs): - self.assertEqual(len(l_rows), len(r_rows)) - for l_row, r_row in zip(l_rows, r_rows): - self.assertEqual(len(l_row), len(r_row)) - for l_cell, r_cell in zip(l_row, r_row): - assert_float_equality(l_cell, r_cell) - - self._match_results("FLOAT64 Array Chunking Test", assert_rows_equality) - - def test_struct_array_chunking(self): - self._match_results("Struct Array Chunking Test") - - def test_nested_struct_array(self): - self._match_results("Nested Struct Array Test") - - def test_nested_struct_array_chunking(self): - self._match_results("Nested Struct Array Chunking Test") - - def test_struct_array_and_string_chunking(self): - self._match_results("Struct Array And String Chunking Test") - - def test_multiple_row_single_chunk(self): - self._match_results("Multiple Row Single Chunk") - - def test_multiple_row_multiple_chunks(self): - self._match_results("Multiple Row Multiple Chunks") - - def test_multiple_row_chunks_non_chunks_interleaved(self): - self._match_results("Multiple Row Chunks/Non Chunks Interleaved") - - -def _generate_partial_result_sets(prs_text_pbs): - from google.protobuf.json_format import Parse - from google.cloud.spanner_v1.proto.result_set_pb2 import PartialResultSet - - partial_result_sets = [] - - for prs_text_pb in prs_text_pbs: - prs = PartialResultSet() - partial_result_sets.append(Parse(prs_text_pb, prs)) - - return partial_result_sets - - -def _normalize_int_array(cell): - normalized = [] - for subcell in cell: - if subcell is not None: - subcell = int(subcell) - normalized.append(subcell) - return normalized - - -def _normalize_float(cell): - if cell == u"Infinity": - return float("inf") - if cell == u"-Infinity": - return float("-inf") - if cell == u"NaN": - return float("nan") - if cell is not None: - return float(cell) - - -def _normalize_results(rows_data, fields): - """Helper for _parse_streaming_read_acceptance_tests""" - from google.cloud.spanner_v1.proto import type_pb2 - - normalized = [] - for row_data in rows_data: - row = [] - assert len(row_data) == len(fields) - for cell, field in zip(row_data, fields): - if field.type.code == type_pb2.INT64: - cell = int(cell) - if field.type.code == type_pb2.FLOAT64: - cell = _normalize_float(cell) - elif field.type.code == type_pb2.BYTES: - cell = cell.encode("utf8") - elif field.type.code == type_pb2.ARRAY: - if field.type.array_element_type.code == type_pb2.INT64: - cell = _normalize_int_array(cell) - elif field.type.array_element_type.code == type_pb2.FLOAT64: - cell = [_normalize_float(subcell) for subcell in cell] - row.append(cell) - normalized.append(row) - return normalized - - -def _parse_streaming_read_acceptance_tests(filename): - """Parse acceptance tests from JSON - - See streaming-read-acceptance-test.json - """ - import json - - with open(filename) as json_file: - test_json = json.load(json_file) - - for test in test_json["tests"]: - name = test["name"] - partial_result_sets = _generate_partial_result_sets(test["chunks"]) - fields = partial_result_sets[0].metadata.row_type.fields - result = _normalize_results(test["result"]["value"], fields) - yield name, partial_result_sets, result diff --git a/spanner/tests/unit/test_transaction.py b/spanner/tests/unit/test_transaction.py deleted file mode 100644 
index cceff89fcaac..000000000000 --- a/spanner/tests/unit/test_transaction.py +++ /dev/null @@ -1,598 +0,0 @@ -# Copyright 2016 Google LLC All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest - -import mock - - -TABLE_NAME = "citizens" -COLUMNS = ["email", "first_name", "last_name", "age"] -VALUES = [ - ["phred@exammple.com", "Phred", "Phlyntstone", 32], - ["bharney@example.com", "Bharney", "Rhubble", 31], -] -DML_QUERY = """\ -INSERT INTO citizens(first_name, last_name, age) -VALUES ("Phred", "Phlyntstone", 32) -""" -DML_QUERY_WITH_PARAM = """ -INSERT INTO citizens(first_name, last_name, age) -VALUES ("Phred", "Phlyntstone", @age) -""" -PARAMS = {"age": 30} -PARAM_TYPES = {"age": "INT64"} - - -class TestTransaction(unittest.TestCase): - - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - DATABASE_ID = "database-id" - DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID - SESSION_ID = "session-id" - SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID - TRANSACTION_ID = b"DEADBEEF" - - def _getTargetClass(self): - from google.cloud.spanner_v1.transaction import Transaction - - return Transaction - - def _make_one(self, session, *args, **kwargs): - transaction = self._getTargetClass()(session, *args, **kwargs) - session._transaction = transaction - return transaction - - def _make_spanner_api(self): - import google.cloud.spanner_v1.gapic.spanner_client - - return mock.create_autospec( - google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True - ) - - def test_ctor_session_w_existing_txn(self): - session = _Session() - session._transaction = object() - with self.assertRaises(ValueError): - self._make_one(session) - - def test_ctor_defaults(self): - session = _Session() - transaction = self._make_one(session) - self.assertIs(transaction._session, session) - self.assertIsNone(transaction._transaction_id) - self.assertIsNone(transaction.committed) - self.assertFalse(transaction._rolled_back) - self.assertTrue(transaction._multi_use) - self.assertEqual(transaction._execute_sql_count, 0) - - def test__check_state_not_begun(self): - session = _Session() - transaction = self._make_one(session) - with self.assertRaises(ValueError): - transaction._check_state() - - def test__check_state_already_committed(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction.committed = object() - with self.assertRaises(ValueError): - transaction._check_state() - - def test__check_state_already_rolled_back(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction._rolled_back = True - with self.assertRaises(ValueError): - transaction._check_state() - - def test__check_state_ok(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - 
transaction._check_state() # does not raise - - def test__make_txn_selector(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - selector = transaction._make_txn_selector() - self.assertEqual(selector.id, self.TRANSACTION_ID) - - def test_begin_already_begun(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - with self.assertRaises(ValueError): - transaction.begin() - - def test_begin_already_rolled_back(self): - session = _Session() - transaction = self._make_one(session) - transaction._rolled_back = True - with self.assertRaises(ValueError): - transaction.begin() - - def test_begin_already_committed(self): - session = _Session() - transaction = self._make_one(session) - transaction.committed = object() - with self.assertRaises(ValueError): - transaction.begin() - - def test_begin_w_other_error(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.begin_transaction.side_effect = RuntimeError() - session = _Session(database) - transaction = self._make_one(session) - - with self.assertRaises(RuntimeError): - transaction.begin() - - def test_begin_ok(self): - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - ) - - transaction_pb = TransactionPB(id=self.TRANSACTION_ID) - database = _Database() - api = database.spanner_api = _FauxSpannerAPI( - _begin_transaction_response=transaction_pb - ) - session = _Session(database) - transaction = self._make_one(session) - - txn_id = transaction.begin() - - self.assertEqual(txn_id, self.TRANSACTION_ID) - self.assertEqual(transaction._transaction_id, self.TRANSACTION_ID) - - session_id, txn_options, metadata = api._begun - self.assertEqual(session_id, session.name) - self.assertTrue(txn_options.HasField("read_write")) - self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) - - def test_rollback_not_begun(self): - session = _Session() - transaction = self._make_one(session) - with self.assertRaises(ValueError): - transaction.rollback() - - def test_rollback_already_committed(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction.committed = object() - with self.assertRaises(ValueError): - transaction.rollback() - - def test_rollback_already_rolled_back(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction._rolled_back = True - with self.assertRaises(ValueError): - transaction.rollback() - - def test_rollback_w_other_error(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.rollback.side_effect = RuntimeError("other error") - session = _Session(database) - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction.insert(TABLE_NAME, COLUMNS, VALUES) - - with self.assertRaises(RuntimeError): - transaction.rollback() - - self.assertFalse(transaction._rolled_back) - - def test_rollback_ok(self): - from google.protobuf.empty_pb2 import Empty - - empty_pb = Empty() - database = _Database() - api = database.spanner_api = _FauxSpannerAPI(_rollback_response=empty_pb) - session = _Session(database) - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction.replace(TABLE_NAME, COLUMNS, VALUES) - - 
transaction.rollback() - - self.assertTrue(transaction._rolled_back) - self.assertIsNone(session._transaction) - - session_id, txn_id, metadata = api._rolled_back - self.assertEqual(session_id, session.name) - self.assertEqual(txn_id, self.TRANSACTION_ID) - self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) - - def test_commit_not_begun(self): - session = _Session() - transaction = self._make_one(session) - with self.assertRaises(ValueError): - transaction.commit() - - def test_commit_already_committed(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction.committed = object() - with self.assertRaises(ValueError): - transaction.commit() - - def test_commit_already_rolled_back(self): - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction._rolled_back = True - with self.assertRaises(ValueError): - transaction.commit() - - def test_commit_w_other_error(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.commit.side_effect = RuntimeError() - session = _Session(database) - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction.replace(TABLE_NAME, COLUMNS, VALUES) - - with self.assertRaises(RuntimeError): - transaction.commit() - - self.assertIsNone(transaction.committed) - - def _commit_helper(self, mutate=True): - import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.keyset import KeySet - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - keys = [[0], [1], [2]] - keyset = KeySet(keys=keys) - response = CommitResponse(commit_timestamp=now_pb) - database = _Database() - api = database.spanner_api = _FauxSpannerAPI(_commit_response=response) - session = _Session(database) - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - - if mutate: - transaction.delete(TABLE_NAME, keyset) - - transaction.commit() - - self.assertEqual(transaction.committed, now) - self.assertIsNone(session._transaction) - - session_id, mutations, txn_id, metadata = api._committed - self.assertEqual(session_id, session.name) - self.assertEqual(txn_id, self.TRANSACTION_ID) - self.assertEqual(mutations, transaction._mutations) - self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) - - def test_commit_no_mutations(self): - self._commit_helper(mutate=False) - - def test_commit_w_mutations(self): - self._commit_helper(mutate=True) - - def test__make_params_pb_w_params_wo_param_types(self): - session = _Session() - transaction = self._make_one(session) - - with self.assertRaises(ValueError): - transaction._make_params_pb(PARAMS, None) - - def test__make_params_pb_wo_params_w_param_types(self): - session = _Session() - transaction = self._make_one(session) - - with self.assertRaises(ValueError): - transaction._make_params_pb(None, PARAM_TYPES) - - def test__make_params_pb_w_params_w_param_types(self): - from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1._helpers import _make_value_pb - - session = _Session() - transaction = self._make_one(session) - - params_pb = transaction._make_params_pb(PARAMS, PARAM_TYPES) - - expected_params = Struct( - 
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()} - ) - self.assertEqual(params_pb, expected_params) - - def test_execute_update_other_error(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.execute_sql.side_effect = RuntimeError() - session = _Session(database) - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - - with self.assertRaises(RuntimeError): - transaction.execute_update(DML_QUERY) - - def test_execute_update_w_params_wo_param_types(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - session = _Session(database) - session = _Session() - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - - with self.assertRaises(ValueError): - transaction.execute_update(DML_QUERY_WITH_PARAM, PARAMS) - - def _execute_update_helper(self, count=0): - from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ( - ResultSet, - ResultSetStats, - ) - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector - from google.cloud.spanner_v1._helpers import _make_value_pb - - MODE = 2 # PROFILE - stats_pb = ResultSetStats(row_count_exact=1) - database = _Database() - api = database.spanner_api = self._make_spanner_api() - api.execute_sql.return_value = ResultSet(stats=stats_pb) - session = _Session(database) - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction._execute_sql_count = count - - row_count = transaction.execute_update( - DML_QUERY_WITH_PARAM, PARAMS, PARAM_TYPES, query_mode=MODE - ) - - self.assertEqual(row_count, 1) - - expected_transaction = TransactionSelector(id=self.TRANSACTION_ID) - expected_params = Struct( - fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()} - ) - - api.execute_sql.assert_called_once_with( - self.SESSION_NAME, - DML_QUERY_WITH_PARAM, - transaction=expected_transaction, - params=expected_params, - param_types=PARAM_TYPES, - query_mode=MODE, - seqno=count, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - self.assertEqual(transaction._execute_sql_count, count + 1) - - def test_execute_update_new_transaction(self): - self._execute_update_helper() - - def test_execute_update_w_count(self): - self._execute_update_helper(count=1) - - def test_batch_update_other_error(self): - database = _Database() - database.spanner_api = self._make_spanner_api() - database.spanner_api.execute_batch_dml.side_effect = RuntimeError() - session = _Session(database) - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - - with self.assertRaises(RuntimeError): - transaction.batch_update(statements=[DML_QUERY]) - - def _batch_update_helper(self, error_after=None, count=0): - from google.rpc.status_pb2 import Status - from google.protobuf.struct_pb2 import Struct - from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSet - from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSetStats - from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteBatchDmlResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector - from google.cloud.spanner_v1._helpers import _make_value_pb - - insert_dml = "INSERT INTO table(pkey, desc) VALUES (%pkey, %desc)" - insert_params = {"pkey": 12345, "desc": "DESCRIPTION"} - insert_param_types = {"pkey": "INT64", "desc": "STRING"} - update_dml = 
'UPDATE table SET desc = desc + "-amended"' - delete_dml = "DELETE FROM table WHERE desc IS NULL" - - dml_statements = [ - (insert_dml, insert_params, insert_param_types), - update_dml, - delete_dml, - ] - - stats_pbs = [ - ResultSetStats(row_count_exact=1), - ResultSetStats(row_count_exact=2), - ResultSetStats(row_count_exact=3), - ] - if error_after is not None: - stats_pbs = stats_pbs[:error_after] - expected_status = Status(code=400) - else: - expected_status = Status(code=200) - expected_row_counts = [stats.row_count_exact for stats in stats_pbs] - - response = ExecuteBatchDmlResponse( - status=expected_status, - result_sets=[ResultSet(stats=stats_pb) for stats_pb in stats_pbs], - ) - database = _Database() - api = database.spanner_api = self._make_spanner_api() - api.execute_batch_dml.return_value = response - session = _Session(database) - transaction = self._make_one(session) - transaction._transaction_id = self.TRANSACTION_ID - transaction._execute_sql_count = count - - status, row_counts = transaction.batch_update(dml_statements) - - self.assertEqual(status, expected_status) - self.assertEqual(row_counts, expected_row_counts) - - expected_transaction = TransactionSelector(id=self.TRANSACTION_ID) - expected_insert_params = Struct( - fields={ - key: _make_value_pb(value) for (key, value) in insert_params.items() - } - ) - expected_statements = [ - { - "sql": insert_dml, - "params": expected_insert_params, - "param_types": insert_param_types, - }, - {"sql": update_dml}, - {"sql": delete_dml}, - ] - - api.execute_batch_dml.assert_called_once_with( - session=self.SESSION_NAME, - transaction=expected_transaction, - statements=expected_statements, - seqno=count, - metadata=[("google-cloud-resource-prefix", database.name)], - ) - - self.assertEqual(transaction._execute_sql_count, count + 1) - - def test_batch_update_wo_errors(self): - self._batch_update_helper() - - def test_batch_update_w_errors(self): - self._batch_update_helper(error_after=2, count=1) - - def test_context_mgr_success(self): - import datetime - from google.cloud.spanner_v1.proto.spanner_pb2 import CommitResponse - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - - transaction_pb = TransactionPB(id=self.TRANSACTION_ID) - database = _Database() - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_pb = _datetime_to_pb_timestamp(now) - response = CommitResponse(commit_timestamp=now_pb) - database = _Database() - api = database.spanner_api = _FauxSpannerAPI( - _begin_transaction_response=transaction_pb, _commit_response=response - ) - session = _Session(database) - transaction = self._make_one(session) - - with transaction: - transaction.insert(TABLE_NAME, COLUMNS, VALUES) - - self.assertEqual(transaction.committed, now) - - session_id, mutations, txn_id, metadata = api._committed - self.assertEqual(session_id, self.SESSION_NAME) - self.assertEqual(txn_id, self.TRANSACTION_ID) - self.assertEqual(mutations, transaction._mutations) - self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) - - def test_context_mgr_failure(self): - from google.protobuf.empty_pb2 import Empty - - empty_pb = Empty() - from google.cloud.spanner_v1.proto.transaction_pb2 import ( - Transaction as TransactionPB, - ) - - transaction_pb = TransactionPB(id=self.TRANSACTION_ID) - database = _Database() - api = database.spanner_api = _FauxSpannerAPI( - 
_begin_transaction_response=transaction_pb, _rollback_response=empty_pb - ) - session = _Session(database) - transaction = self._make_one(session) - - with self.assertRaises(Exception): - with transaction: - transaction.insert(TABLE_NAME, COLUMNS, VALUES) - raise Exception("bail out") - - self.assertEqual(transaction.committed, None) - self.assertTrue(transaction._rolled_back) - self.assertEqual(len(transaction._mutations), 1) - - self.assertEqual(api._committed, None) - - session_id, txn_id, metadata = api._rolled_back - self.assertEqual(session_id, session.name) - self.assertEqual(txn_id, self.TRANSACTION_ID) - self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) - - -class _Database(object): - name = "testing" - - -class _Session(object): - - _transaction = None - - def __init__(self, database=None, name=TestTransaction.SESSION_NAME): - self._database = database - self.name = name - - -class _FauxSpannerAPI(object): - - _committed = None - - def __init__(self, **kwargs): - self.__dict__.update(**kwargs) - - def begin_transaction(self, session, options_, metadata=None): - self._begun = (session, options_, metadata) - return self._begin_transaction_response - - def rollback(self, session, transaction_id, metadata=None): - self._rolled_back = (session, transaction_id, metadata) - return self._rollback_response - - def commit( - self, - session, - mutations, - transaction_id="", - single_use_transaction=None, - metadata=None, - ): - assert single_use_transaction is None - self._committed = (session, mutations, transaction_id, metadata) - return self._commit_response diff --git a/storage/.coveragerc b/storage/.coveragerc deleted file mode 100644 index 098720f672e1..000000000000 --- a/storage/.coveragerc +++ /dev/null @@ -1,16 +0,0 @@ -[run] -branch = True - -[report] -fail_under = 100 -show_missing = True -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore abstract methods - raise NotImplementedError -omit = - */gapic/*.py - */proto/*.py diff --git a/storage/.flake8 b/storage/.flake8 deleted file mode 100644 index 61766fa84d02..000000000000 --- a/storage/.flake8 +++ /dev/null @@ -1,13 +0,0 @@ -[flake8] -ignore = E203, E266, E501, W503 -exclude = - # Exclude generated code. - **/proto/** - **/gapic/** - *_pb2.py - - # Standard linting exemptions. 
- __pycache__, - .git, - *.pyc, - conf.py diff --git a/storage/.repo-metadata.json b/storage/.repo-metadata.json deleted file mode 100644 index 0468fe0d51e7..000000000000 --- a/storage/.repo-metadata.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name": "storage", - "name_pretty": "Google Cloud Storage", - "product_documentation": "https://cloud.google.com/storage", - "client_documentation": "https://googleapis.dev/python/storage/latest", - "issue_tracker": "https://issuetracker.google.com/savedsearches/559782", - "release_level": "ga", - "language": "python", - "repo": "googleapis/google-cloud-python", - "distribution_name": "google-cloud-storage", - "api_id": "storage.googleapis.com", - "requires_billing": true -} \ No newline at end of file diff --git a/storage/CHANGELOG.md b/storage/CHANGELOG.md deleted file mode 100644 index 4cd1577f6d21..000000000000 --- a/storage/CHANGELOG.md +++ /dev/null @@ -1,404 +0,0 @@ -# Changelog - -[PyPI History][1] - -[1]: https://pypi.org/project/google-cloud-storage/#history - -## 1.25.0 - -01-16-2020 11:00 PST - -### Implementation Changes -- fix: replace unsafe six.PY3 with PY2 for better future compatibility with Python 4 ([#10081](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/10081)) -- fix(storage): fix document of delete blob ([#10015](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/10015)) - -### New Features -- feat(storage): support optionsRequestedPolicyVersion ([#9989](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/9989)) - -### Dependencies -- chore(storage): bump core dependency to 1.2.0 ([#10160](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/10160)) - -## 1.24.1 - -01-02-2020 13:20 PST - - -### Implementation Changes -- Add 'ARCHIVE' storage class ([#9533](https://github.com/googleapis/google-cloud-python/pull/9533)) - -## 1.24.0 - -01-02-2020 10:39 PST - - -### Implementation Changes --str() metadata for for blob ([#9796](https://github.com/googleapis/google-cloud-python/pull/9796)) - -### New Features -- Add timeout parameter to Batch interface to match google-cloud-core ([#10010](https://github.com/googleapis/google-cloud-python/pull/10010)) - -## 1.23.0 - -11-12-2019 12:57 PST - - -### Implementation Changes -- Move `create_bucket` implementation from `Bucket` to `Client`. ([#8604](https://github.com/googleapis/google-cloud-python/pull/8604)) - -### New Features -- Add opt-in raw download support. ([#9572](https://github.com/googleapis/google-cloud-python/pull/9572)) - -### Dependencies -- Pin `google-resumable-media >= 0.5.0, < 0.6dev`. ([#9572](https://github.com/googleapis/google-cloud-python/pull/9572)) - -### Documentation -- Add python 2 sunset banner to documentation. ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036)) - -### Internal / Testing Changes -- Fix query-string order dependent assert. ([#9728](https://github.com/googleapis/google-cloud-python/pull/9728)) -- Normalize VPCSC configuration in system tests. ([#9616](https://github.com/googleapis/google-cloud-python/pull/9616)) - -## 1.22.0 - -11-05-2019 10:22 PST - - -### New Features -- Add UBLA attrs to IAMConfiguration. 
([#9475](https://github.com/googleapis/google-cloud-python/pull/9475)) - -## 1.21.0 - -10-28-2019 21:52 PDT - -### Implementation Changes -- Add gcloud-python header to user agent ([#9551](https://github.com/googleapis/google-cloud-python/pull/9551)) -- Don't report a gapic version for storage ([#9549](https://github.com/googleapis/google-cloud-python/pull/9549)) -- Update storage endpoint from www.googleapis.com to storage.googleapis.com ([#9543](https://github.com/googleapis/google-cloud-python/pull/9543)) -- Call anonymous client method to remove dependency of google application credentials ([#9455](https://github.com/googleapis/google-cloud-python/pull/9455)) -- Enable CSEK w/ V4 signed URLs ([#9450](https://github.com/googleapis/google-cloud-python/pull/9450)) - -### New Features -- Support predefined ACLs in `Bucket.create` ([#9334](https://github.com/googleapis/google-cloud-python/pull/9334)) - -### Documentation -- Add `hmac_key` and notification documentation rst files ([#9529](https://github.com/googleapis/google-cloud-python/pull/9529)) -- Remove references to the old authentication credentials ([#9456](https://github.com/googleapis/google-cloud-python/pull/9456)) -- Clarify docstring for `Blob.download_as_string` ([#9332](https://github.com/googleapis/google-cloud-python/pull/9332)) - -## 1.20.0 - -09-26-2019 06:45 PDT - - -### New Features -- Add `user_project` param to HMAC-related methods. ([#9237](https://github.com/googleapis/google-cloud-python/pull/9237)) -- Add `Blob.from_string` and `Bucket.from_string` factories. ([#9143](https://github.com/googleapis/google-cloud-python/pull/9143)) - -### Documentation -- Fix intersphinx reference to `requests`. ([#9294](https://github.com/googleapis/google-cloud-python/pull/9294)) -- Fix deep / broken URL for service account setup. ([#9164](https://github.com/googleapis/google-cloud-python/pull/9164)) - -### Internal / Testing Changes -- Fix typo in `_helpers.py`. ([#9239](https://github.com/googleapis/google-cloud-python/pull/9239)) -- In systests, retry bucket creation on 503. ([#9248](https://github.com/googleapis/google-cloud-python/pull/9248)) -- Avoid using `REGIONAL` / `MULTI_REGIONAL` in examples, tests. ([#9205](https://github.com/googleapis/google-cloud-python/pull/9205)) -- Move `benchwrapper` into `tests/perf`. ([#9246](https://github.com/googleapis/google-cloud-python/pull/9246)) -- Add support for `STORAGE_EMULATOR_HOST`; add `benchwrapper` script. ([#9219](https://github.com/googleapis/google-cloud-python/pull/9219)) - - -## 1.19.0 - -08-28-2019 09:45 PDT - -### Implementation Changes -- Expose 'HMACKeyMetadata.id' field. ([#9115](https://github.com/googleapis/google-cloud-python/pull/9115)) -- Make 'Blob.bucket' a readonly property. ([#9113](https://github.com/googleapis/google-cloud-python/pull/9113)) -- Clarify 'response_type' for signed_url methods. ([#8942](https://github.com/googleapis/google-cloud-python/pull/8942)) - -### New Features -- Add `client_options` to constructors for manual clients. ([#9054](https://github.com/googleapis/google-cloud-python/pull/9054)) - -### Documentation -- Remove compatability badges from READMEs. ([#9035](https://github.com/googleapis/google-cloud-python/pull/9035)) - -### Internal / Testing Changes -- Remove CI for gh-pages, use googleapis.dev for api_core refs. ([#9085](https://github.com/googleapis/google-cloud-python/pull/9085)) -- Fix tests broken by yesterday's google-resumable-media release. 
([#9119](https://github.com/googleapis/google-cloud-python/pull/9119)) -- Harden 'test_access_to_public_bucket' systest against 429 / 503 errors. ([#8997](https://github.com/googleapis/google-cloud-python/pull/8997)) - -## 1.18.0 - -08-07-2019 00:37 PDT - - -### New Features -- Add HMAC key support. ([#8430](https://github.com/googleapis/google-cloud-python/pull/8430)) - -### Documentation -- Mark old storage classes as legacy, not deprecated. ([#8887](https://github.com/googleapis/google-cloud-python/pull/8887)) - -### Internal / Testing Changes -- Normalize 'lint' / 'blacken' support under nox. ([#8831](https://github.com/googleapis/google-cloud-python/pull/8831)) -- Update intersphinx mapping for requests. ([#8805](https://github.com/googleapis/google-cloud-python/pull/8805)) - -## 1.17.0 - -07-24-2019 12:37 PDT - - -### New Features -- Add `Bucket.location_type` property. ([#8570](https://github.com/googleapis/google-cloud-python/pull/8570)) -- Add `Client.list_blobs(bucket_or_name)`. ([#8375](https://github.com/googleapis/google-cloud-python/pull/8375)) - - -### Implementation Changes -- Retry bucket creation in signing setup. ([#8620](https://github.com/googleapis/google-cloud-python/pull/8620)) -- Fix URI -> blob name conversion in `Client download_blob_to_file`. ([#8440](https://github.com/googleapis/google-cloud-python/pull/8440)) -- Avoid escaping tilde in blob public / signed URLs. ([#8434](https://github.com/googleapis/google-cloud-python/pull/8434)) -- Add generation to 'Blob.__repr__'. ([#8423](https://github.com/googleapis/google-cloud-python/pull/8423)) - -### Documentation -- Link to googleapis.dev documentation in READMEs. ([#8705](https://github.com/googleapis/google-cloud-python/pull/8705)) -- Add compatibility check badges to READMEs. ([#8288](https://github.com/googleapis/google-cloud-python/pull/8288)) -- Fix example in `Client.download_blob_to_file` docstring. ([#8629](https://github.com/googleapis/google-cloud-python/pull/8629)) -- Remove typing information for kwargs to not conflict with type checkers ([#8546](https://github.com/googleapis/google-cloud-python/pull/8546)) - -### Internal / Testing Changes -- Skip failing `test_bpo_set_unset_preserves_acls` systest. ([#8617](https://github.com/googleapis/google-cloud-python/pull/8617)) -- Add nox session 'docs'. ([#8478](https://github.com/googleapis/google-cloud-python/pull/8478)) -- Add docs job to publish to googleapis.dev. ([#8464](https://github.com/googleapis/google-cloud-python/pull/8464)) - -## 1.16.1 - -06-04-2019 11:09 PDT - - -### Dependencies -- Don't pin `google-api-core` in libs using `google-cloud-core`. ([#8213](https://github.com/googleapis/google-cloud-python/pull/8213)) - -### Documentation -- Fix example in `download_blob_to_file` docstring. ([#8201](https://github.com/googleapis/google-cloud-python/pull/8201)) -- Tweak `fields` docstring further. ([#8040](https://github.com/googleapis/google-cloud-python/pull/8040)) -- Improve docs for `fields` argument to `Bucket.list_blobs`. ([#8023](https://github.com/googleapis/google-cloud-python/pull/8023)) -- Fix docs typo. ([#8027](https://github.com/googleapis/google-cloud-python/pull/8027)) - -### Internal / Testing Changes -- Retry harder in face of 409/429 during module teardown. ([#8113](https://github.com/googleapis/google-cloud-python/pull/8113)) -- Add more retries for 429s during teardown operations. 
([#8112](https://github.com/googleapis/google-cloud-python/pull/8112)) - -## 1.16.0 - -05-16-2019 12:55 PDT - - -### New Features -- Update `Client.create_bucket` to take a Bucket object or string. ([#7820](https://github.com/googleapis/google-cloud-python/pull/7820)) -- Update `Client.get_bucket` to take a `Bucket` object or string. ([#7856](https://github.com/googleapis/google-cloud-python/pull/7856)) -- Add `Client.download_blob_to_file` method. ([#7949](https://github.com/googleapis/google-cloud-python/pull/7949)) -- Add `client_info` support to client / connection. ([#7872](https://github.com/googleapis/google-cloud-python/pull/7872)) - -### Dependencies -- Pin `google-cloud-core >= 1.0.0, < 2.0dev`. ([#7993](https://github.com/googleapis/google-cloud-python/pull/7993)) -- Pin `google-auth >= 1.2.0`. ([#7798](https://github.com/googleapis/google-cloud-python/pull/7798)) - -## 1.15.0 - -04-17-2019 15:37 PDT - -### New Features -- Add support for V4 signed URLs ([#7460](https://github.com/googleapis/google-cloud-python/pull/7460)) -- Add generation arguments to bucket / blob methods. ([#7444](https://github.com/googleapis/google-cloud-python/pull/7444)) - -### Implementation Changes -- Remove classifier for Python 3.4 for end-of-life. ([#7535](https://github.com/googleapis/google-cloud-python/pull/7535)) -- Ensure that 'Blob.reload' passes encryption headers. ([#7441](https://github.com/googleapis/google-cloud-python/pull/7441)) - -### Documentation -- Update client library documentation URLs. ([#7307](https://github.com/googleapis/google-cloud-python/pull/7307)) - -### Internal / Testing Changes -- Fix failing system tests ([#7714](https://github.com/googleapis/google-cloud-python/pull/7714)) -- Increase number of retries for 429 errors. ([#7484](https://github.com/googleapis/google-cloud-python/pull/7484)) -- Un-flake KMS integration tests expecting empty bucket. ([#7479](https://github.com/googleapis/google-cloud-python/pull/7479)) - -## 1.14.0 - -02-06-2019 12:49 PST - - -### New Features -- Add 'Bucket.iam_configuration' property, enabling Bucket-Policy-Only. ([#7066](https://github.com/googleapis/google-cloud-python/pull/7066)) - -### Documentation -- Improve docs for 'generate_signed_url'. ([#7201](https://github.com/googleapis/google-cloud-python/pull/7201)) - -## 1.13.2 - -12-17-2018 17:02 PST - - -### Implementation Changes -- Update `Blob.update_storage_class` to support rewrite tokens. ([#6527](https://github.com/googleapis/google-cloud-python/pull/6527)) - -### Internal / Testing Changes -- Skip signing tests for insufficient credentials ([#6917](https://github.com/googleapis/google-cloud-python/pull/6917)) -- Document Python 2 deprecation ([#6910](https://github.com/googleapis/google-cloud-python/pull/6910)) -- Normalize docs for `page_size` / `max_results` / `page_token`. ([#6842](https://github.com/googleapis/google-cloud-python/pull/6842)) - -## 1.13.1 - -12-10-2018 13:31 PST - - -### Implementation Changes -- Import `iam.policy` from `google.api_core`. ([#6741](https://github.com/googleapis/google-cloud-python/pull/6741)) -- Accomodate new back-end restriction on retention period. ([#6388](https://github.com/googleapis/google-cloud-python/pull/6388)) -- Avoid deleting a blob renamed to itself ([#6365](https://github.com/googleapis/google-cloud-python/pull/6365)) - -### Dependencies -- Update dependency to google-cloud-core ([#6835](https://github.com/googleapis/google-cloud-python/pull/6835)) -- Bump minimum `api_core` version for all GAPIC libs to 1.4.1. 
([#6391](https://github.com/googleapis/google-cloud-python/pull/6391)) - -### Documentation -- Normalize use of support level badges ([#6159](https://github.com/googleapis/google-cloud-python/pull/6159)) - -### Internal / Testing Changes -- Blacken libraries ([#6794](https://github.com/googleapis/google-cloud-python/pull/6794)) -- Add templates for flake8, coveragerc, noxfile, and black. ([#6642](https://github.com/googleapis/google-cloud-python/pull/6642)) -- Harden teardown in system tests. ([#6444](https://github.com/googleapis/google-cloud-python/pull/6444)) -- Harden `create_bucket` call in systests vs. 429 TooManyRequests. ([#6401](https://github.com/googleapis/google-cloud-python/pull/6401)) -- Skip public bucket test in VPC Service Controls ([#6230](https://github.com/googleapis/google-cloud-python/pull/6230)) -- Fix lint failure. ([#6219](https://github.com/googleapis/google-cloud-python/pull/6219)) -- Disable test running in VPC Service Controls restricted environment ([#6215](https://github.com/googleapis/google-cloud-python/pull/6215)) -- Use new Nox ([#6175](https://github.com/googleapis/google-cloud-python/pull/6175)) - -## 1.13.0 - -### New Features -- Add support for bucket retention policies ([#5534](https://github.com/googleapis/google-cloud-python/pull/5534)) -- Allow `destination.content_type` to be None in `Blob.compose`. ([#6031](https://github.com/googleapis/google-cloud-python/pull/6031)) - -### Implementation Changes -- Ensure that `method` for `Blob.generate_signed_url` is uppercase. ([#6110](https://github.com/googleapis/google-cloud-python/pull/6110)) - -### Documentation -- Clarify GCS URL signing limitations on GCE ([#6104](https://github.com/googleapis/google-cloud-python/pull/6104)) -- Redirect renamed 'usage.html'/'client.html' -> 'index.html'. ([#5996](https://github.com/googleapis/google-cloud-python/pull/5996)) - -## 1.12.0 - -### New Features -- Add support for Python 3.7, drop support for Python 3.4. ([#5942](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5942)) -- Add lifecycle rules helpers to bucket. ([#5877](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5877)) - -### Implementation Changes -- Add 'stacklevel=2' to deprecation warnings. ([#5897](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5897)) - -### Documentation -- Storage docs: fix typos. ([#5933](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5933)) -- Prep storage docs for repo split. ([#5923](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5923)) - -### Internal / Testing Changes -- Harden systest teardown further. ([#5900](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5900)) -- Nox: use inplace installs ([#5865](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/5865)) - -## 1.11.0 - -### Implementation Changes -- Preserve message / args from an `InvalidResponse`. (#5492) -- Fix generating signed urls for blobs with non-ascii names. (#5625) -- Move bucket location specification to `Bucket.create`; deprecate `Bucket.location` setter (#5808) - -### New Features -- Add `Client.get_service_account_email`. (#5765) - -### Documentation -- Clarify `None` values for resource-backed properties. (#5509) -- Elaborate docs for `{Bucket,Blob}.make_{public,private}`; note how to enable anonymous accesss to `Blob.public_url`. (#5767) - -### Internal / Testing Changes -- Harden `create_bucket` systest against 429 responses. (#5535) -- Add system test: signed URLs w/ non-ASCII blob name. 
(#5626) -- Harden `tearDownModule` against 429 TooManyRequests. (#5701) -- Retry `notification.create()` on `503 ServiceUnavailable`. (#5741) -- Fix failing KMS system tests. (#5832, #5837, #5860) - -## 1.10.0 - -### New Features -- Add support for KMS keys (#5259) -- Add `{Blob,Bucket}make_private` method (#5336) - -### Internal / Testing Changes -- Modify system tests to use prerelease versions of grpcio (#5304) - -## 1.9.0 - -### Implementation Changes -- Change GCS batch endpoint from `/batch` to `/batch/storage/v1` (#5040) - -### New Features -- Allow uploading files larger than 2GB by using Resumable Media Requests (#5187) -- Add range downloads (#5081) - -### Documentation -- Update docstring to reflect correct units (#5277) -- Replace link to 404 object IAM docs with a note on limited utility. (#5181) -- Update doc reference in GCS client documentation (#5084) -- Add see also for `Bucket.create` method call for `Client.create_bucket()` documentation. (#5073) -- Link out to requester pays docs. (#5065) - -### Internal / Testing Changes -- Add testing support for Python 3.7; remove testing support for Python 3.4. (#5295) -- Fix bad trove classifier -- Remove unused var (flake8 warning) (#5280) -- Fix unit test moving batch to batch/storage/v1 (#5082) - -## 1.8.0 - -### New features - -- Implement predefined acl (#4757) -- Add support for resumable signed url generation (#4789) - -### Implementation changes - -- Do not quote embedded slashes for public / signed URLs (#4716) - -### Dependencies - -- Update dependency range for api-core to include v1.0.0 releases (#4944) - -### Documentation - -- Missing word in docstring (#4763) - -### Testing and internal changes - -- Install local dependencies when running lint (#4936) -- Re-enable lint for tests, remove usage of pylint (#4921) -- Normalize all setup.py files (#4909) - -## 1.7.0 - -### Features - -- Enable anonymous access to blobs in public buckets (#4315) -- Make project optional / overridable for storage client (#4381) -- Relax regex used to test for valid project IDs (#4543) -- Add support for `source_generation` parameter to `Bucket.copy_blob` (#4546) - -## 1.6.0 - -### Documentation - -- Added link to "Python Development Environment Setup Guide" in - project README (#4187, h/t to @michaelawyu) - -### Dependencies - -- Upgrading to `google-cloud-core >= 0.28.0` and adding dependency - on `google-api-core` (#4221, #4280) -- Requiring `google-resumable-media >= 0.3.1` (#4244) - -PyPI: https://pypi.org/project/google-cloud-storage/1.6.0/ diff --git a/storage/LICENSE b/storage/LICENSE deleted file mode 100644 index d64569567334..000000000000 --- a/storage/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
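The deleted storage README and docs/snippets.py further down in this diff document the standalone google-cloud-storage client; the following is a rough, non-authoritative sketch of the workflow they describe, not part of the original patch. The bucket name and file paths are placeholders, and application-default credentials are assumed.

.. code:: python

    from google.cloud import storage
    from google.cloud.storage.bucket import Bucket

    client = storage.Client()

    # lookup_bucket() returns None when the bucket does not exist (placeholder name).
    bucket = client.lookup_bucket("my-bucket")
    if bucket is None:
        bucket = client.create_bucket("my-bucket")
    assert isinstance(bucket, Bucket)

    # Upload a local file (placeholder path), then list what the bucket contains.
    blob = bucket.blob("remote/path/storage.txt")
    blob.upload_from_filename(filename="/local/path.txt")

    for existing in bucket.list_blobs():
        print(existing.name)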
diff --git a/storage/MANIFEST.in b/storage/MANIFEST.in deleted file mode 100644 index fc77f8c82ff0..000000000000 --- a/storage/MANIFEST.in +++ /dev/null @@ -1,4 +0,0 @@ -include README.rst LICENSE -recursive-include google *.json *.proto -recursive-include tests * -global-exclude *.pyc __pycache__ diff --git a/storage/README.rst b/storage/README.rst deleted file mode 100644 index bb15ee569b49..000000000000 --- a/storage/README.rst +++ /dev/null @@ -1,104 +0,0 @@ -Python Client for Google Cloud Storage -====================================== - -|GA| |pypi| |versions| - -`Google Cloud Storage`_ allows you to store data on -Google infrastructure with very high reliability, performance and -availability, and can be used to distribute large data objects to users -via direct download. - -- `Client Library Documentation`_ -- `Storage API docs`_ - -.. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg - :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#general-availability -.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-storage.svg - :target: https://pypi.org/project/google-cloud-storage -.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-storage.svg - :target: https://pypi.org/project/google-cloud-storage -.. _Google Cloud Storage: https://cloud.google.com/storage/docs -.. _Client Library Documentation: https://googleapis.dev/python/storage/latest -.. _Storage API docs: https://cloud.google.com/storage/docs/json_api/v1 - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. `Enable the Google Cloud Storage API.`_ -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Enable the Google Cloud Storage API.: https://cloud.google.com/storage -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Supported Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 - -Deprecated Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv - source /bin/activate - /bin/pip install google-cloud-storage - - -Windows -^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv - \Scripts\activate - \Scripts\pip.exe install google-cloud-storage - - -Example Usage -~~~~~~~~~~~~~ - -You need to create a Google Cloud Storage bucket to use this client library. -Follow along with the `official Google Cloud Storage documentation`_ to learn -how to create a bucket. - -.. _official Google Cloud Storage documentation: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets - -.. 
code:: python - - from google.cloud import storage - client = storage.Client() - # https://console.cloud.google.com/storage/browser/[bucket-id]/ - bucket = client.get_bucket('bucket-id-here') - # Then do other things... - blob = bucket.get_blob('remote/path/to/file.txt') - print(blob.download_as_string()) - blob.upload_from_string('New contents!') - blob2 = bucket.blob('remote/path/storage.txt') - blob2.upload_from_filename(filename='/local/path.txt') diff --git a/storage/docs/README.rst b/storage/docs/README.rst deleted file mode 120000 index 89a0106941ff..000000000000 --- a/storage/docs/README.rst +++ /dev/null @@ -1 +0,0 @@ -../README.rst \ No newline at end of file diff --git a/storage/docs/_static/custom.css b/storage/docs/_static/custom.css deleted file mode 100644 index 9a6f9f8ddc3a..000000000000 --- a/storage/docs/_static/custom.css +++ /dev/null @@ -1,4 +0,0 @@ -div#python2-eol { - border-color: red; - border-width: medium; -} \ No newline at end of file diff --git a/storage/docs/_templates/layout.html b/storage/docs/_templates/layout.html deleted file mode 100644 index de457b2c2767..000000000000 --- a/storage/docs/_templates/layout.html +++ /dev/null @@ -1,49 +0,0 @@ -{% extends "!layout.html" %} -{%- block content %} -{%- if theme_fixed_sidebar|lower == 'true' %} -
- {{ sidebar() }} - {%- block document %} -
- {%- if render_sidebar %} -
- {%- endif %} - - {%- block relbar_top %} - {%- if theme_show_relbar_top|tobool %} - - {%- endif %} - {% endblock %} - -
-
- On January 1, 2020 this library will no longer support Python 2 on the latest released version. - Previously released library versions will continue to be available. For more information please - visit Python 2 support on Google Cloud. -
- {% block body %} {% endblock %} -
- - {%- block relbar_bottom %} - {%- if theme_show_relbar_bottom|tobool %} - - {%- endif %} - {% endblock %} - - {%- if render_sidebar %} -
- {%- endif %} -
- {%- endblock %} -
-
-{%- else %} -{{ super() }} -{%- endif %} -{%- endblock %} diff --git a/storage/docs/acl.rst b/storage/docs/acl.rst deleted file mode 100644 index f1f7d028907d..000000000000 --- a/storage/docs/acl.rst +++ /dev/null @@ -1,6 +0,0 @@ -ACL -~~~ - -.. automodule:: google.cloud.storage.acl - :members: - :show-inheritance: diff --git a/storage/docs/batch.rst b/storage/docs/batch.rst deleted file mode 100644 index 992dd9349157..000000000000 --- a/storage/docs/batch.rst +++ /dev/null @@ -1,6 +0,0 @@ -Batches -~~~~~~~ - -.. automodule:: google.cloud.storage.batch - :members: - :show-inheritance: diff --git a/storage/docs/blobs.rst b/storage/docs/blobs.rst deleted file mode 100644 index 19392a741d64..000000000000 --- a/storage/docs/blobs.rst +++ /dev/null @@ -1,7 +0,0 @@ -Blobs / Objects -~~~~~~~~~~~~~~~ - -.. automodule:: google.cloud.storage.blob - :members: - :inherited-members: - :show-inheritance: diff --git a/storage/docs/buckets.rst b/storage/docs/buckets.rst deleted file mode 100644 index c42d7e303166..000000000000 --- a/storage/docs/buckets.rst +++ /dev/null @@ -1,7 +0,0 @@ -Buckets -~~~~~~~ - -.. automodule:: google.cloud.storage.bucket - :members: - :inherited-members: - :show-inheritance: diff --git a/storage/docs/changelog.md b/storage/docs/changelog.md deleted file mode 120000 index 04c99a55caae..000000000000 --- a/storage/docs/changelog.md +++ /dev/null @@ -1 +0,0 @@ -../CHANGELOG.md \ No newline at end of file diff --git a/storage/docs/client.rst b/storage/docs/client.rst deleted file mode 100644 index 54f094dea9be..000000000000 --- a/storage/docs/client.rst +++ /dev/null @@ -1,6 +0,0 @@ -Storage Client -~~~~~~~~~~~~~~ - -.. automodule:: google.cloud.storage.client - :members: - :show-inheritance: diff --git a/storage/docs/conf.py b/storage/docs/conf.py deleted file mode 100644 index cdf8c7c62b64..000000000000 --- a/storage/docs/conf.py +++ /dev/null @@ -1,359 +0,0 @@ -# -*- coding: utf-8 -*- -# -# google-cloud-storage documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. 
-templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-storage" -copyright = u"2017, Google" -author = u"Google APIs" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. 
-# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-storage-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). 
- #'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - #'preamble': '', - # Latex figure (float) alignment - #'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-storage.tex", - u"google-cloud-storage Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-storage", - u"google-cloud-storage Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-storage", - u"google-cloud-storage Documentation", - author, - "google-cloud-storage", - "GAPIC library for the Storage API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("https://requests.kennethreitz.org/en/stable/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/storage/docs/constants.rst b/storage/docs/constants.rst deleted file mode 100644 index ddf5b81f29a7..000000000000 --- a/storage/docs/constants.rst +++ /dev/null @@ -1,7 +0,0 @@ -Constants -~~~~~~~~~ - -.. 
automodule:: google.cloud.storage.constants - :members: - :member-order: bysource - diff --git a/storage/docs/hmac_key.rst b/storage/docs/hmac_key.rst deleted file mode 100644 index 432be5f64ebe..000000000000 --- a/storage/docs/hmac_key.rst +++ /dev/null @@ -1,6 +0,0 @@ -HMAC Key Metadata -~~~~~~~~~~~~~~~~~ - -.. automodule:: google.cloud.storage.hmac_key - :members: - :show-inheritance: diff --git a/storage/docs/index.rst b/storage/docs/index.rst deleted file mode 100644 index 7a74f12cdf7c..000000000000 --- a/storage/docs/index.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. include:: README.rst - -.. note:: - - Because the storage client uses the third-party :mod:`requests` library by - default, it is safe to share instances across threads. In multiprocessing - scenarious, best practice is to create client instances *after* - :class:`multiprocessing.Pool` or :class:`multiprocessing.Process` invokes - :func:`os.fork`. - -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - client - blobs - buckets - acl - batch - constants - hmac_key - notification - -Changelog ---------- -.. toctree:: - :maxdepth: 2 - - changelog diff --git a/storage/docs/notification.rst b/storage/docs/notification.rst deleted file mode 100644 index cdb381d2f703..000000000000 --- a/storage/docs/notification.rst +++ /dev/null @@ -1,6 +0,0 @@ -Notification -~~~~~~~~~~~~ - -.. automodule:: google.cloud.storage.notification - :members: - :show-inheritance: diff --git a/storage/docs/snippets.py b/storage/docs/snippets.py deleted file mode 100644 index 8171d5cf80f1..000000000000 --- a/storage/docs/snippets.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Testable usage examples for Google Cloud Storage API wrapper - -Each example function takes a ``client`` argument (which must be an instance -of :class:`google.cloud.storage.client.Client`) and uses it to perform a task -with the API. - -To facilitate running the examples as system tests, each example is also passed -a ``to_delete`` list; the function adds to the list any objects created which -need to be deleted during teardown. -""" - -from google.cloud import storage - - -def snippet(func): - """Mark ``func`` as a snippet example function.""" - func._snippet = True - return func - - -@snippet -def storage_get_started(client, to_delete): - # [START storage_get_started] - client = storage.Client() - bucket = client.get_bucket("bucket-id-here") - # Then do other things... - blob = bucket.get_blob("/remote/path/to/file.txt") - assert blob.download_as_string() == b"My old contents!" 
- blob.upload_from_string("New contents!") - blob2 = bucket.blob("/remote/path/storage.txt") - blob2.upload_from_filename(filename="/local/path.txt") - # [END storage_get_started] - - to_delete.append(bucket) - - -@snippet -def client_bucket_acl(client, to_delete): - bucket_name = "system-test-bucket" - bucket = client.bucket(bucket_name) - bucket.create() - - # [START client_bucket_acl] - client = storage.Client() - bucket = client.get_bucket(bucket_name) - acl = bucket.acl - # [END client_bucket_acl] - to_delete.append(bucket) - - # [START acl_user_settings] - acl.user("me@example.org").grant_read() - acl.all_authenticated().grant_write() - # [END acl_user_settings] - - # [START acl_save] - acl.save() - # [END acl_save] - - # [START acl_revoke_write] - acl.all().grant_read() - acl.all().revoke_write() - # [END acl_revoke_write] - - # [START acl_save_bucket] - bucket.acl.save(acl=acl) - # [END acl_save_bucket] - - # [START acl_print] - print(list(acl)) - # [{'role': 'OWNER', 'entity': 'allUsers'}, ...] - # [END acl_print] - - -@snippet -def download_to_file(client, to_delete): - # [START download_to_file] - from google.cloud.storage import Blob - - client = storage.Client(project="my-project") - bucket = client.get_bucket("my-bucket") - encryption_key = "c7f32af42e45e85b9848a6a14dd2a8f6" - blob = Blob("secure-data", bucket, encryption_key=encryption_key) - blob.upload_from_string("my secret message.") - with open("/tmp/my-secure-file", "wb") as file_obj: - blob.download_to_file(file_obj) - # [END download_to_file] - - to_delete.append(blob) - - -@snippet -def upload_from_file(client, to_delete): - # [START upload_from_file] - from google.cloud.storage import Blob - - client = storage.Client(project="my-project") - bucket = client.get_bucket("my-bucket") - encryption_key = "aa426195405adee2c8081bb9e7e74b19" - blob = Blob("secure-data", bucket, encryption_key=encryption_key) - with open("my-file", "rb") as my_file: - blob.upload_from_file(my_file) - # [END upload_from_file] - - to_delete.append(blob) - - -@snippet -def get_blob(client, to_delete): - from google.cloud.storage.blob import Blob - - # [START get_blob] - client = storage.Client() - bucket = client.get_bucket("my-bucket") - assert isinstance(bucket.get_blob("/path/to/blob.txt"), Blob) - # - assert not bucket.get_blob("/does-not-exist.txt") - # None - # [END get_blob] - - to_delete.append(bucket) - - -@snippet -def delete_blob(client, to_delete): - # [START delete_blob] - from google.cloud.exceptions import NotFound - - client = storage.Client() - bucket = client.get_bucket("my-bucket") - blobs = list(bucket.list_blobs()) - assert len(blobs) > 0 - # [] - bucket.delete_blob("my-file.txt") - try: - bucket.delete_blob("doesnt-exist") - except NotFound: - pass - # [END delete_blob] - - blob = None - # [START delete_blobs] - bucket.delete_blobs([blob], on_error=lambda blob: None) - # [END delete_blobs] - - to_delete.append(bucket) - - -@snippet -def configure_website(client, to_delete): - bucket_name = "test-bucket" - # [START configure_website] - client = storage.Client() - bucket = client.get_bucket(bucket_name) - bucket.configure_website("index.html", "404.html") - # [END configure_website] - - # [START make_public] - bucket.make_public(recursive=True, future=True) - # [END make_public] - - to_delete.append(bucket) - - -@snippet -def get_bucket(client, to_delete): - import google - - # [START get_bucket] - try: - bucket = client.get_bucket("my-bucket") - except google.cloud.exceptions.NotFound: - print("Sorry, that bucket does not 
exist!") - # [END get_bucket] - to_delete.append(bucket) - - -@snippet -def add_lifecycle_delete_rule(client, to_delete): - # [START add_lifecycle_delete_rule] - bucket = client.get_bucket("my-bucket") - bucket.add_lifecycle_rule_delete(age=2) - bucket.patch() - # [END add_lifecycle_delete_rule] - to_delete.append(bucket) - - -@snippet -def add_lifecycle_set_storage_class_rule(client, to_delete): - # [START add_lifecycle_set_storage_class_rule] - bucket = client.get_bucket("my-bucket") - bucket.add_lifecycle_rule_set_storage_class( - "COLD_LINE", matches_storage_class=["NEARLINE"] - ) - bucket.patch() - # [END add_lifecycle_set_storage_class_rule] - to_delete.append(bucket) - - -@snippet -def lookup_bucket(client, to_delete): - from google.cloud.storage.bucket import Bucket - - # [START lookup_bucket] - bucket = client.lookup_bucket("doesnt-exist") - assert not bucket - # None - bucket = client.lookup_bucket("my-bucket") - assert isinstance(bucket, Bucket) - # - # [END lookup_bucket] - - to_delete.append(bucket) - - -@snippet -def create_bucket(client, to_delete): - from google.cloud.storage import Bucket - - # [START create_bucket] - bucket = client.create_bucket("my-bucket") - assert isinstance(bucket, Bucket) - # - # [END create_bucket] - - to_delete.append(bucket) - - -@snippet -def list_buckets(client, to_delete): - # [START list_buckets] - for bucket in client.list_buckets(): - print(bucket) - # [END list_buckets] - - for bucket in client.list_buckets(): - to_delete.append(bucket) - - -@snippet -def policy_document(client, to_delete): - # pylint: disable=unused-argument - # [START policy_document] - bucket = client.bucket("my-bucket") - conditions = [["starts-with", "$key", ""], {"acl": "public-read"}] - - policy = bucket.generate_upload_policy(conditions) - - # Generate an upload form using the form fields. - policy_fields = "".join( - ''.format( - key=key, value=value - ) - for key, value in policy.items() - ) - - upload_form = ( - '
' - '' - '' - '' - '' - '' - "{policy_fields}" - "" - ).format(bucket_name=bucket.name, policy_fields=policy_fields) - - print(upload_form) - # [END policy_document] - - -def _line_no(func): - code = getattr(func, "__code__", None) or getattr(func, "func_code") - return code.co_firstlineno - - -def _find_examples(): - funcs = [obj for obj in globals().values() if getattr(obj, "_snippet", False)] - for func in sorted(funcs, key=_line_no): - yield func - - -def _name_and_doc(func): - return func.__name__, func.__doc__ - - -def main(): - client = storage.Client() - for example in _find_examples(): - to_delete = [] - print("%-25s: %s" % _name_and_doc(example)) - try: - example(client, to_delete) - except AssertionError as failure: - print(" FAIL: %s" % (failure,)) - except Exception as error: # pylint: disable=broad-except - print(" ERROR: %r" % (error,)) - for item in to_delete: - item.delete() - - -if __name__ == "__main__": - main() diff --git a/storage/google/__init__.py b/storage/google/__init__.py deleted file mode 100644 index 0e1bc5131ba6..000000000000 --- a/storage/google/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/storage/google/cloud/__init__.py b/storage/google/cloud/__init__.py deleted file mode 100644 index 0e1bc5131ba6..000000000000 --- a/storage/google/cloud/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil - - __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/storage/google/cloud/storage/__init__.py b/storage/google/cloud/storage/__init__.py deleted file mode 100644 index 2b643fc80add..000000000000 --- a/storage/google/cloud/storage/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Shortcut methods for getting set up with Google Cloud Storage. - -You'll typically use these to get started with the API: - -.. literalinclude:: snippets.py - :start-after: [START storage_get_started] - :end-before: [END storage_get_started] - -The main concepts with this API are: - -- :class:`~google.cloud.storage.bucket.Bucket` which represents a particular - bucket (akin to a mounted disk on a computer). - -- :class:`~google.cloud.storage.blob.Blob` which represents a pointer to a - particular entity in Cloud Storage (akin to a file path on a remote - machine). -""" - - -from pkg_resources import get_distribution - -__version__ = get_distribution("google-cloud-storage").version - -from google.cloud.storage.batch import Batch -from google.cloud.storage.blob import Blob -from google.cloud.storage.bucket import Bucket -from google.cloud.storage.client import Client - - -__all__ = ["__version__", "Batch", "Blob", "Bucket", "Client"] diff --git a/storage/google/cloud/storage/_helpers.py b/storage/google/cloud/storage/_helpers.py deleted file mode 100644 index 5bfa13e313ea..000000000000 --- a/storage/google/cloud/storage/_helpers.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helper functions for Cloud Storage utility classes. - -These are *not* part of the API. -""" - -import base64 -from hashlib import md5 -import os - -STORAGE_EMULATOR_ENV_VAR = "STORAGE_EMULATOR_HOST" -"""Environment variable defining host for Storage emulator.""" - -_DEFAULT_STORAGE_HOST = u"https://storage.googleapis.com" - - -def _get_storage_host(): - return os.environ.get(STORAGE_EMULATOR_ENV_VAR, _DEFAULT_STORAGE_HOST) - - -def _validate_name(name): - """Pre-flight ``Bucket`` name validation. - - :type name: str or :data:`NoneType` - :param name: Proposed bucket name. - - :rtype: str or :data:`NoneType` - :returns: ``name`` if valid. - """ - if name is None: - return - - # The first and last characters must be alphanumeric. - if not all([name[0].isalnum(), name[-1].isalnum()]): - raise ValueError("Bucket names must start and end with a number or letter.") - return name - - -class _PropertyMixin(object): - """Abstract mixin for cloud storage classes with associated properties. - - Non-abstract subclasses should implement: - - path - - client - - user_project - - :type name: str - :param name: The name of the object. Bucket names must start and end with a - number or letter. 
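Since _helpers.py above introduces the STORAGE_EMULATOR_HOST variable, a short usage sketch may help. The emulator address is hypothetical, and it assumes the client picks the value up through _get_storage_host() when it builds its connection:

    import os

    from google.cloud import storage

    # Hypothetical local emulator; when the variable is unset,
    # _get_storage_host() falls back to https://storage.googleapis.com.
    os.environ["STORAGE_EMULATOR_HOST"] = "http://localhost:9023"

    client = storage.Client(project="test-project")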
- """ - - def __init__(self, name=None): - self.name = name - self._properties = {} - self._changes = set() - - @property - def path(self): - """Abstract getter for the object path.""" - raise NotImplementedError - - @property - def client(self): - """Abstract getter for the object client.""" - raise NotImplementedError - - @property - def user_project(self): - """Abstract getter for the object user_project.""" - raise NotImplementedError - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current object. - - :rtype: :class:`google.cloud.storage.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self.client - return client - - def _encryption_headers(self): - """Return any encryption headers needed to fetch the object. - - .. note:: - Defined here because :meth:`reload` calls it, but this method is - really only relevant for :class:`~google.cloud.storage.blob.Blob`. - - :rtype: dict - :returns: a mapping of encryption-related headers. - """ - return {} - - @property - def _query_params(self): - """Default query parameters.""" - params = {} - if self.user_project is not None: - params["userProject"] = self.user_project - return params - - def reload(self, client=None): - """Reload properties from Cloud Storage. - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current object. - """ - client = self._require_client(client) - query_params = self._query_params - # Pass only '?projection=noAcl' here because 'acl' and related - # are handled via custom endpoints. - query_params["projection"] = "noAcl" - api_response = client._connection.api_request( - method="GET", - path=self.path, - query_params=query_params, - headers=self._encryption_headers(), - _target_object=self, - ) - self._set_properties(api_response) - - def _patch_property(self, name, value): - """Update field of this object's properties. - - This method will only update the field provided and will not - touch the other fields. - - It **will not** reload the properties from the server. The behavior is - local only and syncing occurs via :meth:`patch`. - - :type name: str - :param name: The field name to update. - - :type value: object - :param value: The value being updated. - """ - self._changes.add(name) - self._properties[name] = value - - def _set_properties(self, value): - """Set the properties for the current object. - - :type value: dict or :class:`google.cloud.storage.batch._FutureDict` - :param value: The properties to be set. - """ - self._properties = value - # If the values are reset, the changes must as well. - self._changes = set() - - def patch(self, client=None): - """Sends all changed properties in a PATCH request. - - Updates the ``_properties`` with the response from the backend. - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current object. 
- """ - client = self._require_client(client) - query_params = self._query_params - # Pass '?projection=full' here because 'PATCH' documented not - # to work properly w/ 'noAcl'. - query_params["projection"] = "full" - update_properties = {key: self._properties[key] for key in self._changes} - - # Make the API call. - api_response = client._connection.api_request( - method="PATCH", - path=self.path, - data=update_properties, - query_params=query_params, - _target_object=self, - ) - self._set_properties(api_response) - - def update(self, client=None): - """Sends all properties in a PUT request. - - Updates the ``_properties`` with the response from the backend. - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current object. - """ - client = self._require_client(client) - query_params = self._query_params - query_params["projection"] = "full" - api_response = client._connection.api_request( - method="PUT", - path=self.path, - data=self._properties, - query_params=query_params, - _target_object=self, - ) - self._set_properties(api_response) - - -def _scalar_property(fieldname): - """Create a property descriptor around the :class:`_PropertyMixin` helpers. - """ - - def _getter(self): - """Scalar property getter.""" - return self._properties.get(fieldname) - - def _setter(self, value): - """Scalar property setter.""" - self._patch_property(fieldname, value) - - return property(_getter, _setter) - - -def _write_buffer_to_hash(buffer_object, hash_obj, digest_block_size=8192): - """Read blocks from a buffer and update a hash with them. - - :type buffer_object: bytes buffer - :param buffer_object: Buffer containing bytes used to update a hash object. - - :type hash_obj: object that implements update - :param hash_obj: A hash object (MD5 or CRC32-C). - - :type digest_block_size: int - :param digest_block_size: The block size to write to the hash. - Defaults to 8192. - """ - block = buffer_object.read(digest_block_size) - - while len(block) > 0: - hash_obj.update(block) - # Update the block for the next iteration. - block = buffer_object.read(digest_block_size) - - -def _base64_md5hash(buffer_object): - """Get MD5 hash of bytes (as base64). - - :type buffer_object: bytes buffer - :param buffer_object: Buffer containing bytes used to compute an MD5 - hash (as base64). - - :rtype: str - :returns: A base64 encoded digest of the MD5 hash. - """ - hash_obj = md5() - _write_buffer_to_hash(buffer_object, hash_obj) - digest_bytes = hash_obj.digest() - return base64.b64encode(digest_bytes) diff --git a/storage/google/cloud/storage/_http.py b/storage/google/cloud/storage/_http.py deleted file mode 100644 index 032f70e02185..000000000000 --- a/storage/google/cloud/storage/_http.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Create / interact with Google Cloud Storage connections.""" - -from google.cloud import _http - -from google.cloud.storage import __version__ - - -class Connection(_http.JSONConnection): - """A connection to Google Cloud Storage via the JSON REST API. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: The client that owns the current connection. - - :type client_info: :class:`~google.api_core.client_info.ClientInfo` - :param client_info: (Optional) instance used to generate user agent. - """ - - DEFAULT_API_ENDPOINT = "https://storage.googleapis.com" - - def __init__(self, client, client_info=None, api_endpoint=DEFAULT_API_ENDPOINT): - super(Connection, self).__init__(client, client_info) - self.API_BASE_URL = api_endpoint - self._client_info.client_library_version = __version__ - - # TODO: When metrics all use gccl, this should be removed #9552 - if self._client_info.user_agent is None: # pragma: no branch - self._client_info.user_agent = "" - self._client_info.user_agent += " gcloud-python/{} ".format(__version__) - - API_VERSION = "v1" - """The version of the API, used in building the API call's URL.""" - - API_URL_TEMPLATE = "{api_base_url}/storage/{api_version}{path}" - """A template for the URL of a particular API call.""" diff --git a/storage/google/cloud/storage/_signing.py b/storage/google/cloud/storage/_signing.py deleted file mode 100644 index e7c8e3328703..000000000000 --- a/storage/google/cloud/storage/_signing.py +++ /dev/null @@ -1,668 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import base64 -import binascii -import collections -import datetime -import hashlib -import re -import json - -import six - -import google.auth.credentials - -from google.auth import exceptions -from google.auth.transport import requests -from google.cloud import _helpers - - -NOW = datetime.datetime.utcnow # To be replaced by tests. -MULTIPLE_SPACES_RE = r"\s+" -MULTIPLE_SPACES = re.compile(MULTIPLE_SPACES_RE) - -SERVICE_ACCOUNT_URL = ( - "https://googleapis.dev/python/google-api-core/latest/" - "auth.html#setting-up-a-service-account" -) - - -def ensure_signed_credentials(credentials): - """Raise AttributeError if the credentials are unsigned. - - :type credentials: :class:`google.auth.credentials.Signing` - :param credentials: The credentials used to create a private key - for signing text. - - :raises: :exc:`AttributeError` if credentials is not an instance - of :class:`google.auth.credentials.Signing`. - """ - if not isinstance(credentials, google.auth.credentials.Signing): - raise AttributeError( - "you need a private key to sign credentials." - "the credentials you are currently using {} " - "just contains a token. see {} for more " - "details.".format(type(credentials), SERVICE_ACCOUNT_URL) - ) - - -def get_signed_query_params_v2(credentials, expiration, string_to_sign): - """Gets query parameters for creating a signed URL. 
- - :type credentials: :class:`google.auth.credentials.Signing` - :param credentials: The credentials used to create a private key - for signing text. - - :type expiration: int or long - :param expiration: When the signed URL should expire. - - :type string_to_sign: str - :param string_to_sign: The string to be signed by the credentials. - - :raises: :exc:`AttributeError` if credentials is not an instance - of :class:`google.auth.credentials.Signing`. - - :rtype: dict - :returns: Query parameters matching the signing credentials with a - signed payload. - """ - ensure_signed_credentials(credentials) - signature_bytes = credentials.sign_bytes(string_to_sign) - signature = base64.b64encode(signature_bytes) - service_account_name = credentials.signer_email - return { - "GoogleAccessId": service_account_name, - "Expires": str(expiration), - "Signature": signature, - } - - -def get_expiration_seconds_v2(expiration): - """Convert 'expiration' to a number of seconds in the future. - - :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] - :param expiration: Point in time when the signed URL should expire. - - :raises: :exc:`TypeError` when expiration is not a valid type. - - :rtype: int - :returns: a timestamp as an absolute number of seconds since epoch. - """ - # If it's a timedelta, add it to `now` in UTC. - if isinstance(expiration, datetime.timedelta): - now = NOW().replace(tzinfo=_helpers.UTC) - expiration = now + expiration - - # If it's a datetime, convert to a timestamp. - if isinstance(expiration, datetime.datetime): - micros = _helpers._microseconds_from_datetime(expiration) - expiration = micros // 10 ** 6 - - if not isinstance(expiration, six.integer_types): - raise TypeError( - "Expected an integer timestamp, datetime, or " - "timedelta. Got %s" % type(expiration) - ) - return expiration - - -_EXPIRATION_TYPES = six.integer_types + (datetime.datetime, datetime.timedelta) - - -def get_expiration_seconds_v4(expiration): - """Convert 'expiration' to a number of seconds offset from the current time. - - :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] - :param expiration: Point in time when the signed URL should expire. - - :raises: :exc:`TypeError` when expiration is not a valid type. - :raises: :exc:`ValueError` when expiration is too large. - :rtype: Integer - :returns: seconds in the future when the signed URL will expire - """ - if not isinstance(expiration, _EXPIRATION_TYPES): - raise TypeError( - "Expected an integer timestamp, datetime, or " - "timedelta. Got %s" % type(expiration) - ) - - now = NOW().replace(tzinfo=_helpers.UTC) - - if isinstance(expiration, six.integer_types): - seconds = expiration - - if isinstance(expiration, datetime.datetime): - - if expiration.tzinfo is None: - expiration = expiration.replace(tzinfo=_helpers.UTC) - - expiration = expiration - now - - if isinstance(expiration, datetime.timedelta): - seconds = int(expiration.total_seconds()) - - if seconds > SEVEN_DAYS: - raise ValueError( - "Max allowed expiration interval is seven days (%d seconds)".format( - SEVEN_DAYS - ) - ) - - return seconds - - -def get_canonical_headers(headers): - """Canonicalize headers for signing. - - See: - https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers - - :type headers: Union[dict|List(Tuple(str,str))] - :param headers: - (Optional) Additional HTTP headers to be included as part of the - signed URLs. 
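The two expiration helpers above accept three shapes of input. A sketch of equivalent one-hour expirations; for V2 the result becomes an absolute timestamp, while for V4 it becomes an offset in seconds that may not exceed seven days:

    import datetime
    import time

    one_hour_delta = datetime.timedelta(hours=1)                # timedelta offset
    one_hour_dt = datetime.datetime.utcnow() + one_hour_delta   # datetime (naive treated as UTC)
    one_hour_ts = int(time.time()) + 3600                       # absolute POSIX timestamp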
See: - https://cloud.google.com/storage/docs/xml-api/reference-headers - Requests using the signed URL *must* pass the specified header - (name and value) with each request for the URL. - - :rtype: str - :returns: List of headers, normalized / sortted per the URL refernced above. - """ - if headers is None: - headers = [] - elif isinstance(headers, dict): - headers = list(headers.items()) - - if not headers: - return [], [] - - normalized = collections.defaultdict(list) - for key, val in headers: - key = key.lower().strip() - val = MULTIPLE_SPACES.sub(" ", val.strip()) - normalized[key].append(val) - - ordered_headers = sorted((key, ",".join(val)) for key, val in normalized.items()) - - canonical_headers = ["{}:{}".format(*item) for item in ordered_headers] - return canonical_headers, ordered_headers - - -_Canonical = collections.namedtuple( - "_Canonical", ["method", "resource", "query_parameters", "headers"] -) - - -def canonicalize(method, resource, query_parameters, headers): - """Canonicalize method, resource - - :type method: str - :param method: The HTTP verb that will be used when requesting the URL. - Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the - signature will additionally contain the `x-goog-resumable` - header, and the method changed to POST. See the signed URL - docs regarding this flow: - https://cloud.google.com/storage/docs/access-control/signed-urls - - :type resource: str - :param resource: A pointer to a specific resource - (typically, ``/bucket-name/path/to/blob.txt``). - - :type query_parameters: dict - :param query_parameters: - (Optional) Additional query paramtersto be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers#query - - :type headers: Union[dict|List(Tuple(str,str))] - :param headers: - (Optional) Additional HTTP headers to be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers - Requests using the signed URL *must* pass the specified header - (name and value) with each request for the URL. - - :rtype: :class:_Canonical - :returns: Canonical method, resource, query_parameters, and headers. - """ - headers, _ = get_canonical_headers(headers) - - if method == "RESUMABLE": - method = "POST" - headers.append("x-goog-resumable:start") - - if query_parameters is None: - return _Canonical(method, resource, [], headers) - - normalized_qp = sorted( - (key.lower(), value and value.strip() or "") - for key, value in query_parameters.items() - ) - encoded_qp = six.moves.urllib.parse.urlencode(normalized_qp) - canonical_resource = "{}?{}".format(resource, encoded_qp) - return _Canonical(method, canonical_resource, normalized_qp, headers) - - -def generate_signed_url_v2( - credentials, - resource, - expiration, - api_access_endpoint="", - method="GET", - content_md5=None, - content_type=None, - response_type=None, - response_disposition=None, - generation=None, - headers=None, - query_parameters=None, - service_account_email=None, - access_token=None, -): - """Generate a V2 signed URL to provide query-string auth'n to a resource. - - .. note:: - - Assumes ``credentials`` implements the - :class:`google.auth.credentials.Signing` interface. Also assumes - ``credentials`` has a ``service_account_email`` property which - identifies the credentials. - - .. note:: - - If you are on Google Compute Engine, you can't generate a signed URL. - Follow `Issue 922`_ for updates on this. 
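A worked example of the header canonicalization above, with hypothetical metadata headers: names are lower-cased and trimmed, whitespace runs inside values collapse to single spaces, duplicate names are joined with commas, and the result is sorted by name:

    headers = [
        ("X-Goog-Meta-Owner", "  data  team "),
        ("x-goog-meta-owner", "analytics"),
        ("Host", "storage.googleapis.com"),
    ]
    # get_canonical_headers(headers) would return the canonical list
    #   ["host:storage.googleapis.com", "x-goog-meta-owner:data team,analytics"]
    # along with the ordered (name, value) pairs.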
If you'd like to be able to - generate a signed URL from GCE, you can use a standard service account - from a JSON file rather than a GCE service account. - - See headers `reference`_ for more details on optional arguments. - - .. _Issue 922: https://github.com/GoogleCloudPlatform/\ - google-cloud-python/issues/922 - .. _reference: https://cloud.google.com/storage/docs/reference-headers - - :type credentials: :class:`google.auth.credentials.Signing` - :param credentials: Credentials object with an associated private key to - sign text. - - :type resource: str - :param resource: A pointer to a specific resource - (typically, ``/bucket-name/path/to/blob.txt``). - - :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] - :param expiration: Point in time when the signed URL should expire. - - :type api_access_endpoint: str - :param api_access_endpoint: Optional URI base. Defaults to empty string. - - :type method: str - :param method: The HTTP verb that will be used when requesting the URL. - Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the - signature will additionally contain the `x-goog-resumable` - header, and the method changed to POST. See the signed URL - docs regarding this flow: - https://cloud.google.com/storage/docs/access-control/signed-urls - - - :type content_md5: str - :param content_md5: (Optional) The MD5 hash of the object referenced by - ``resource``. - - :type content_type: str - :param content_type: (Optional) The content type of the object referenced - by ``resource``. - - :type response_type: str - :param response_type: (Optional) Content type of responses to requests for - the signed URL. Ignored if content_type is set on - object/blob metadata. - - :type response_disposition: str - :param response_disposition: (Optional) Content disposition of responses to - requests for the signed URL. - - :type generation: str - :param generation: (Optional) A value that indicates which generation of - the resource to fetch. - - :type headers: Union[dict|List(Tuple(str,str))] - :param headers: - (Optional) Additional HTTP headers to be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers - Requests using the signed URL *must* pass the specified header - (name and value) with each request for the URL. - - :type service_account_email: str - :param service_account_email: (Optional) E-mail address of the service account. - - :type access_token: str - :param access_token: (Optional) Access token for a service account. - - :type query_parameters: dict - :param query_parameters: - (Optional) Additional query paramtersto be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers#query - - :raises: :exc:`TypeError` when expiration is not a valid type. - :raises: :exc:`AttributeError` if credentials is not an instance - of :class:`google.auth.credentials.Signing`. - - :rtype: str - :returns: A signed URL you can use to access the resource - until expiration. - """ - expiration_stamp = get_expiration_seconds_v2(expiration) - - canonical = canonicalize(method, resource, query_parameters, headers) - - # Generate the string to sign. - elements_to_sign = [ - canonical.method, - content_md5 or "", - content_type or "", - str(expiration_stamp), - ] - elements_to_sign.extend(canonical.headers) - elements_to_sign.append(canonical.resource) - string_to_sign = "\n".join(elements_to_sign) - - # Set the right query parameters. 
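Spelling out the V2 string-to-sign assembled just above, for a plain GET with no Content-MD5 or Content-Type and hypothetical expiration and resource values:

    elements_to_sign = [
        "GET",                  # canonical.method
        "",                     # content_md5 (not supplied)
        "",                     # content_type (not supplied)
        "1566426000",           # absolute expiration timestamp
        # canonical.headers would be inserted here, one per line, if any
        "/my-bucket/notes.txt", # canonical.resource
    ]
    string_to_sign = "\n".join(elements_to_sign)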
- if access_token and service_account_email: - signature = _sign_message(string_to_sign, access_token, service_account_email) - signed_query_params = { - "GoogleAccessId": service_account_email, - "Expires": str(expiration), - "Signature": signature, - } - else: - signed_query_params = get_signed_query_params_v2( - credentials, expiration_stamp, string_to_sign - ) - - if response_type is not None: - signed_query_params["response-content-type"] = response_type - if response_disposition is not None: - signed_query_params["response-content-disposition"] = response_disposition - if generation is not None: - signed_query_params["generation"] = generation - - signed_query_params.update(canonical.query_parameters) - sorted_signed_query_params = sorted(signed_query_params.items()) - - # Return the built URL. - return "{endpoint}{resource}?{querystring}".format( - endpoint=api_access_endpoint, - resource=resource, - querystring=six.moves.urllib.parse.urlencode(sorted_signed_query_params), - ) - - -SEVEN_DAYS = 7 * 24 * 60 * 60 # max age for V4 signed URLs. -DEFAULT_ENDPOINT = "https://storage.googleapis.com" - - -def generate_signed_url_v4( - credentials, - resource, - expiration, - api_access_endpoint=DEFAULT_ENDPOINT, - method="GET", - content_md5=None, - content_type=None, - response_type=None, - response_disposition=None, - generation=None, - headers=None, - query_parameters=None, - service_account_email=None, - access_token=None, - _request_timestamp=None, # for testing only -): - """Generate a V4 signed URL to provide query-string auth'n to a resource. - - .. note:: - - Assumes ``credentials`` implements the - :class:`google.auth.credentials.Signing` interface. Also assumes - ``credentials`` has a ``service_account_email`` property which - identifies the credentials. - - .. note:: - - If you are on Google Compute Engine, you can't generate a signed URL. - Follow `Issue 922`_ for updates on this. If you'd like to be able to - generate a signed URL from GCE, you can use a standard service account - from a JSON file rather than a GCE service account. - - See headers `reference`_ for more details on optional arguments. - - .. _Issue 922: https://github.com/GoogleCloudPlatform/\ - google-cloud-python/issues/922 - .. _reference: https://cloud.google.com/storage/docs/reference-headers - - :type credentials: :class:`google.auth.credentials.Signing` - :param credentials: Credentials object with an associated private key to - sign text. - - :type resource: str - :param resource: A pointer to a specific resource - (typically, ``/bucket-name/path/to/blob.txt``). - - :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] - :param expiration: Point in time when the signed URL should expire. - - :type api_access_endpoint: str - :param api_access_endpoint: Optional URI base. Defaults to - "https://storage.googleapis.com/" - - :type method: str - :param method: The HTTP verb that will be used when requesting the URL. - Defaults to ``'GET'``. If method is ``'RESUMABLE'`` then the - signature will additionally contain the `x-goog-resumable` - header, and the method changed to POST. See the signed URL - docs regarding this flow: - https://cloud.google.com/storage/docs/access-control/signed-urls - - - :type content_md5: str - :param content_md5: (Optional) The MD5 hash of the object referenced by - ``resource``. - - :type content_type: str - :param content_type: (Optional) The content type of the object referenced - by ``resource``. 
- - :type response_type: str - :param response_type: (Optional) Content type of responses to requests for - the signed URL. Ignored if content_type is set on - object/blob metadata. - - :type response_disposition: str - :param response_disposition: (Optional) Content disposition of responses to - requests for the signed URL. - - :type generation: str - :param generation: (Optional) A value that indicates which generation of - the resource to fetch. - - :type headers: dict - :param headers: - (Optional) Additional HTTP headers to be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers - Requests using the signed URL *must* pass the specified header - (name and value) with each request for the URL. - - :type query_parameters: dict - :param query_parameters: - (Optional) Additional query paramtersto be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers#query - - :type service_account_email: str - :param service_account_email: (Optional) E-mail address of the service account. - - :type access_token: str - :param access_token: (Optional) Access token for a service account. - - :raises: :exc:`TypeError` when expiration is not a valid type. - :raises: :exc:`AttributeError` if credentials is not an instance - of :class:`google.auth.credentials.Signing`. - - :rtype: str - :returns: A signed URL you can use to access the resource - until expiration. - """ - ensure_signed_credentials(credentials) - expiration_seconds = get_expiration_seconds_v4(expiration) - - if _request_timestamp is None: - now = NOW() - request_timestamp = now.strftime("%Y%m%dT%H%M%SZ") - datestamp = now.date().strftime("%Y%m%d") - else: - request_timestamp = _request_timestamp - datestamp = _request_timestamp[:8] - - client_email = credentials.signer_email - credential_scope = "{}/auto/storage/goog4_request".format(datestamp) - credential = "{}/{}".format(client_email, credential_scope) - - if headers is None: - headers = {} - - if content_type is not None: - headers["Content-Type"] = content_type - - if content_md5 is not None: - headers["Content-MD5"] = content_md5 - - header_names = [key.lower() for key in headers] - if "host" not in header_names: - headers["Host"] = "storage.googleapis.com" - - if method.upper() == "RESUMABLE": - method = "POST" - headers["x-goog-resumable"] = "start" - - canonical_headers, ordered_headers = get_canonical_headers(headers) - canonical_header_string = ( - "\n".join(canonical_headers) + "\n" - ) # Yes, Virginia, the extra newline is part of the spec. 
- signed_headers = ";".join([key for key, _ in ordered_headers]) - - if query_parameters is None: - query_parameters = {} - else: - query_parameters = {key: value or "" for key, value in query_parameters.items()} - - query_parameters["X-Goog-Algorithm"] = "GOOG4-RSA-SHA256" - query_parameters["X-Goog-Credential"] = credential - query_parameters["X-Goog-Date"] = request_timestamp - query_parameters["X-Goog-Expires"] = expiration_seconds - query_parameters["X-Goog-SignedHeaders"] = signed_headers - - if response_type is not None: - query_parameters["response-content-type"] = response_type - - if response_disposition is not None: - query_parameters["response-content-disposition"] = response_disposition - - if generation is not None: - query_parameters["generation"] = generation - - ordered_query_parameters = sorted(query_parameters.items()) - canonical_query_string = six.moves.urllib.parse.urlencode(ordered_query_parameters) - - canonical_elements = [ - method, - resource, - canonical_query_string, - canonical_header_string, - signed_headers, - "UNSIGNED-PAYLOAD", - ] - canonical_request = "\n".join(canonical_elements) - - canonical_request_hash = hashlib.sha256( - canonical_request.encode("ascii") - ).hexdigest() - - string_elements = [ - "GOOG4-RSA-SHA256", - request_timestamp, - credential_scope, - canonical_request_hash, - ] - string_to_sign = "\n".join(string_elements) - - if access_token and service_account_email: - signature = _sign_message(string_to_sign, access_token, service_account_email) - signature_bytes = base64.b64decode(signature) - signature = binascii.hexlify(signature_bytes).decode("ascii") - else: - signature_bytes = credentials.sign_bytes(string_to_sign.encode("ascii")) - signature = binascii.hexlify(signature_bytes).decode("ascii") - - return "{}{}?{}&X-Goog-Signature={}".format( - api_access_endpoint, resource, canonical_query_string, signature - ) - - -def _sign_message(message, access_token, service_account_email): - - """Signs a message. - - :type message: str - :param message: The message to be signed. - - :type access_token: str - :param access_token: Access token for a service account. - - - :type service_account_email: str - :param service_account_email: E-mail address of the service account. - - :raises: :exc:`TransportError` if an `access_token` is unauthorized. - - :rtype: str - :returns: The signature of the message. - - """ - message = _helpers._to_bytes(message) - - method = "POST" - url = "https://iam.googleapis.com/v1/projects/-/serviceAccounts/{}:signBlob?alt=json".format( - service_account_email - ) - headers = { - "Authorization": "Bearer " + access_token, - "Content-type": "application/json", - } - body = json.dumps({"bytesToSign": base64.b64encode(message).decode("utf-8")}) - - request = requests.Request() - response = request(url=url, method=method, body=body, headers=headers) - - if response.status != six.moves.http_client.OK: - raise exceptions.TransportError( - "Error calling the IAM signBytes API: {}".format(response.data) - ) - - data = json.loads(response.data.decode("utf-8")) - return data["signature"] diff --git a/storage/google/cloud/storage/acl.py b/storage/google/cloud/storage/acl.py deleted file mode 100644 index 9b1af1d87f2f..000000000000 --- a/storage/google/cloud/storage/acl.py +++ /dev/null @@ -1,612 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
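Putting the V4 pieces above together for a bare GET, with hypothetical timestamps, bucket, and object: the canonical request is hashed, and that hash becomes the last line of the string that is actually signed:

    import hashlib

    canonical_request = "\n".join([
        "GET",                                    # HTTP method
        "/my-bucket/notes.txt",                   # resource path
        "X-Goog-Algorithm=GOOG4-RSA-SHA256&...",  # sorted, URL-encoded query string (abridged)
        "host:storage.googleapis.com\n",          # canonical headers plus the extra newline
        "host",                                   # signed header names
        "UNSIGNED-PAYLOAD",
    ])

    string_to_sign = "\n".join([
        "GOOG4-RSA-SHA256",
        "20191201T000000Z",                       # request timestamp
        "20191201/auto/storage/goog4_request",    # credential scope
        hashlib.sha256(canonical_request.encode("ascii")).hexdigest(),
    ])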
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Manipulate access control lists that Cloud Storage provides. - -:class:`google.cloud.storage.bucket.Bucket` has a getting method that creates -an ACL object under the hood, and you can interact with that using -:func:`google.cloud.storage.bucket.Bucket.acl`: - -.. literalinclude:: snippets.py - :start-after: [START client_bucket_acl] - :end-before: [END client_bucket_acl] - - -Adding and removing permissions can be done with the following methods -(in increasing order of granularity): - -- :func:`ACL.all` - corresponds to access for all users. -- :func:`ACL.all_authenticated` corresponds - to access for all users that are signed into a Google account. -- :func:`ACL.domain` corresponds to access on a - per Google Apps domain (ie, ``example.com``). -- :func:`ACL.group` corresponds to access on a - per group basis (either by ID or e-mail address). -- :func:`ACL.user` corresponds to access on a - per user basis (either by ID or e-mail address). - -And you are able to ``grant`` and ``revoke`` the following roles: - -- **Reading**: - :func:`_ACLEntity.grant_read` and :func:`_ACLEntity.revoke_read` -- **Writing**: - :func:`_ACLEntity.grant_write` and :func:`_ACLEntity.revoke_write` -- **Owning**: - :func:`_ACLEntity.grant_owner` and :func:`_ACLEntity.revoke_owner` - -You can use any of these like any other factory method (these happen to -be :class:`_ACLEntity` factories): - -.. literalinclude:: snippets.py - :start-after: [START acl_user_settings] - :end-before: [END acl_user_settings] - -After that, you can save any changes you make with the -:func:`google.cloud.storage.acl.ACL.save` method: - -.. literalinclude:: snippets.py - :start-after: [START acl_save] - :end-before: [END acl_save] - -You can alternatively save any existing :class:`google.cloud.storage.acl.ACL` -object (whether it was created by a factory method or not) from a -:class:`google.cloud.storage.bucket.Bucket`: - -.. literalinclude:: snippets.py - :start-after: [START acl_save_bucket] - :end-before: [END acl_save_bucket] - -To get the list of ``entity`` and ``role`` for each unique pair, the -:class:`ACL` class is iterable: - -.. literalinclude:: snippets.py - :start-after: [START acl_print] - :end-before: [END acl_print] - -This list of tuples can be used as the ``entity`` and ``role`` fields -when sending metadata for ACLs to the API. -""" - - -class _ACLEntity(object): - """Class representing a set of roles for an entity. - - This is a helper class that you likely won't ever construct - outside of using the factor methods on the :class:`ACL` object. - - :type entity_type: str - :param entity_type: The type of entity (ie, 'group' or 'user'). - - :type identifier: str - :param identifier: The ID or e-mail of the entity. For the special - entity types (like 'allUsers') this is optional. 
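As a compact companion to the snippets referenced in the module docstring above, a minimal grant-and-save round trip; bucket name and e-mail address are hypothetical:

    from google.cloud import storage

    client = storage.Client()
    bucket = client.get_bucket("my-bucket")

    acl = bucket.acl
    acl.user("alice@example.org").grant_read()   # per-user entity factory
    acl.all().revoke_write()                     # the special allUsers entity
    acl.save()                                   # persist the accumulated entries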
- """ - - READER_ROLE = "READER" - WRITER_ROLE = "WRITER" - OWNER_ROLE = "OWNER" - - def __init__(self, entity_type, identifier=None): - self.identifier = identifier - self.roles = set([]) - self.type = entity_type - - def __str__(self): - if not self.identifier: - return str(self.type) - else: - return "{acl.type}-{acl.identifier}".format(acl=self) - - def __repr__(self): - return "".format( - acl=self, roles=", ".join(self.roles) - ) - - def get_roles(self): - """Get the list of roles permitted by this entity. - - :rtype: list of strings - :returns: The list of roles associated with this entity. - """ - return self.roles - - def grant(self, role): - """Add a role to the entity. - - :type role: str - :param role: The role to add to the entity. - """ - self.roles.add(role) - - def revoke(self, role): - """Remove a role from the entity. - - :type role: str - :param role: The role to remove from the entity. - """ - if role in self.roles: - self.roles.remove(role) - - def grant_read(self): - """Grant read access to the current entity.""" - self.grant(_ACLEntity.READER_ROLE) - - def grant_write(self): - """Grant write access to the current entity.""" - self.grant(_ACLEntity.WRITER_ROLE) - - def grant_owner(self): - """Grant owner access to the current entity.""" - self.grant(_ACLEntity.OWNER_ROLE) - - def revoke_read(self): - """Revoke read access from the current entity.""" - self.revoke(_ACLEntity.READER_ROLE) - - def revoke_write(self): - """Revoke write access from the current entity.""" - self.revoke(_ACLEntity.WRITER_ROLE) - - def revoke_owner(self): - """Revoke owner access from the current entity.""" - self.revoke(_ACLEntity.OWNER_ROLE) - - -class ACL(object): - """Container class representing a list of access controls.""" - - _URL_PATH_ELEM = "acl" - _PREDEFINED_QUERY_PARAM = "predefinedAcl" - - PREDEFINED_XML_ACLS = { - # XML API name -> JSON API name - "project-private": "projectPrivate", - "public-read": "publicRead", - "public-read-write": "publicReadWrite", - "authenticated-read": "authenticatedRead", - "bucket-owner-read": "bucketOwnerRead", - "bucket-owner-full-control": "bucketOwnerFullControl", - } - - PREDEFINED_JSON_ACLS = frozenset( - [ - "private", - "projectPrivate", - "publicRead", - "publicReadWrite", - "authenticatedRead", - "bucketOwnerRead", - "bucketOwnerFullControl", - ] - ) - """See - https://cloud.google.com/storage/docs/access-control/lists#predefined-acl - """ - - loaded = False - - # Subclasses must override to provide these attributes (typically, - # as properties). 
- reload_path = None - save_path = None - user_project = None - - def __init__(self): - self.entities = {} - - def _ensure_loaded(self): - """Load if not already loaded.""" - if not self.loaded: - self.reload() - - @classmethod - def validate_predefined(cls, predefined): - """Ensures predefined is in list of predefined json values - - :type predefined: str - :param predefined: name of a predefined acl - - :type predefined: str - :param predefined: validated JSON name of predefined acl - - :raises: :exc: `ValueError`: If predefined is not a valid acl - """ - predefined = cls.PREDEFINED_XML_ACLS.get(predefined, predefined) - if predefined and predefined not in cls.PREDEFINED_JSON_ACLS: - raise ValueError("Invalid predefined ACL: %s" % (predefined,)) - return predefined - - def reset(self): - """Remove all entities from the ACL, and clear the ``loaded`` flag.""" - self.entities.clear() - self.loaded = False - - def __iter__(self): - self._ensure_loaded() - - for entity in self.entities.values(): - for role in entity.get_roles(): - if role: - yield {"entity": str(entity), "role": role} - - def entity_from_dict(self, entity_dict): - """Build an _ACLEntity object from a dictionary of data. - - An entity is a mutable object that represents a list of roles - belonging to either a user or group or the special types for all - users and all authenticated users. - - :type entity_dict: dict - :param entity_dict: Dictionary full of data from an ACL lookup. - - :rtype: :class:`_ACLEntity` - :returns: An Entity constructed from the dictionary. - """ - entity = entity_dict["entity"] - role = entity_dict["role"] - - if entity == "allUsers": - entity = self.all() - - elif entity == "allAuthenticatedUsers": - entity = self.all_authenticated() - - elif "-" in entity: - entity_type, identifier = entity.split("-", 1) - entity = self.entity(entity_type=entity_type, identifier=identifier) - - if not isinstance(entity, _ACLEntity): - raise ValueError("Invalid dictionary: %s" % entity_dict) - - entity.grant(role) - return entity - - def has_entity(self, entity): - """Returns whether or not this ACL has any entries for an entity. - - :type entity: :class:`_ACLEntity` - :param entity: The entity to check for existence in this ACL. - - :rtype: bool - :returns: True of the entity exists in the ACL. - """ - self._ensure_loaded() - return str(entity) in self.entities - - def get_entity(self, entity, default=None): - """Gets an entity object from the ACL. - - :type entity: :class:`_ACLEntity` or string - :param entity: The entity to get lookup in the ACL. - - :type default: anything - :param default: This value will be returned if the entity - doesn't exist. - - :rtype: :class:`_ACLEntity` - :returns: The corresponding entity or the value provided - to ``default``. - """ - self._ensure_loaded() - return self.entities.get(str(entity), default) - - def add_entity(self, entity): - """Add an entity to the ACL. - - :type entity: :class:`_ACLEntity` - :param entity: The entity to add to this ACL. - """ - self._ensure_loaded() - self.entities[str(entity)] = entity - - def entity(self, entity_type, identifier=None): - """Factory method for creating an Entity. - - If an entity with the same type and identifier already exists, - this will return a reference to that entity. If not, it will - create a new one and add it to the list of known entities for - this ACL. 
- - :type entity_type: str - :param entity_type: The type of entity to create - (ie, ``user``, ``group``, etc) - - :type identifier: str - :param identifier: The ID of the entity (if applicable). - This can be either an ID or an e-mail address. - - :rtype: :class:`_ACLEntity` - :returns: A new Entity or a reference to an existing identical entity. - """ - entity = _ACLEntity(entity_type=entity_type, identifier=identifier) - if self.has_entity(entity): - entity = self.get_entity(entity) - else: - self.add_entity(entity) - return entity - - def user(self, identifier): - """Factory method for a user Entity. - - :type identifier: str - :param identifier: An id or e-mail for this particular user. - - :rtype: :class:`_ACLEntity` - :returns: An Entity corresponding to this user. - """ - return self.entity("user", identifier=identifier) - - def group(self, identifier): - """Factory method for a group Entity. - - :type identifier: str - :param identifier: An id or e-mail for this particular group. - - :rtype: :class:`_ACLEntity` - :returns: An Entity corresponding to this group. - """ - return self.entity("group", identifier=identifier) - - def domain(self, domain): - """Factory method for a domain Entity. - - :type domain: str - :param domain: The domain for this entity. - - :rtype: :class:`_ACLEntity` - :returns: An entity corresponding to this domain. - """ - return self.entity("domain", identifier=domain) - - def all(self): - """Factory method for an Entity representing all users. - - :rtype: :class:`_ACLEntity` - :returns: An entity representing all users. - """ - return self.entity("allUsers") - - def all_authenticated(self): - """Factory method for an Entity representing all authenticated users. - - :rtype: :class:`_ACLEntity` - :returns: An entity representing all authenticated users. - """ - return self.entity("allAuthenticatedUsers") - - def get_entities(self): - """Get a list of all Entity objects. - - :rtype: list of :class:`_ACLEntity` objects - :returns: A list of all Entity objects. - """ - self._ensure_loaded() - return list(self.entities.values()) - - @property - def client(self): - """Abstract getter for the object client.""" - raise NotImplementedError - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current ACL. - - :rtype: :class:`google.cloud.storage.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self.client - return client - - def reload(self, client=None): - """Reload the ACL data from Cloud Storage. - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - path = self.reload_path - client = self._require_client(client) - query_params = {} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - self.entities.clear() - - found = client._connection.api_request( - method="GET", path=path, query_params=query_params - ) - self.loaded = True - for entry in found.get("items", ()): - self.add_entity(self.entity_from_dict(entry)) - - def _save(self, acl, predefined, client): - """Helper for :meth:`save` and :meth:`save_predefined`. 
- - :type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list. - :param acl: The ACL object to save. If left blank, this will save - current entries. - - :type predefined: str - :param predefined: - (Optional) An identifier for a predefined ACL. Must be one of the - keys in :attr:`PREDEFINED_JSON_ACLS` If passed, `acl` must be None. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - query_params = {"projection": "full"} - if predefined is not None: - acl = [] - query_params[self._PREDEFINED_QUERY_PARAM] = predefined - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - path = self.save_path - client = self._require_client(client) - - result = client._connection.api_request( - method="PATCH", - path=path, - data={self._URL_PATH_ELEM: list(acl)}, - query_params=query_params, - ) - self.entities.clear() - for entry in result.get(self._URL_PATH_ELEM, ()): - self.add_entity(self.entity_from_dict(entry)) - self.loaded = True - - def save(self, acl=None, client=None): - """Save this ACL for the current bucket. - - If :attr:`user_project` is set, bills the API request to that project. - - :type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list. - :param acl: The ACL object to save. If left blank, this will save - current entries. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - if acl is None: - acl = self - save_to_backend = acl.loaded - else: - save_to_backend = True - - if save_to_backend: - self._save(acl, None, client) - - def save_predefined(self, predefined, client=None): - """Save this ACL for the current bucket using a predefined ACL. - - If :attr:`user_project` is set, bills the API request to that project. - - :type predefined: str - :param predefined: An identifier for a predefined ACL. Must be one - of the keys in :attr:`PREDEFINED_JSON_ACLS` - or :attr:`PREDEFINED_XML_ACLS` (which will be - aliased to the corresponding JSON name). - If passed, `acl` must be None. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - predefined = self.validate_predefined(predefined) - self._save(None, predefined, client) - - def clear(self, client=None): - """Remove all ACL entries. - - If :attr:`user_project` is set, bills the API request to that project. - - Note that this won't actually remove *ALL* the rules, but it - will remove all the non-default rules. In short, you'll still - have access to a bucket that you created even after you clear - ACL rules with this method. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - self.save([], client=client) - - -class BucketACL(ACL): - """An ACL specifically for a bucket. - - :type bucket: :class:`google.cloud.storage.bucket.Bucket` - :param bucket: The bucket to which this ACL relates. 
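The save_predefined() path above also accepts the XML-style aliases, which validate_predefined() maps onto the JSON names before sending the request; a short sketch with a hypothetical bucket:

    from google.cloud import storage

    client = storage.Client()
    bucket = client.get_bucket("my-bucket")

    bucket.acl.save_predefined("publicRead")    # JSON-style name
    bucket.acl.save_predefined("public-read")   # XML-style alias, mapped to "publicRead"
    # Any name outside the predefined sets raises ValueError.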
- """ - - def __init__(self, bucket): - super(BucketACL, self).__init__() - self.bucket = bucket - - @property - def client(self): - """The client bound to this ACL's bucket.""" - return self.bucket.client - - @property - def reload_path(self): - """Compute the path for GET API requests for this ACL.""" - return "%s/%s" % (self.bucket.path, self._URL_PATH_ELEM) - - @property - def save_path(self): - """Compute the path for PATCH API requests for this ACL.""" - return self.bucket.path - - @property - def user_project(self): - """Compute the user project charged for API requests for this ACL.""" - return self.bucket.user_project - - -class DefaultObjectACL(BucketACL): - """A class representing the default object ACL for a bucket.""" - - _URL_PATH_ELEM = "defaultObjectAcl" - _PREDEFINED_QUERY_PARAM = "predefinedDefaultObjectAcl" - - -class ObjectACL(ACL): - """An ACL specifically for a Cloud Storage object / blob. - - :type blob: :class:`google.cloud.storage.blob.Blob` - :param blob: The blob that this ACL corresponds to. - """ - - def __init__(self, blob): - super(ObjectACL, self).__init__() - self.blob = blob - - @property - def client(self): - """The client bound to this ACL's blob.""" - return self.blob.client - - @property - def reload_path(self): - """Compute the path for GET API requests for this ACL.""" - return "%s/acl" % self.blob.path - - @property - def save_path(self): - """Compute the path for PATCH API requests for this ACL.""" - return self.blob.path - - @property - def user_project(self): - """Compute the user project charged for API requests for this ACL.""" - return self.blob.user_project diff --git a/storage/google/cloud/storage/batch.py b/storage/google/cloud/storage/batch.py deleted file mode 100644 index 92f1a18d1c1d..000000000000 --- a/storage/google/cloud/storage/batch.py +++ /dev/null @@ -1,344 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Batch updates / deletes of storage buckets / blobs. - -See https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch -""" -from email.encoders import encode_noop -from email.generator import Generator -from email.mime.application import MIMEApplication -from email.mime.multipart import MIMEMultipart -from email.parser import Parser -import io -import json - -import requests -import six - -from google.cloud import _helpers -from google.cloud import exceptions -from google.cloud.storage._http import Connection - - -class MIMEApplicationHTTP(MIMEApplication): - """MIME type for ``application/http``. 
- - Constructs payload from headers and body - - :type method: str - :param method: HTTP method - - :type uri: str - :param uri: URI for HTTP request - - :type headers: dict - :param headers: HTTP headers - - :type body: str - :param body: (Optional) HTTP payload - - """ - - def __init__(self, method, uri, headers, body): - if isinstance(body, dict): - body = json.dumps(body) - headers["Content-Type"] = "application/json" - headers["Content-Length"] = len(body) - if body is None: - body = "" - lines = ["%s %s HTTP/1.1" % (method, uri)] - lines.extend( - ["%s: %s" % (key, value) for key, value in sorted(headers.items())] - ) - lines.append("") - lines.append(body) - payload = "\r\n".join(lines) - if six.PY2: - # email.message.Message is an old-style class, so we - # cannot use 'super()'. - MIMEApplication.__init__(self, payload, "http", encode_noop) - else: # pragma: NO COVER Python3 - super_init = super(MIMEApplicationHTTP, self).__init__ - super_init(payload, "http", encode_noop) - - -class _FutureDict(object): - """Class to hold a future value for a deferred request. - - Used by for requests that get sent in a :class:`Batch`. - """ - - @staticmethod - def get(key, default=None): - """Stand-in for dict.get. - - :type key: object - :param key: Hashable dictionary key. - - :type default: object - :param default: Fallback value to dict.get. - - :raises: :class:`KeyError` always since the future is intended to fail - as a dictionary. - """ - raise KeyError("Cannot get(%r, default=%r) on a future" % (key, default)) - - def __getitem__(self, key): - """Stand-in for dict[key]. - - :type key: object - :param key: Hashable dictionary key. - - :raises: :class:`KeyError` always since the future is intended to fail - as a dictionary. - """ - raise KeyError("Cannot get item %r from a future" % (key,)) - - def __setitem__(self, key, value): - """Stand-in for dict[key] = value. - - :type key: object - :param key: Hashable dictionary key. - - :type value: object - :param value: Dictionary value. - - :raises: :class:`KeyError` always since the future is intended to fail - as a dictionary. - """ - raise KeyError("Cannot set %r -> %r on a future" % (key, value)) - - -class _FutureResponse(requests.Response): - """Reponse that returns a placeholder dictionary for a batched requests.""" - - def __init__(self, future_dict): - super(_FutureResponse, self).__init__() - self._future_dict = future_dict - self.status_code = 204 - - def json(self): - return self._future_dict - - @property - def content(self): - return self._future_dict - - -class Batch(Connection): - """Proxy an underlying connection, batching up change operations. - - :type client: :class:`google.cloud.storage.client.Client` - :param client: The client to use for making connections. - """ - - _MAX_BATCH_SIZE = 1000 - - def __init__(self, client): - super(Batch, self).__init__(client) - self._requests = [] - self._target_objects = [] - - def _do_request(self, method, url, headers, data, target_object, timeout=None): - """Override Connection: defer actual HTTP request. - - Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred. - - :type method: str - :param method: The HTTP method to use in the request. - - :type url: str - :param url: The URL to send the request to. - - :type headers: dict - :param headers: A dictionary of HTTP headers to send with the request. - - :type data: str - :param data: The data to send as the body of the request. 
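# Serialization sketch for MIMEApplicationHTTP above: one deferred call is
# rendered as an application/http MIME part; the verb and URI are made up.
from google.cloud.storage.batch import MIMEApplicationHTTP

part = MIMEApplicationHTTP(
    "DELETE", "/storage/v1/b/my-bucket/o/stale.txt", headers={}, body=None
)
print(part.get_payload())
# DELETE /storage/v1/b/my-bucket/o/stale.txt HTTP/1.1
# (no headers, blank line, empty body)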
- - :type target_object: object - :param target_object: - (Optional) This allows us to enable custom behavior in our batch - connection. Here we defer an HTTP request and complete - initialization of the object at a later time. - - :type timeout: float or tuple - :param timeout: (optional) The amount of time, in seconds, to wait - for the server response. By default, the method waits indefinitely. - Can also be passed as a tuple (connect_timeout, read_timeout). - See :meth:`requests.Session.request` documentation for details. - - :rtype: tuple of ``response`` (a dictionary of sorts) - and ``content`` (a string). - :returns: The HTTP response object and the content of the response. - """ - if len(self._requests) >= self._MAX_BATCH_SIZE: - raise ValueError( - "Too many deferred requests (max %d)" % self._MAX_BATCH_SIZE - ) - self._requests.append((method, url, headers, data, timeout)) - result = _FutureDict() - self._target_objects.append(target_object) - if target_object is not None: - target_object._properties = result - return _FutureResponse(result) - - def _prepare_batch_request(self): - """Prepares headers and body for a batch request. - - :rtype: tuple (dict, str) - :returns: The pair of headers and body of the batch request to be sent. - :raises: :class:`ValueError` if no requests have been deferred. - """ - if len(self._requests) == 0: - raise ValueError("No deferred requests") - - multi = MIMEMultipart() - - # Use timeout of last request, default to None (indefinite) - timeout = None - for method, uri, headers, body, _timeout in self._requests: - subrequest = MIMEApplicationHTTP(method, uri, headers, body) - multi.attach(subrequest) - timeout = _timeout - - # The `email` package expects to deal with "native" strings - if six.PY2: # pragma: NO COVER Python3 - buf = io.BytesIO() - else: - buf = io.StringIO() - generator = Generator(buf, False, 0) - generator.flatten(multi) - payload = buf.getvalue() - - # Strip off redundant header text - _, body = payload.split("\n\n", 1) - return dict(multi._headers), body, timeout - - def _finish_futures(self, responses): - """Apply all the batch responses to the futures created. - - :type responses: list of (headers, payload) tuples. - :param responses: List of headers and payloads from each response in - the batch. - - :raises: :class:`ValueError` if no requests have been deferred. - """ - # If a bad status occurs, we track it, but don't raise an exception - # until all futures have been populated. - exception_args = None - - if len(self._target_objects) != len(responses): # pragma: NO COVER - raise ValueError("Expected a response for every request.") - - for target_object, subresponse in zip(self._target_objects, responses): - if not 200 <= subresponse.status_code < 300: - exception_args = exception_args or subresponse - elif target_object is not None: - try: - target_object._properties = subresponse.json() - except ValueError: - target_object._properties = subresponse.content - - if exception_args is not None: - raise exceptions.from_http_response(exception_args) - - def finish(self): - """Submit a single `multipart/mixed` request with deferred requests. - - :rtype: list of tuples - :returns: one ``(headers, payload)`` tuple per deferred request. - """ - headers, body, timeout = self._prepare_batch_request() - - url = "%s/batch/storage/v1" % self.API_BASE_URL - - # Use the private ``_base_connection`` rather than the property - # ``_connection``, since the property may be this - # current batch. 
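# Deferred-request sketch for Batch above: inside the context-manager
# protocol defined just below, calls queue sub-requests and return
# _FutureDict placeholders; leaving the block triggers finish(), which
# sends a single multipart/mixed request. Names are placeholders.
from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-bucket")

with client.batch():
    bucket.blob("old-1.txt").delete()    # deferred, nothing sent yet
    bucket.blob("old-2.txt").delete()
# Both DELETEs were sent together when the `with` block exited.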
- response = self._client._base_connection._make_request( - "POST", url, data=body, headers=headers, timeout=timeout - ) - responses = list(_unpack_batch_response(response)) - self._finish_futures(responses) - return responses - - def current(self): - """Return the topmost batch, or None.""" - return self._client.current_batch - - def __enter__(self): - self._client._push_batch(self) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - if exc_type is None: - self.finish() - finally: - self._client._pop_batch() - - -def _generate_faux_mime_message(parser, response): - """Convert response, content -> (multipart) email.message. - - Helper for _unpack_batch_response. - """ - # We coerce to bytes to get consistent concat across - # Py2 and Py3. Percent formatting is insufficient since - # it includes the b in Py3. - content_type = _helpers._to_bytes(response.headers.get("content-type", "")) - - faux_message = b"".join( - [b"Content-Type: ", content_type, b"\nMIME-Version: 1.0\n\n", response.content] - ) - - if six.PY2: - return parser.parsestr(faux_message) - else: # pragma: NO COVER Python3 - return parser.parsestr(faux_message.decode("utf-8")) - - -def _unpack_batch_response(response): - """Convert requests.Response -> [(headers, payload)]. - - Creates a generator of tuples of emulating the responses to - :meth:`requests.Session.request`. - - :type response: :class:`requests.Response` - :param response: HTTP response / headers from a request. - """ - parser = Parser() - message = _generate_faux_mime_message(parser, response) - - if not isinstance(message._payload, list): # pragma: NO COVER - raise ValueError("Bad response: not multi-part") - - for subrequest in message._payload: - status_line, rest = subrequest._payload.split("\n", 1) - _, status, _ = status_line.split(" ", 2) - sub_message = parser.parsestr(rest) - payload = sub_message._payload - msg_headers = dict(sub_message._headers) - content_id = msg_headers.get("Content-ID") - - subresponse = requests.Response() - subresponse.request = requests.Request( - method="BATCH", url="contentid://{}".format(content_id) - ).prepare() - subresponse.status_code = int(status) - subresponse.headers.update(msg_headers) - subresponse._content = payload.encode("utf-8") - - yield subresponse diff --git a/storage/google/cloud/storage/blob.py b/storage/google/cloud/storage/blob.py deleted file mode 100644 index f134c3e45722..000000000000 --- a/storage/google/cloud/storage/blob.py +++ /dev/null @@ -1,2203 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# pylint: disable=too-many-lines - -"""Create / interact with Google Cloud Storage blobs. - -.. _API reference docs: https://cloud.google.com/storage/docs/\ - json_api/v1/objects -.. _customer-supplied: https://cloud.google.com/storage/docs/\ - encryption#customer-supplied -.. 
_google-resumable-media: https://googleapis.github.io/\ - google-resumable-media-python/latest/\ - google.resumable_media.requests.html -""" - -import base64 -import copy -import hashlib -from io import BytesIO -import mimetypes -import os -import time -import warnings - -from six.moves.urllib.parse import parse_qsl -from six.moves.urllib.parse import quote -from six.moves.urllib.parse import urlencode -from six.moves.urllib.parse import urlsplit -from six.moves.urllib.parse import urlunsplit - -from google import resumable_media -from google.resumable_media.requests import ChunkedDownload -from google.resumable_media.requests import Download -from google.resumable_media.requests import RawDownload -from google.resumable_media.requests import RawChunkedDownload -from google.resumable_media.requests import MultipartUpload -from google.resumable_media.requests import ResumableUpload - -from google.api_core.iam import Policy -from google.cloud import exceptions -from google.cloud._helpers import _bytes_to_unicode -from google.cloud._helpers import _rfc3339_to_datetime -from google.cloud._helpers import _to_bytes -from google.cloud.exceptions import NotFound -from google.cloud.storage._helpers import _get_storage_host -from google.cloud.storage._helpers import _PropertyMixin -from google.cloud.storage._helpers import _scalar_property -from google.cloud.storage._signing import generate_signed_url_v2 -from google.cloud.storage._signing import generate_signed_url_v4 -from google.cloud.storage.acl import ACL -from google.cloud.storage.acl import ObjectACL -from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS -from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS -from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS -from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS -from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS -from google.cloud.storage.constants import STANDARD_STORAGE_CLASS - -_STORAGE_HOST = _get_storage_host() - -_API_ACCESS_ENDPOINT = "https://storage.googleapis.com" -_DEFAULT_CONTENT_TYPE = u"application/octet-stream" -_DOWNLOAD_URL_TEMPLATE = _STORAGE_HOST + u"/download/storage/v1{path}?alt=media" -_BASE_UPLOAD_TEMPLATE = _STORAGE_HOST + u"/upload/storage/v1{bucket_path}/o?uploadType=" -_MULTIPART_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + u"multipart" -_RESUMABLE_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + u"resumable" -# NOTE: "acl" is also writeable but we defer ACL management to -# the classes in the google.cloud.storage.acl module. -_CONTENT_TYPE_FIELD = "contentType" -_WRITABLE_FIELDS = ( - "cacheControl", - "contentDisposition", - "contentEncoding", - "contentLanguage", - _CONTENT_TYPE_FIELD, - "crc32c", - "md5Hash", - "metadata", - "name", - "storageClass", -) -_NUM_RETRIES_MESSAGE = ( - "`num_retries` has been deprecated and will be removed in a future " - "release. The default behavior (when `num_retries` is not specified) when " - "a transient error (e.g. 429 Too Many Requests or 500 Internal Server " - "Error) occurs will be as follows: upload requests will be automatically " - "retried. Subsequent retries will be sent after waiting 1, 2, 4, 8, etc. " - "seconds (exponential backoff) until 10 minutes of wait time have " - "elapsed. At that point, there will be no more attempts to retry." -) -_READ_LESS_THAN_SIZE = ( - "Size {:d} was specified but the file-like object only had " "{:d} bytes remaining." 
-) - -_DEFAULT_CHUNKSIZE = 104857600 # 1024 * 1024 B * 100 = 100 MB -_MAX_MULTIPART_SIZE = 8388608 # 8 MB - - -class Blob(_PropertyMixin): - """A wrapper around Cloud Storage's concept of an ``Object``. - - :type name: str - :param name: The name of the blob. This corresponds to the unique path of - the object in the bucket. If bytes, will be converted to a - unicode object. Blob / object names can contain any sequence - of valid unicode characters, of length 1-1024 bytes when - UTF-8 encoded. - - :type bucket: :class:`google.cloud.storage.bucket.Bucket` - :param bucket: The bucket to which this blob belongs. - - :type chunk_size: int - - :param chunk_size: The size of a chunk of data whenever iterating (in - bytes). This must be a multiple of 256 KB per the API - specification. - - :type encryption_key: bytes - :param encryption_key: - Optional 32 byte encryption key for customer-supplied encryption. - See https://cloud.google.com/storage/docs/encryption#customer-supplied. - - :type kms_key_name: str - :param kms_key_name: - Optional resource name of Cloud KMS key used to encrypt the blob's - contents. - """ - - _chunk_size = None # Default value for each instance. - _CHUNK_SIZE_MULTIPLE = 256 * 1024 - """Number (256 KB, in bytes) that must divide the chunk size.""" - - STORAGE_CLASSES = ( - STANDARD_STORAGE_CLASS, - NEARLINE_STORAGE_CLASS, - COLDLINE_STORAGE_CLASS, - ARCHIVE_STORAGE_CLASS, - MULTI_REGIONAL_LEGACY_STORAGE_CLASS, - REGIONAL_LEGACY_STORAGE_CLASS, - ) - """Allowed values for :attr:`storage_class`. - - See - https://cloud.google.com/storage/docs/json_api/v1/objects#storageClass - https://cloud.google.com/storage/docs/per-object-storage-class - - .. note:: - This list does not include 'DURABLE_REDUCED_AVAILABILITY', which - is only documented for buckets (and deprecated). - """ - - def __init__( - self, - name, - bucket, - chunk_size=None, - encryption_key=None, - kms_key_name=None, - generation=None, - ): - name = _bytes_to_unicode(name) - super(Blob, self).__init__(name=name) - - self.chunk_size = chunk_size # Check that setter accepts value. - self._bucket = bucket - self._acl = ObjectACL(self) - if encryption_key is not None and kms_key_name is not None: - raise ValueError( - "Pass at most one of 'encryption_key' " "and 'kms_key_name'" - ) - - self._encryption_key = encryption_key - - if kms_key_name is not None: - self._properties["kmsKeyName"] = kms_key_name - - if generation is not None: - self._properties["generation"] = generation - - @property - def bucket(self): - """Bucket which contains the object. - - :rtype: :class:`~google.cloud.storage.bucket.Bucket` - :returns: The object's bucket. - """ - return self._bucket - - @property - def chunk_size(self): - """Get the blob's default chunk size. - - :rtype: int or ``NoneType`` - :returns: The current blob's chunk size, if it is set. - """ - return self._chunk_size - - @chunk_size.setter - def chunk_size(self, value): - """Set the blob's default chunk size. - - :type value: int - :param value: (Optional) The current blob's chunk size, if it is set. - - :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a - multiple of 256 KB. - """ - if value is not None and value > 0 and value % self._CHUNK_SIZE_MULTIPLE != 0: - raise ValueError( - "Chunk size must be a multiple of %d." % (self._CHUNK_SIZE_MULTIPLE,) - ) - self._chunk_size = value - - @staticmethod - def path_helper(bucket_path, blob_name): - """Relative URL path for a blob. - - :type bucket_path: str - :param bucket_path: The URL path for a bucket. 
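# Construction sketch for Blob above: chunk_size must be a multiple of
# 256 KiB, and encryption_key / kms_key_name are mutually exclusive.
# The bucket and object names are placeholders.
import os

from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-bucket")

csek = os.urandom(32)  # customer-supplied encryption key
blob = storage.Blob(
    "reports/2020-01.csv",
    bucket,
    chunk_size=4 * 256 * 1024,   # OK: a multiple of 256 KiB
    encryption_key=csek,
)
# storage.Blob("x", bucket, chunk_size=1000)   -> ValueError (not a 256 KiB multiple)
# storage.Blob("x", bucket, encryption_key=csek, kms_key_name="k")  -> ValueError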
- - :type blob_name: str - :param blob_name: The name of the blob. - - :rtype: str - :returns: The relative URL path for ``blob_name``. - """ - return bucket_path + "/o/" + _quote(blob_name) - - @property - def acl(self): - """Create our ACL on demand.""" - return self._acl - - def __repr__(self): - if self.bucket: - bucket_name = self.bucket.name - else: - bucket_name = None - - return "" % (bucket_name, self.name, self.generation) - - @property - def path(self): - """Getter property for the URL path to this Blob. - - :rtype: str - :returns: The URL path to this Blob. - """ - if not self.name: - raise ValueError("Cannot determine path without a blob name.") - - return self.path_helper(self.bucket.path, self.name) - - @property - def client(self): - """The client bound to this blob.""" - return self.bucket.client - - @property - def user_project(self): - """Project ID billed for API requests made via this blob. - - Derived from bucket's value. - - :rtype: str - """ - return self.bucket.user_project - - def _encryption_headers(self): - """Return any encryption headers needed to fetch the object. - - :rtype: List(Tuple(str, str)) - :returns: a list of tuples to be passed as headers. - """ - return _get_encryption_headers(self._encryption_key) - - @property - def _query_params(self): - """Default query parameters.""" - params = {} - if self.generation is not None: - params["generation"] = self.generation - if self.user_project is not None: - params["userProject"] = self.user_project - return params - - @property - def public_url(self): - """The public URL for this blob. - - Use :meth:`make_public` to enable anonymous access via the returned - URL. - - :rtype: `string` - :returns: The public URL for this blob. - """ - return "{storage_base_url}/{bucket_name}/{quoted_name}".format( - storage_base_url=_API_ACCESS_ENDPOINT, - bucket_name=self.bucket.name, - quoted_name=_quote(self.name, safe=b"/~"), - ) - - @classmethod - def from_string(cls, uri, client=None): - """Get a constructor for blob object by URI. - - :type uri: str - :param uri: The blob uri pass to get blob object. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. - - :rtype: :class:`google.cloud.storage.blob.Blob` - :returns: The blob object created. - - Example: - Get a constructor for blob object by URI.. - - >>> from google.cloud import storage - >>> from google.cloud.storage.blob import Blob - >>> client = storage.Client() - >>> blob = Blob.from_string("gs://bucket/object") - """ - from google.cloud.storage.bucket import Bucket - - scheme, netloc, path, query, frag = urlsplit(uri) - if scheme != "gs": - raise ValueError("URI scheme must be gs") - - bucket = Bucket(client, name=netloc) - return cls(path[1:], bucket) - - def generate_signed_url( - self, - expiration=None, - api_access_endpoint=_API_ACCESS_ENDPOINT, - method="GET", - content_md5=None, - content_type=None, - response_disposition=None, - response_type=None, - generation=None, - headers=None, - query_parameters=None, - client=None, - credentials=None, - version=None, - service_account_email=None, - access_token=None, - ): - """Generates a signed URL for this blob. - - .. note:: - - If you are on Google Compute Engine, you can't generate a signed - URL using GCE service account. Follow `Issue 50`_ for updates on - this. If you'd like to be able to generate a signed URL from GCE, - you can use a standard service account from a JSON file rather - than a GCE service account. - - .. 
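# URI-helper sketch for path, public_url and from_string above;
# "gs://my-bucket/photo.png" is a placeholder URI.
from google.cloud import storage
from google.cloud.storage.blob import Blob

client = storage.Client()
blob = Blob.from_string("gs://my-bucket/photo.png", client=client)

print(blob.path)        # /b/my-bucket/o/photo.png
print(blob.public_url)  # https://storage.googleapis.com/my-bucket/photo.png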
_Issue 50: https://github.com/GoogleCloudPlatform/\ - google-auth-library-python/issues/50 - - If you have a blob that you want to allow access to for a set - amount of time, you can use this method to generate a URL that - is only valid within a certain time period. - - This is particularly useful if you don't want publicly - accessible blobs, but don't want to require users to explicitly - log in. - - :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] - :param expiration: Point in time when the signed URL should expire. - - :type api_access_endpoint: str - :param api_access_endpoint: Optional URI base. - - :type method: str - :param method: The HTTP verb that will be used when requesting the URL. - - :type content_md5: str - :param content_md5: (Optional) The MD5 hash of the object referenced by - ``resource``. - - :type content_type: str - :param content_type: (Optional) The content type of the object - referenced by ``resource``. - - :type response_disposition: str - :param response_disposition: (Optional) Content disposition of - responses to requests for the signed URL. - For example, to enable the signed URL - to initiate a file of ``blog.png``, use - the value - ``'attachment; filename=blob.png'``. - - :type response_type: str - :param response_type: (Optional) Content type of responses to requests - for the signed URL. Ignored if content_type is - set on object/blob metadata. - - :type generation: str - :param generation: (Optional) A value that indicates which generation - of the resource to fetch. - - :type headers: dict - :param headers: - (Optional) Additional HTTP headers to be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers - Requests using the signed URL *must* pass the specified header - (name and value) with each request for the URL. - - :type query_parameters: dict - :param query_parameters: - (Optional) Additional query paramtersto be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers#query - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - - :type credentials: :class:`google.auth.credentials.Credentials` or - :class:`NoneType` - :param credentials: The authorization credentials to attach to requests. - These credentials identify this application to the service. - If none are specified, the client will attempt to ascertain - the credentials from the environment. - - :type version: str - :param version: (Optional) The version of signed credential to create. - Must be one of 'v2' | 'v4'. - - :type service_account_email: str - :param service_account_email: (Optional) E-mail address of the service account. - - :type access_token: str - :param access_token: (Optional) Access token for a service account. - - :raises: :exc:`ValueError` when version is invalid. - :raises: :exc:`TypeError` when expiration is not a valid type. - :raises: :exc:`AttributeError` if credentials is not an instance - of :class:`google.auth.credentials.Signing`. - - :rtype: str - :returns: A signed URL you can use to access the resource - until expiration. 
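# Signed-URL sketch for the parameters documented above: a v4 URL valid for
# 15 minutes, assuming the client is built from a service-account JSON key
# (required for signing); file and bucket names are placeholders.
import datetime

from google.cloud import storage

client = storage.Client.from_service_account_json("sa.json")
blob = client.bucket("my-bucket").blob("photo.png")

url = blob.generate_signed_url(
    expiration=datetime.timedelta(minutes=15),
    method="GET",
    version="v4",
)
print(url)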
- """ - if version is None: - version = "v2" - elif version not in ("v2", "v4"): - raise ValueError("'version' must be either 'v2' or 'v4'") - - quoted_name = _quote(self.name, safe=b"/~") - resource = "/{bucket_name}/{quoted_name}".format( - bucket_name=self.bucket.name, quoted_name=quoted_name - ) - - if credentials is None: - client = self._require_client(client) - credentials = client._credentials - - if version == "v2": - helper = generate_signed_url_v2 - else: - helper = generate_signed_url_v4 - - if self._encryption_key is not None: - encryption_headers = _get_encryption_headers(self._encryption_key) - if headers is None: - headers = {} - if version == "v2": - # See: https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers - v2_copy_only = "X-Goog-Encryption-Algorithm" - headers[v2_copy_only] = encryption_headers[v2_copy_only] - else: - headers.update(encryption_headers) - - return helper( - credentials, - resource=resource, - expiration=expiration, - api_access_endpoint=api_access_endpoint, - method=method.upper(), - content_md5=content_md5, - content_type=content_type, - response_type=response_type, - response_disposition=response_disposition, - generation=generation, - headers=headers, - query_parameters=query_parameters, - service_account_email=service_account_email, - access_token=access_token, - ) - - def exists(self, client=None): - """Determines whether or not this blob exists. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :rtype: bool - :returns: True if the blob exists in Cloud Storage. - """ - client = self._require_client(client) - # We only need the status code (200 or not) so we seek to - # minimize the returned payload. - query_params = self._query_params - query_params["fields"] = "name" - - try: - # We intentionally pass `_target_object=None` since fields=name - # would limit the local properties. - client._connection.api_request( - method="GET", - path=self.path, - query_params=query_params, - _target_object=None, - ) - # NOTE: This will not fail immediately in a batch. However, when - # Batch.finish() is called, the resulting `NotFound` will be - # raised. - return True - except NotFound: - return False - - def delete(self, client=None): - """Deletes a blob from Cloud Storage. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :raises: :class:`google.cloud.exceptions.NotFound` - (propagated from - :meth:`google.cloud.storage.bucket.Bucket.delete_blob`). - """ - self.bucket.delete_blob(self.name, client=client, generation=self.generation) - - def _get_transport(self, client): - """Return the client's transport. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :rtype transport: - :class:`~google.auth.transport.requests.AuthorizedSession` - :returns: The transport (with credentials) that will - make authenticated requests. 
- """ - client = self._require_client(client) - return client._http - - def _get_download_url(self): - """Get the download URL for the current blob. - - If the ``media_link`` has been loaded, it will be used, otherwise - the URL will be constructed from the current blob's path (and possibly - generation) to avoid a round trip. - - :rtype: str - :returns: The download URL for the current blob. - """ - name_value_pairs = [] - if self.media_link is None: - base_url = _DOWNLOAD_URL_TEMPLATE.format(path=self.path) - if self.generation is not None: - name_value_pairs.append(("generation", "{:d}".format(self.generation))) - else: - base_url = self.media_link - - if self.user_project is not None: - name_value_pairs.append(("userProject", self.user_project)) - - return _add_query_parameters(base_url, name_value_pairs) - - def _do_download( - self, - transport, - file_obj, - download_url, - headers, - start=None, - end=None, - raw_download=False, - ): - """Perform a download without any error handling. - - This is intended to be called by :meth:`download_to_file` so it can - be wrapped with error handling / remapping. - - :type transport: - :class:`~google.auth.transport.requests.AuthorizedSession` - :param transport: The transport (with credentials) that will - make authenticated requests. - - :type file_obj: file - :param file_obj: A file handle to which to write the blob's data. - - :type download_url: str - :param download_url: The URL where the media can be accessed. - - :type headers: dict - :param headers: Optional headers to be sent with the request(s). - - :type start: int - :param start: Optional, the first byte in a range to be downloaded. - - :type end: int - :param end: Optional, The last byte in a range to be downloaded. - - :type raw_download: bool - :param raw_download: - Optional, If true, download the object without any expansion. - """ - if self.chunk_size is None: - if raw_download: - klass = RawDownload - else: - klass = Download - - download = klass( - download_url, stream=file_obj, headers=headers, start=start, end=end - ) - download.consume(transport) - - else: - - if raw_download: - klass = RawChunkedDownload - else: - klass = ChunkedDownload - - download = klass( - download_url, - self.chunk_size, - file_obj, - headers=headers, - start=start if start else 0, - end=end, - ) - - while not download.finished: - download.consume_next_chunk(transport) - - def download_to_file( - self, file_obj, client=None, start=None, end=None, raw_download=False - ): - """Download the contents of this blob into a file-like object. - - .. note:: - - If the server-set property, :attr:`media_link`, is not yet - initialized, makes an additional API request to load it. - - Downloading a file that has been encrypted with a `customer-supplied`_ - encryption key: - - .. literalinclude:: snippets.py - :start-after: [START download_to_file] - :end-before: [END download_to_file] - :dedent: 4 - - The ``encryption_key`` should be a str or bytes with a length of at - least 32. - - For more fine-grained control over the download process, check out - `google-resumable-media`_. For example, this library allows - downloading **parts** of a blob rather than the whole thing. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type file_obj: file - :param file_obj: A file handle to which to write the blob's data. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. 
If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :type start: int - :param start: Optional, the first byte in a range to be downloaded. - - :type end: int - :param end: Optional, The last byte in a range to be downloaded. - - :type raw_download: bool - :param raw_download: - Optional, If true, download the object without any expansion. - - :raises: :class:`google.cloud.exceptions.NotFound` - """ - download_url = self._get_download_url() - headers = _get_encryption_headers(self._encryption_key) - headers["accept-encoding"] = "gzip" - - transport = self._get_transport(client) - try: - self._do_download( - transport, file_obj, download_url, headers, start, end, raw_download - ) - except resumable_media.InvalidResponse as exc: - _raise_from_invalid_response(exc) - - def download_to_filename( - self, filename, client=None, start=None, end=None, raw_download=False - ): - """Download the contents of this blob into a named file. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type filename: str - :param filename: A filename to be passed to ``open``. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :type start: int - :param start: Optional, the first byte in a range to be downloaded. - - :type end: int - :param end: Optional, The last byte in a range to be downloaded. - - :type raw_download: bool - :param raw_download: - Optional, If true, download the object without any expansion. - - :raises: :class:`google.cloud.exceptions.NotFound` - """ - try: - with open(filename, "wb") as file_obj: - self.download_to_file( - file_obj, - client=client, - start=start, - end=end, - raw_download=raw_download, - ) - except resumable_media.DataCorruption: - # Delete the corrupt downloaded file. - os.remove(filename) - raise - - updated = self.updated - if updated is not None: - mtime = time.mktime(updated.timetuple()) - os.utime(file_obj.name, (mtime, mtime)) - - def download_as_string(self, client=None, start=None, end=None, raw_download=False): - """Download the contents of this blob as a bytes object. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :type start: int - :param start: Optional, the first byte in a range to be downloaded. - - :type end: int - :param end: Optional, The last byte in a range to be downloaded. - - :type raw_download: bool - :param raw_download: - Optional, If true, download the object without any expansion. - - :rtype: bytes - :returns: The data stored in this blob. - :raises: :class:`google.cloud.exceptions.NotFound` - """ - string_buffer = BytesIO() - self.download_to_file( - string_buffer, - client=client, - start=start, - end=end, - raw_download=raw_download, - ) - return string_buffer.getvalue() - - def _get_content_type(self, content_type, filename=None): - """Determine the content type from the current object. - - The return value will be determined in order of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The default value ('application/octet-stream') - - :type content_type: str - :param content_type: (Optional) type of content. 
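# Convenience-download sketch for the wrappers above; start/end give an
# inclusive byte range, and the paths and names are placeholders.
from google.cloud import storage

client = storage.Client()
blob = client.bucket("my-bucket").blob("notes.txt")

blob.download_to_filename("/tmp/notes.txt")        # also restores mtime from `updated`
head = blob.download_as_string(start=0, end=1023)  # first KiB, returned as bytes
print(head.decode("utf-8"))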
- - :type filename: str - :param filename: (Optional) The name of the file where the content - is stored. - - :rtype: str - :returns: Type of content gathered from the object. - """ - if content_type is None: - content_type = self.content_type - - if content_type is None and filename is not None: - content_type, _ = mimetypes.guess_type(filename) - - if content_type is None: - content_type = _DEFAULT_CONTENT_TYPE - - return content_type - - def _get_writable_metadata(self): - """Get the object / blob metadata which is writable. - - This is intended to be used when creating a new object / blob. - - See the `API reference docs`_ for more information, the fields - marked as writable are: - - * ``acl`` - * ``cacheControl`` - * ``contentDisposition`` - * ``contentEncoding`` - * ``contentLanguage`` - * ``contentType`` - * ``crc32c`` - * ``md5Hash`` - * ``metadata`` - * ``name`` - * ``storageClass`` - - For now, we don't support ``acl``, access control lists should be - managed directly through :class:`ObjectACL` methods. - """ - # NOTE: This assumes `self.name` is unicode. - object_metadata = {"name": self.name} - for key in self._changes: - if key in _WRITABLE_FIELDS: - object_metadata[key] = self._properties[key] - - return object_metadata - - def _get_upload_arguments(self, content_type): - """Get required arguments for performing an upload. - - The content type returned will be determined in order of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The default value ('application/octet-stream') - - :type content_type: str - :param content_type: Type of content being uploaded (or :data:`None`). - - :rtype: tuple - :returns: A triple of - - * A header dictionary - * An object metadata dictionary - * The ``content_type`` as a string (according to precedence) - """ - headers = _get_encryption_headers(self._encryption_key) - object_metadata = self._get_writable_metadata() - content_type = self._get_content_type(content_type) - return headers, object_metadata, content_type - - def _do_multipart_upload( - self, client, stream, content_type, size, num_retries, predefined_acl - ): - """Perform a multipart upload. - - The content type of the upload will be determined in order - of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The default value ('application/octet-stream') - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. - - :type content_type: str - :param content_type: Type of content being uploaded (or :data:`None`). - - :type size: int - :param size: The number of bytes to be uploaded (which will be read - from ``stream``). If not provided, the upload will be - concluded once ``stream`` is exhausted (or :data:`None`). - - :type num_retries: int - :param num_retries: Number of upload retries. (Deprecated: This - argument will be removed in a future release.) - - :type predefined_acl: str - :param predefined_acl: (Optional) predefined access control list - - :rtype: :class:`~requests.Response` - :returns: The "200 OK" response object returned after the multipart - upload request. - :raises: :exc:`ValueError` if ``size`` is not :data:`None` but the - ``stream`` has fewer than ``size`` bytes remaining. 
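# Content-type precedence sketch for _get_content_type above: the explicit
# argument wins, then the blob's stored contentType, then a guess from the
# filename, then application/octet-stream. The blob is a placeholder.
from google.cloud import storage

client = storage.Client()
blob = client.bucket("my-bucket").blob("photo")

print(blob._get_content_type("image/png"))                  # image/png
print(blob._get_content_type(None, filename="photo.jpeg"))  # image/jpeg
print(blob._get_content_type(None))                         # application/octet-stream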
- """ - if size is None: - data = stream.read() - else: - data = stream.read(size) - if len(data) < size: - msg = _READ_LESS_THAN_SIZE.format(size, len(data)) - raise ValueError(msg) - - transport = self._get_transport(client) - info = self._get_upload_arguments(content_type) - headers, object_metadata, content_type = info - - base_url = _MULTIPART_URL_TEMPLATE.format(bucket_path=self.bucket.path) - name_value_pairs = [] - - if self.user_project is not None: - name_value_pairs.append(("userProject", self.user_project)) - - if self.kms_key_name is not None: - name_value_pairs.append(("kmsKeyName", self.kms_key_name)) - - if predefined_acl is not None: - name_value_pairs.append(("predefinedAcl", predefined_acl)) - - upload_url = _add_query_parameters(base_url, name_value_pairs) - upload = MultipartUpload(upload_url, headers=headers) - - if num_retries is not None: - upload._retry_strategy = resumable_media.RetryStrategy( - max_retries=num_retries - ) - - response = upload.transmit(transport, data, object_metadata, content_type) - - return response - - def _initiate_resumable_upload( - self, - client, - stream, - content_type, - size, - num_retries, - predefined_acl=None, - extra_headers=None, - chunk_size=None, - ): - """Initiate a resumable upload. - - The content type of the upload will be determined in order - of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The default value ('application/octet-stream') - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. - - :type content_type: str - :param content_type: Type of content being uploaded (or :data:`None`). - - :type size: int - :param size: The number of bytes to be uploaded (which will be read - from ``stream``). If not provided, the upload will be - concluded once ``stream`` is exhausted (or :data:`None`). - - :type predefined_acl: str - :param predefined_acl: (Optional) predefined access control list - - :type num_retries: int - :param num_retries: Number of upload retries. (Deprecated: This - argument will be removed in a future release.) - - :type extra_headers: dict - :param extra_headers: (Optional) Extra headers to add to standard - headers. - - :type chunk_size: int - :param chunk_size: - (Optional) Chunk size to use when creating a - :class:`~google.resumable_media.requests.ResumableUpload`. - If not passed, will fall back to the chunk size on the - current blob. - - :rtype: tuple - :returns: - Pair of - - * The :class:`~google.resumable_media.requests.ResumableUpload` - that was created - * The ``transport`` used to initiate the upload. 
- """ - if chunk_size is None: - chunk_size = self.chunk_size - if chunk_size is None: - chunk_size = _DEFAULT_CHUNKSIZE - - transport = self._get_transport(client) - info = self._get_upload_arguments(content_type) - headers, object_metadata, content_type = info - if extra_headers is not None: - headers.update(extra_headers) - - base_url = _RESUMABLE_URL_TEMPLATE.format(bucket_path=self.bucket.path) - name_value_pairs = [] - - if self.user_project is not None: - name_value_pairs.append(("userProject", self.user_project)) - - if self.kms_key_name is not None: - name_value_pairs.append(("kmsKeyName", self.kms_key_name)) - - if predefined_acl is not None: - name_value_pairs.append(("predefinedAcl", predefined_acl)) - - upload_url = _add_query_parameters(base_url, name_value_pairs) - upload = ResumableUpload(upload_url, chunk_size, headers=headers) - - if num_retries is not None: - upload._retry_strategy = resumable_media.RetryStrategy( - max_retries=num_retries - ) - - upload.initiate( - transport, - stream, - object_metadata, - content_type, - total_bytes=size, - stream_final=False, - ) - - return upload, transport - - def _do_resumable_upload( - self, client, stream, content_type, size, num_retries, predefined_acl - ): - """Perform a resumable upload. - - Assumes ``chunk_size`` is not :data:`None` on the current blob. - - The content type of the upload will be determined in order - of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The default value ('application/octet-stream') - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. - - :type content_type: str - :param content_type: Type of content being uploaded (or :data:`None`). - - :type size: int - :param size: The number of bytes to be uploaded (which will be read - from ``stream``). If not provided, the upload will be - concluded once ``stream`` is exhausted (or :data:`None`). - - :type num_retries: int - :param num_retries: Number of upload retries. (Deprecated: This - argument will be removed in a future release.) - - :type predefined_acl: str - :param predefined_acl: (Optional) predefined access control list - - :rtype: :class:`~requests.Response` - :returns: The "200 OK" response object returned after the final chunk - is uploaded. - """ - upload, transport = self._initiate_resumable_upload( - client, - stream, - content_type, - size, - num_retries, - predefined_acl=predefined_acl, - ) - - while not upload.finished: - response = upload.transmit_next_chunk(transport) - - return response - - def _do_upload( - self, client, stream, content_type, size, num_retries, predefined_acl - ): - """Determine an upload strategy and then perform the upload. - - If the size of the data to be uploaded exceeds 5 MB a resumable media - request will be used, otherwise the content and the metadata will be - uploaded in a single multipart upload request. - - The content type of the upload will be determined in order - of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The default value ('application/octet-stream') - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. 
- - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. - - :type content_type: str - :param content_type: Type of content being uploaded (or :data:`None`). - - :type size: int - :param size: The number of bytes to be uploaded (which will be read - from ``stream``). If not provided, the upload will be - concluded once ``stream`` is exhausted (or :data:`None`). - - :type num_retries: int - :param num_retries: Number of upload retries. (Deprecated: This - argument will be removed in a future release.) - - :type predefined_acl: str - :param predefined_acl: (Optional) predefined access control list - - :rtype: dict - :returns: The parsed JSON from the "200 OK" response. This will be the - **only** response in the multipart case and it will be the - **final** response in the resumable case. - """ - if size is not None and size <= _MAX_MULTIPART_SIZE: - response = self._do_multipart_upload( - client, stream, content_type, size, num_retries, predefined_acl - ) - else: - response = self._do_resumable_upload( - client, stream, content_type, size, num_retries, predefined_acl - ) - - return response.json() - - def upload_from_file( - self, - file_obj, - rewind=False, - size=None, - content_type=None, - num_retries=None, - client=None, - predefined_acl=None, - ): - """Upload the contents of this blob from a file-like object. - - The content type of the upload will be determined in order - of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The default value ('application/octet-stream') - - .. note:: - The effect of uploading to an existing blob depends on the - "versioning" and "lifecycle" policies defined on the blob's - bucket. In the absence of those policies, upload will - overwrite any existing contents. - - See the `object versioning`_ and `lifecycle`_ API documents - for details. - - Uploading a file with a `customer-supplied`_ encryption key: - - .. literalinclude:: snippets.py - :start-after: [START upload_from_file] - :end-before: [END upload_from_file] - :dedent: 4 - - The ``encryption_key`` should be a str or bytes with a length of at - least 32. - - For more fine-grained over the upload process, check out - `google-resumable-media`_. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type file_obj: file - :param file_obj: A file handle open for reading. - - :type rewind: bool - :param rewind: If True, seek to the beginning of the file handle before - writing the file to Cloud Storage. - - :type size: int - :param size: The number of bytes to be uploaded (which will be read - from ``file_obj``). If not provided, the upload will be - concluded once ``file_obj`` is exhausted. - - :type content_type: str - :param content_type: Optional type of content being uploaded. - - :type num_retries: int - :param num_retries: Number of upload retries. (Deprecated: This - argument will be removed in a future release.) - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :type predefined_acl: str - :param predefined_acl: (Optional) predefined access control list - - :raises: :class:`~google.cloud.exceptions.GoogleCloudError` - if the upload response returns an error status. - - .. _object versioning: https://cloud.google.com/storage/\ - docs/object-versioning - .. 
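# Upload-strategy sketch for _do_upload above: payloads of at most 8 MB
# (_MAX_MULTIPART_SIZE) go through one multipart request, while larger or
# unknown sizes use a chunked resumable upload. Names are placeholders.
import io
import os

from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-bucket")

small = io.BytesIO(b"x" * 1024)
bucket.blob("small.bin").upload_from_file(small, size=1024)  # multipart

big = io.BytesIO(os.urandom(20 * 1024 * 1024))
bucket.blob("big.bin").upload_from_file(big, size=20 * 1024 * 1024)  # resumable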
_lifecycle: https://cloud.google.com/storage/docs/lifecycle - """ - if num_retries is not None: - warnings.warn(_NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2) - - _maybe_rewind(file_obj, rewind=rewind) - predefined_acl = ACL.validate_predefined(predefined_acl) - - try: - created_json = self._do_upload( - client, file_obj, content_type, size, num_retries, predefined_acl - ) - self._set_properties(created_json) - except resumable_media.InvalidResponse as exc: - _raise_from_invalid_response(exc) - - def upload_from_filename( - self, filename, content_type=None, client=None, predefined_acl=None - ): - """Upload this blob's contents from the content of a named file. - - The content type of the upload will be determined in order - of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The value given by ``mimetypes.guess_type`` - - The default value ('application/octet-stream') - - .. note:: - The effect of uploading to an existing blob depends on the - "versioning" and "lifecycle" policies defined on the blob's - bucket. In the absence of those policies, upload will - overwrite any existing contents. - - See the `object versioning - `_ and - `lifecycle `_ - API documents for details. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type filename: str - :param filename: The path to the file. - - :type content_type: str - :param content_type: Optional type of content being uploaded. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :type predefined_acl: str - :param predefined_acl: (Optional) predefined access control list - """ - content_type = self._get_content_type(content_type, filename=filename) - - with open(filename, "rb") as file_obj: - total_bytes = os.fstat(file_obj.fileno()).st_size - self.upload_from_file( - file_obj, - content_type=content_type, - client=client, - size=total_bytes, - predefined_acl=predefined_acl, - ) - - def upload_from_string( - self, data, content_type="text/plain", client=None, predefined_acl=None - ): - """Upload contents of this blob from the provided string. - - .. note:: - The effect of uploading to an existing blob depends on the - "versioning" and "lifecycle" policies defined on the blob's - bucket. In the absence of those policies, upload will - overwrite any existing contents. - - See the `object versioning - `_ and - `lifecycle `_ - API documents for details. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type data: bytes or str - :param data: The data to store in this blob. If the value is - text, it will be encoded as UTF-8. - - :type content_type: str - :param content_type: Optional type of content being uploaded. Defaults - to ``'text/plain'``. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. 
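# Filename / string upload sketch for the two helpers above; the content
# type for the filename variant is guessed via mimetypes when not given.
# Paths and names are placeholders.
from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-bucket")

bucket.blob("report.csv").upload_from_filename("/tmp/report.csv")
bucket.blob("hello.txt").upload_from_string(u"hello world")  # text/plain by default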
- - :type predefined_acl: str - :param predefined_acl: (Optional) predefined access control list - """ - data = _to_bytes(data, encoding="utf-8") - string_buffer = BytesIO(data) - self.upload_from_file( - file_obj=string_buffer, - size=len(data), - content_type=content_type, - client=client, - predefined_acl=predefined_acl, - ) - - def create_resumable_upload_session( - self, content_type=None, size=None, origin=None, client=None - ): - """Create a resumable upload session. - - Resumable upload sessions allow you to start an upload session from - one client and complete the session in another. This method is called - by the initiator to set the metadata and limits. The initiator then - passes the session URL to the client that will upload the binary data. - The client performs a PUT request on the session URL to complete the - upload. This process allows untrusted clients to upload to an - access-controlled bucket. For more details, see the - `documentation on signed URLs`_. - - .. _documentation on signed URLs: - https://cloud.google.com/storage/\ - docs/access-control/signed-urls#signing-resumable - - The content type of the upload will be determined in order - of precedence: - - - The value passed in to this method (if not :data:`None`) - - The value stored on the current blob - - The default value ('application/octet-stream') - - .. note:: - The effect of uploading to an existing blob depends on the - "versioning" and "lifecycle" policies defined on the blob's - bucket. In the absence of those policies, upload will - overwrite any existing contents. - - See the `object versioning - `_ and - `lifecycle `_ - API documents for details. - - If :attr:`encryption_key` is set, the blob will be encrypted with - a `customer-supplied`_ encryption key. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type size: int - :param size: (Optional). The maximum number of bytes that can be - uploaded using this session. If the size is not known - when creating the session, this should be left blank. - - :type content_type: str - :param content_type: (Optional) Type of content being uploaded. - - :type origin: str - :param origin: (Optional) If set, the upload can only be completed - by a user-agent that uploads from the given origin. This - can be useful when passing the session to a web client. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :rtype: str - :returns: The resumable upload session URL. The upload can be - completed by making an HTTP PUT request with the - file's contents. - - :raises: :class:`google.cloud.exceptions.GoogleCloudError` - if the session creation response returns an error status. - """ - extra_headers = {} - if origin is not None: - # This header is specifically for client-side uploads, it - # determines the origins allowed for CORS. - extra_headers["Origin"] = origin - - try: - dummy_stream = BytesIO(b"") - # Send a fake the chunk size which we **know** will be acceptable - # to the `ResumableUpload` constructor. The chunk size only - # matters when **sending** bytes to an upload. 
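# Resumable-session sketch for create_resumable_upload_session above: the
# initiator creates the session URL and hands it to an untrusted client,
# which then PUTs the bytes itself. The origin and sizes are placeholders.
from google.cloud import storage

client = storage.Client()
blob = client.bucket("my-bucket").blob("upload-target.bin")

session_url = blob.create_resumable_upload_session(
    content_type="application/octet-stream",
    size=1024 * 1024,
    origin="https://example.com",   # restrict CORS origin for the uploader
)
# e.g. hand session_url to a browser, which performs:
#   PUT <session_url> with the 1 MiB payload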
- upload, _ = self._initiate_resumable_upload( - client, - dummy_stream, - content_type, - size, - None, - predefined_acl=None, - extra_headers=extra_headers, - chunk_size=self._CHUNK_SIZE_MULTIPLE, - ) - - return upload.resumable_url - except resumable_media.InvalidResponse as exc: - _raise_from_invalid_response(exc) - - def get_iam_policy(self, client=None, requested_policy_version=None): - """Retrieve the IAM policy for the object. - - .. note: - - Blob- / object-level IAM support does not yet exist and methods - currently call an internal ACL backend not providing any utility - beyond the blob's :attr:`acl` at this time. The API may be enhanced - in the future and is currently undocumented. Use :attr:`acl` for - managing object access control. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current object's bucket. - - :type requested_policy_version: int or ``NoneType`` - :param requested_policy_version: Optional. The version of IAM policies to request. - If a policy with a condition is requested without - setting this, the server will return an error. - This must be set to a value of 3 to retrieve IAM - policies containing conditions. This is to prevent - client code that isn't aware of IAM conditions from - interpreting and modifying policies incorrectly. - The service might return a policy with version lower - than the one that was requested, based on the - feature syntax in the policy fetched. - - :rtype: :class:`google.api_core.iam.Policy` - :returns: the policy instance, based on the resource returned from - the ``getIamPolicy`` API request. - """ - client = self._require_client(client) - - query_params = {} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - if requested_policy_version is not None: - query_params["optionsRequestedPolicyVersion"] = requested_policy_version - - info = client._connection.api_request( - method="GET", - path="%s/iam" % (self.path,), - query_params=query_params, - _target_object=None, - ) - return Policy.from_api_repr(info) - - def set_iam_policy(self, policy, client=None): - """Update the IAM policy for the bucket. - - .. note: - - Blob- / object-level IAM support does not yet exist and methods - currently call an internal ACL backend not providing any utility - beyond the blob's :attr:`acl` at this time. The API may be enhanced - in the future and is currently undocumented. Use :attr:`acl` for - managing object access control. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type policy: :class:`google.api_core.iam.Policy` - :param policy: policy instance used to update bucket's IAM policy. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`google.api_core.iam.Policy` - :returns: the policy instance, based on the resource returned from - the ``setIamPolicy`` API request. 
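A minimal sketch of the session hand-off described above, assuming the third-party ``requests`` package for the uploader and hypothetical bucket/object names:

import requests  # not part of this library; any HTTP client can send the PUT
from google.cloud import storage

client = storage.Client()
blob = client.bucket("example-bucket").blob("big/archive.tar")

# Trusted initiator: create the session and hand the URL to the uploader.
session_url = blob.create_resumable_upload_session(content_type="application/x-tar")

# Uploader: complete the upload with a single PUT of the object bytes.
with open("/tmp/archive.tar", "rb") as fh:
    response = requests.put(
        session_url, data=fh, headers={"Content-Type": "application/x-tar"}
    )
response.raise_for_status()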
- """ - client = self._require_client(client) - - query_params = {} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - resource = policy.to_api_repr() - resource["resourceId"] = self.path - info = client._connection.api_request( - method="PUT", - path="%s/iam" % (self.path,), - query_params=query_params, - data=resource, - _target_object=None, - ) - return Policy.from_api_repr(info) - - def test_iam_permissions(self, permissions, client=None): - """API call: test permissions - - .. note: - - Blob- / object-level IAM support does not yet exist and methods - currently call an internal ACL backend not providing any utility - beyond the blob's :attr:`acl` at this time. The API may be enhanced - in the future and is currently undocumented. Use :attr:`acl` for - managing object access control. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type permissions: list of string - :param permissions: the permissions to check - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: list of string - :returns: the permissions returned by the ``testIamPermissions`` API - request. - """ - client = self._require_client(client) - query_params = {"permissions": permissions} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - path = "%s/iam/testPermissions" % (self.path,) - resp = client._connection.api_request( - method="GET", path=path, query_params=query_params - ) - - return resp.get("permissions", []) - - def make_public(self, client=None): - """Update blob's ACL, granting read access to anonymous users. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - """ - self.acl.all().grant_read() - self.acl.save(client=client) - - def make_private(self, client=None): - """Update blob's ACL, revoking read access for anonymous users. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - """ - self.acl.all().revoke_read() - self.acl.save(client=client) - - def compose(self, sources, client=None): - """Concatenate source blobs into this one. - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type sources: list of :class:`Blob` - :param sources: blobs whose contents will be composed into this blob. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - """ - client = self._require_client(client) - query_params = {} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - request = { - "sourceObjects": [{"name": source.name} for source in sources], - "destination": self._properties.copy(), - } - api_response = client._connection.api_request( - method="POST", - path=self.path + "/compose", - query_params=query_params, - data=request, - _target_object=self, - ) - self._set_properties(api_response) - - def rewrite(self, source, token=None, client=None): - """Rewrite source blob into this one. 
- - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type source: :class:`Blob` - :param source: blob whose contents will be rewritten into this blob. - - :type token: str - :param token: Optional. Token returned from an earlier, not-completed - call to rewrite the same source blob. If passed, - result will include updated status, total bytes written. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :rtype: tuple - :returns: ``(token, bytes_rewritten, total_bytes)``, where ``token`` - is a rewrite token (``None`` if the rewrite is complete), - ``bytes_rewritten`` is the number of bytes rewritten so far, - and ``total_bytes`` is the total number of bytes to be - rewritten. - """ - client = self._require_client(client) - headers = _get_encryption_headers(self._encryption_key) - headers.update(_get_encryption_headers(source._encryption_key, source=True)) - - query_params = self._query_params - if "generation" in query_params: - del query_params["generation"] - - if token: - query_params["rewriteToken"] = token - - if source.generation: - query_params["sourceGeneration"] = source.generation - - if self.kms_key_name is not None: - query_params["destinationKmsKeyName"] = self.kms_key_name - - api_response = client._connection.api_request( - method="POST", - path=source.path + "/rewriteTo" + self.path, - query_params=query_params, - data=self._properties, - headers=headers, - _target_object=self, - ) - rewritten = int(api_response["totalBytesRewritten"]) - size = int(api_response["objectSize"]) - - # The resource key is set if and only if the API response is - # completely done. Additionally, there is no rewrite token to return - # in this case. - if api_response["done"]: - self._set_properties(api_response["resource"]) - return None, rewritten, size - - return api_response["rewriteToken"], rewritten, size - - def update_storage_class(self, new_class, client=None): - """Update blob's storage class via a rewrite-in-place. This helper will - wait for the rewrite to complete before returning, so it may take some - time for large files. - - See - https://cloud.google.com/storage/docs/per-object-storage-class - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type new_class: str - :param new_class: - new storage class for the object. One of: - :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, - or - :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. 
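The rewrite token loop described above is what ``update_storage_class`` runs internally; a sketch of both spellings, with hypothetical object names:

from google.cloud import storage

client = storage.Client()
bucket = client.bucket("example-bucket")
blob = bucket.blob("archive/2019.tar")

# Helper: rewrite in place until the storage-class change completes.
blob.update_storage_class("COLDLINE")

# Equivalent manual loop using rewrite() directly, here copying to a new object.
destination = bucket.blob("archive/2019-copy.tar")
token, rewritten, total = destination.rewrite(blob)
while token is not None:
    token, rewritten, total = destination.rewrite(blob, token=token)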
- """ - if new_class not in self.STORAGE_CLASSES: - raise ValueError("Invalid storage class: %s" % (new_class,)) - - # Update current blob's storage class prior to rewrite - self._patch_property("storageClass", new_class) - - # Execute consecutive rewrite operations until operation is done - token, _, _ = self.rewrite(self) - while token is not None: - token, _, _ = self.rewrite(self, token=token) - - cache_control = _scalar_property("cacheControl") - """HTTP 'Cache-Control' header for this object. - - See `RFC 7234`_ and `API reference docs`_. - - :rtype: str or ``NoneType`` - - .. _RFC 7234: https://tools.ietf.org/html/rfc7234#section-5.2 - """ - - content_disposition = _scalar_property("contentDisposition") - """HTTP 'Content-Disposition' header for this object. - - See `RFC 6266`_ and `API reference docs`_. - - :rtype: str or ``NoneType`` - - .. _RFC 6266: https://tools.ietf.org/html/rfc7234#section-5.2 - """ - - content_encoding = _scalar_property("contentEncoding") - """HTTP 'Content-Encoding' header for this object. - - See `RFC 7231`_ and `API reference docs`_. - - :rtype: str or ``NoneType`` - - .. _RFC 7231: https://tools.ietf.org/html/rfc7231#section-3.1.2.2 - """ - - content_language = _scalar_property("contentLanguage") - """HTTP 'Content-Language' header for this object. - - See `BCP47`_ and `API reference docs`_. - - :rtype: str or ``NoneType`` - - .. _BCP47: https://tools.ietf.org/html/bcp47 - """ - - content_type = _scalar_property(_CONTENT_TYPE_FIELD) - """HTTP 'Content-Type' header for this object. - - See `RFC 2616`_ and `API reference docs`_. - - :rtype: str or ``NoneType`` - - .. _RFC 2616: https://tools.ietf.org/html/rfc2616#section-14.17 - """ - - crc32c = _scalar_property("crc32c") - """CRC32C checksum for this object. - - See `RFC 4960`_ and `API reference docs`_. - - If not set before upload, the server will compute the hash. - - :rtype: str or ``NoneType`` - - .. _RFC 4960: https://tools.ietf.org/html/rfc4960#appendix-B - """ - - @property - def component_count(self): - """Number of underlying components that make up this object. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: int or ``NoneType`` - :returns: The component count (in case of a composed object) or - ``None`` if the blob's resource has not been loaded from - the server. This property will not be set on objects - not created via ``compose``. - """ - component_count = self._properties.get("componentCount") - if component_count is not None: - return int(component_count) - - @property - def etag(self): - """Retrieve the ETag for the object. - - See `RFC 2616 (etags)`_ and `API reference docs`_. - - :rtype: str or ``NoneType`` - :returns: The blob etag or ``None`` if the blob's resource has not - been loaded from the server. - - .. _RFC 2616 (etags): https://tools.ietf.org/html/rfc2616#section-3.11 - """ - return self._properties.get("etag") - - event_based_hold = _scalar_property("eventBasedHold") - """Is an event-based hold active on the object? - - See `API reference docs`_. - - If the property is not set locally, returns :data:`None`. - - :rtype: bool or ``NoneType`` - """ - - @property - def generation(self): - """Retrieve the generation for the object. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: int or ``NoneType`` - :returns: The generation of the blob or ``None`` if the blob's - resource has not been loaded from the server. 
- """ - generation = self._properties.get("generation") - if generation is not None: - return int(generation) - - @property - def id(self): - """Retrieve the ID for the object. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - The ID consists of the bucket name, object name, and generation number. - - :rtype: str or ``NoneType`` - :returns: The ID of the blob or ``None`` if the blob's - resource has not been loaded from the server. - """ - return self._properties.get("id") - - md5_hash = _scalar_property("md5Hash") - """MD5 hash for this object. - - See `RFC 1321`_ and `API reference docs`_. - - If not set before upload, the server will compute the hash. - - :rtype: str or ``NoneType`` - - .. _RFC 1321: https://tools.ietf.org/html/rfc1321 - """ - - @property - def media_link(self): - """Retrieve the media download URI for the object. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: str or ``NoneType`` - :returns: The media link for the blob or ``None`` if the blob's - resource has not been loaded from the server. - """ - return self._properties.get("mediaLink") - - @property - def metadata(self): - """Retrieve arbitrary/application specific metadata for the object. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :setter: Update arbitrary/application specific metadata for the - object. - :getter: Retrieve arbitrary/application specific metadata for - the object. - - :rtype: dict or ``NoneType`` - :returns: The metadata associated with the blob or ``None`` if the - property is not set. - """ - return copy.deepcopy(self._properties.get("metadata")) - - @metadata.setter - def metadata(self, value): - """Update arbitrary/application specific metadata for the object. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :type value: dict - :param value: (Optional) The blob metadata to set. - """ - value = {k: str(v) for k, v in value.items()} - self._patch_property("metadata", value) - - @property - def metageneration(self): - """Retrieve the metageneration for the object. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: int or ``NoneType`` - :returns: The metageneration of the blob or ``None`` if the blob's - resource has not been loaded from the server. - """ - metageneration = self._properties.get("metageneration") - if metageneration is not None: - return int(metageneration) - - @property - def owner(self): - """Retrieve info about the owner of the object. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: dict or ``NoneType`` - :returns: Mapping of owner's role/ID, or ``None`` if the blob's - resource has not been loaded from the server. - """ - return copy.deepcopy(self._properties.get("owner")) - - @property - def retention_expiration_time(self): - """Retrieve timestamp at which the object's retention period expires. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the property is not set locally. - """ - value = self._properties.get("retentionExpirationTime") - if value is not None: - return _rfc3339_to_datetime(value) - - @property - def self_link(self): - """Retrieve the URI for the object. 
- - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: str or ``NoneType`` - :returns: The self link for the blob or ``None`` if the blob's - resource has not been loaded from the server. - """ - return self._properties.get("selfLink") - - @property - def size(self): - """Size of the object, in bytes. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: int or ``NoneType`` - :returns: The size of the blob or ``None`` if the blob's - resource has not been loaded from the server. - """ - size = self._properties.get("size") - if size is not None: - return int(size) - - @property - def kms_key_name(self): - """Resource name of Cloud KMS key used to encrypt the blob's contents. - - :rtype: str or ``NoneType`` - :returns: - The resource name or ``None`` if no Cloud KMS key was used, - or the blob's resource has not been loaded from the server. - """ - return self._properties.get("kmsKeyName") - - storage_class = _scalar_property("storageClass") - """Retrieve the storage class for the object. - - This can only be set at blob / object **creation** time. If you'd - like to change the storage class **after** the blob / object already - exists in a bucket, call :meth:`update_storage_class` (which uses - :meth:`rewrite`). - - See https://cloud.google.com/storage/docs/storage-classes - - :rtype: str or ``NoneType`` - :returns: - If set, one of - :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_STORAGE_CLASS`, - else ``None``. - """ - - temporary_hold = _scalar_property("temporaryHold") - """Is a temporary hold active on the object? - - See `API reference docs`_. - - If the property is not set locally, returns :data:`None`. - - :rtype: bool or ``NoneType`` - """ - - @property - def time_deleted(self): - """Retrieve the timestamp at which the object was deleted. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the blob's resource has not been loaded from - the server (see :meth:`reload`). If the blob has - not been deleted, this will never be set. - """ - value = self._properties.get("timeDeleted") - if value is not None: - return _rfc3339_to_datetime(value) - - @property - def time_created(self): - """Retrieve the timestamp at which the object was created. - - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the blob's resource has not been loaded from - the server (see :meth:`reload`). - """ - value = self._properties.get("timeCreated") - if value is not None: - return _rfc3339_to_datetime(value) - - @property - def updated(self): - """Retrieve the timestamp at which the object was updated. 
- - See https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the blob's resource has not been loaded from - the server (see :meth:`reload`). - """ - value = self._properties.get("updated") - if value is not None: - return _rfc3339_to_datetime(value) - - -def _get_encryption_headers(key, source=False): - """Builds customer encryption key headers - - :type key: bytes - :param key: 32 byte key to build request key and hash. - - :type source: bool - :param source: If true, return headers for the "source" blob; otherwise, - return headers for the "destination" blob. - - :rtype: dict - :returns: dict of HTTP headers being sent in request. - """ - if key is None: - return {} - - key = _to_bytes(key) - key_hash = hashlib.sha256(key).digest() - key_hash = base64.b64encode(key_hash) - key = base64.b64encode(key) - - if source: - prefix = "X-Goog-Copy-Source-Encryption-" - else: - prefix = "X-Goog-Encryption-" - - return { - prefix + "Algorithm": "AES256", - prefix + "Key": _bytes_to_unicode(key), - prefix + "Key-Sha256": _bytes_to_unicode(key_hash), - } - - -def _quote(value, safe=b"~"): - """URL-quote a string. - - If the value is unicode, this method first UTF-8 encodes it as bytes and - then quotes the bytes. (In Python 3, ``urllib.parse.quote`` does this - encoding automatically, but in Python 2, non-ASCII characters cannot be - quoted.) - - :type value: str or bytes - :param value: The value to be URL-quoted. - - :type safe: bytes - :param safe: Bytes *not* to be quoted. By default, includes only ``b'~'``. - - :rtype: str - :returns: The encoded value (bytes in Python 2, unicode in Python 3). - """ - value = _to_bytes(value, encoding="utf-8") - return quote(value, safe=safe) - - -def _maybe_rewind(stream, rewind=False): - """Rewind the stream if desired. - - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. - - :type rewind: bool - :param rewind: Indicates if we should seek to the beginning of the stream. - """ - if rewind: - stream.seek(0, os.SEEK_SET) - - -def _raise_from_invalid_response(error): - """Re-wrap and raise an ``InvalidResponse`` exception. - - :type error: :exc:`google.resumable_media.InvalidResponse` - :param error: A caught exception from the ``google-resumable-media`` - library. - - :raises: :class:`~google.cloud.exceptions.GoogleCloudError` corresponding - to the failed status code - """ - response = error.response - error_message = str(error) - - message = u"{method} {url}: {error}".format( - method=response.request.method, url=response.request.url, error=error_message - ) - - raise exceptions.from_http_status(response.status_code, message, response=response) - - -def _add_query_parameters(base_url, name_value_pairs): - """Add one query parameter to a base URL. - - :type base_url: string - :param base_url: Base URL (may already contain query parameters) - - :type name_value_pairs: list of (string, string) tuples. - :param name_value_pairs: Names and values of the query parameters to add - - :rtype: string - :returns: URL with additional query strings appended. 
- """ - if len(name_value_pairs) == 0: - return base_url - - scheme, netloc, path, query, frag = urlsplit(base_url) - query = parse_qsl(query) - query.extend(name_value_pairs) - return urlunsplit((scheme, netloc, path, urlencode(query), frag)) diff --git a/storage/google/cloud/storage/bucket.py b/storage/google/cloud/storage/bucket.py deleted file mode 100644 index ed275e88fcec..000000000000 --- a/storage/google/cloud/storage/bucket.py +++ /dev/null @@ -1,2318 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with Google Cloud Storage buckets.""" - -import base64 -import copy -import datetime -import json -import warnings - -import six -from six.moves.urllib.parse import urlsplit - -from google.api_core import page_iterator -from google.api_core import datetime_helpers -from google.cloud._helpers import _datetime_to_rfc3339 -from google.cloud._helpers import _NOW -from google.cloud._helpers import _rfc3339_to_datetime -from google.cloud.exceptions import NotFound -from google.api_core.iam import Policy -from google.cloud.storage import _signing -from google.cloud.storage._helpers import _PropertyMixin -from google.cloud.storage._helpers import _scalar_property -from google.cloud.storage._helpers import _validate_name -from google.cloud.storage._signing import generate_signed_url_v2 -from google.cloud.storage._signing import generate_signed_url_v4 -from google.cloud.storage.acl import BucketACL -from google.cloud.storage.acl import DefaultObjectACL -from google.cloud.storage.blob import Blob -from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS -from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS -from google.cloud.storage.constants import DUAL_REGION_LOCATION_TYPE -from google.cloud.storage.constants import ( - DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, -) -from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS -from google.cloud.storage.constants import MULTI_REGION_LOCATION_TYPE -from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS -from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS -from google.cloud.storage.constants import REGION_LOCATION_TYPE -from google.cloud.storage.constants import STANDARD_STORAGE_CLASS -from google.cloud.storage.notification import BucketNotification -from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT - - -_UBLA_BPO_ENABLED_MESSAGE = ( - "Pass only one of 'uniform_bucket_level_access_enabled' / " - "'bucket_policy_only_enabled' to 'IAMConfiguration'." -) -_BPO_ENABLED_MESSAGE = ( - "'IAMConfiguration.bucket_policy_only_enabled' is deprecated. " - "Instead, use 'IAMConfiguration.uniform_bucket_level_access_enabled'." -) -_UBLA_BPO_LOCK_TIME_MESSAGE = ( - "Pass only one of 'uniform_bucket_level_access_lock_time' / " - "'bucket_policy_only_lock_time' to 'IAMConfiguration'." -) -_BPO_LOCK_TIME_MESSAGE = ( - "'IAMConfiguration.bucket_policy_only_lock_time' is deprecated. 
" - "Instead, use 'IAMConfiguration.uniform_bucket_level_access_lock_time'." -) -_LOCATION_SETTER_MESSAGE = ( - "Assignment to 'Bucket.location' is deprecated, as it is only " - "valid before the bucket is created. Instead, pass the location " - "to `Bucket.create`." -) -_API_ACCESS_ENDPOINT = "https://storage.googleapis.com" - - -def _blobs_page_start(iterator, page, response): - """Grab prefixes after a :class:`~google.cloud.iterator.Page` started. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type page: :class:`~google.cloud.api.core.page_iterator.Page` - :param page: The page that was just created. - - :type response: dict - :param response: The JSON API response for a page of blobs. - """ - page.prefixes = tuple(response.get("prefixes", ())) - iterator.prefixes.update(page.prefixes) - - -def _item_to_blob(iterator, item): - """Convert a JSON blob to the native object. - - .. note:: - - This assumes that the ``bucket`` attribute has been - added to the iterator after being created. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that has retrieved the item. - - :type item: dict - :param item: An item to be converted to a blob. - - :rtype: :class:`.Blob` - :returns: The next blob in the page. - """ - name = item.get("name") - blob = Blob(name, bucket=iterator.bucket) - blob._set_properties(item) - return blob - - -def _item_to_notification(iterator, item): - """Convert a JSON blob to the native object. - - .. note:: - - This assumes that the ``bucket`` attribute has been - added to the iterator after being created. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that has retrieved the item. - - :type item: dict - :param item: An item to be converted to a blob. - - :rtype: :class:`.BucketNotification` - :returns: The next notification being iterated. - """ - return BucketNotification.from_api_repr(item, bucket=iterator.bucket) - - -class LifecycleRuleConditions(dict): - """Map a single lifecycle rule for a bucket. - - See: https://cloud.google.com/storage/docs/lifecycle - - :type age: int - :param age: (optional) apply rule action to items whos age, in days, - exceeds this value. - - :type created_before: datetime.date - :param created_before: (optional) apply rule action to items created - before this date. - - :type is_live: bool - :param is_live: (optional) if true, apply rule action to non-versioned - items, or to items with no newer versions. If false, apply - rule action to versioned items with at least one newer - version. - - :type matches_storage_class: list(str), one or more of - :attr:`Bucket.STORAGE_CLASSES`. - :param matches_storage_class: (optional) apply rule action to items which - whose storage class matches this value. - - :type number_of_newer_versions: int - :param number_of_newer_versions: (optional) apply rule action to versioned - items having N newer versions. - - :raises ValueError: if no arguments are passed. 
- """ - - def __init__( - self, - age=None, - created_before=None, - is_live=None, - matches_storage_class=None, - number_of_newer_versions=None, - _factory=False, - ): - conditions = {} - - if age is not None: - conditions["age"] = age - - if created_before is not None: - conditions["createdBefore"] = created_before.isoformat() - - if is_live is not None: - conditions["isLive"] = is_live - - if matches_storage_class is not None: - conditions["matchesStorageClass"] = matches_storage_class - - if number_of_newer_versions is not None: - conditions["numNewerVersions"] = number_of_newer_versions - - if not _factory and not conditions: - raise ValueError("Supply at least one condition") - - super(LifecycleRuleConditions, self).__init__(conditions) - - @classmethod - def from_api_repr(cls, resource): - """Factory: construct instance from resource. - - :type resource: dict - :param resource: mapping as returned from API call. - - :rtype: :class:`LifecycleRuleConditions` - :returns: Instance created from resource. - """ - instance = cls(_factory=True) - instance.update(resource) - return instance - - @property - def age(self): - """Conditon's age value.""" - return self.get("age") - - @property - def created_before(self): - """Conditon's created_before value.""" - before = self.get("createdBefore") - if before is not None: - return datetime_helpers.from_iso8601_date(before) - - @property - def is_live(self): - """Conditon's 'is_live' value.""" - return self.get("isLive") - - @property - def matches_storage_class(self): - """Conditon's 'matches_storage_class' value.""" - return self.get("matchesStorageClass") - - @property - def number_of_newer_versions(self): - """Conditon's 'number_of_newer_versions' value.""" - return self.get("numNewerVersions") - - -class LifecycleRuleDelete(dict): - """Map a lifecycle rule deleting matching items. - - :type kw: dict - :params kw: arguments passed to :class:`LifecycleRuleConditions`. - """ - - def __init__(self, **kw): - conditions = LifecycleRuleConditions(**kw) - rule = {"action": {"type": "Delete"}, "condition": dict(conditions)} - super(LifecycleRuleDelete, self).__init__(rule) - - @classmethod - def from_api_repr(cls, resource): - """Factory: construct instance from resource. - - :type resource: dict - :param resource: mapping as returned from API call. - - :rtype: :class:`LifecycleRuleDelete` - :returns: Instance created from resource. - """ - instance = cls(_factory=True) - instance.update(resource) - return instance - - -class LifecycleRuleSetStorageClass(dict): - """Map a lifecycle rule upating storage class of matching items. - - :type storage_class: str, one of :attr:`Bucket.STORAGE_CLASSES`. - :param storage_class: new storage class to assign to matching items. - - :type kw: dict - :params kw: arguments passed to :class:`LifecycleRuleConditions`. - """ - - def __init__(self, storage_class, **kw): - conditions = LifecycleRuleConditions(**kw) - rule = { - "action": {"type": "SetStorageClass", "storageClass": storage_class}, - "condition": dict(conditions), - } - super(LifecycleRuleSetStorageClass, self).__init__(rule) - - @classmethod - def from_api_repr(cls, resource): - """Factory: construct instance from resource. - - :type resource: dict - :param resource: mapping as returned from API call. - - :rtype: :class:`LifecycleRuleDelete` - :returns: Instance created from resource. 
- """ - action = resource["action"] - instance = cls(action["storageClass"], _factory=True) - instance.update(resource) - return instance - - -_default = object() - - -class IAMConfiguration(dict): - """Map a bucket's IAM configuration. - - :type bucket: :class:`Bucket` - :params bucket: Bucket for which this instance is the policy. - - :type uniform_bucket_level_access_enabled: bool - :params bucket_policy_only_enabled: - (optional) whether the IAM-only policy is enabled for the bucket. - - :type uniform_bucket_level_locked_time: :class:`datetime.datetime` - :params uniform_bucket_level_locked_time: - (optional) When the bucket's IAM-only policy was enabled. - This value should normally only be set by the back-end API. - - :type bucket_policy_only_enabled: bool - :params bucket_policy_only_enabled: - Deprecated alias for :data:`uniform_bucket_level_access_enabled`. - - :type bucket_policy_only_locked_time: :class:`datetime.datetime` - :params bucket_policy_only_locked_time: - Deprecated alias for :data:`uniform_bucket_level_access_locked_time`. - """ - - def __init__( - self, - bucket, - uniform_bucket_level_access_enabled=_default, - uniform_bucket_level_access_locked_time=_default, - bucket_policy_only_enabled=_default, - bucket_policy_only_locked_time=_default, - ): - if bucket_policy_only_enabled is not _default: - - if uniform_bucket_level_access_enabled is not _default: - raise ValueError(_UBLA_BPO_ENABLED_MESSAGE) - - warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2) - uniform_bucket_level_access_enabled = bucket_policy_only_enabled - - if bucket_policy_only_locked_time is not _default: - - if uniform_bucket_level_access_locked_time is not _default: - raise ValueError(_UBLA_BPO_LOCK_TIME_MESSAGE) - - warnings.warn(_BPO_LOCK_TIME_MESSAGE, DeprecationWarning, stacklevel=2) - uniform_bucket_level_access_locked_time = bucket_policy_only_locked_time - - if uniform_bucket_level_access_enabled is _default: - uniform_bucket_level_access_enabled = False - - data = { - "uniformBucketLevelAccess": {"enabled": uniform_bucket_level_access_enabled} - } - if uniform_bucket_level_access_locked_time is not _default: - data["uniformBucketLevelAccess"]["lockedTime"] = _datetime_to_rfc3339( - uniform_bucket_level_access_locked_time - ) - super(IAMConfiguration, self).__init__(data) - self._bucket = bucket - - @classmethod - def from_api_repr(cls, resource, bucket): - """Factory: construct instance from resource. - - :type bucket: :class:`Bucket` - :params bucket: Bucket for which this instance is the policy. - - :type resource: dict - :param resource: mapping as returned from API call. - - :rtype: :class:`IAMConfiguration` - :returns: Instance created from resource. - """ - instance = cls(bucket) - instance.update(resource) - return instance - - @property - def bucket(self): - """Bucket for which this instance is the policy. - - :rtype: :class:`Bucket` - :returns: the instance's bucket. - """ - return self._bucket - - @property - def uniform_bucket_level_access_enabled(self): - """If set, access checks only use bucket-level IAM policies or above. - - :rtype: bool - :returns: whether the bucket is configured to allow only IAM. 
- """ - ubla = self.get("uniformBucketLevelAccess", {}) - return ubla.get("enabled", False) - - @uniform_bucket_level_access_enabled.setter - def uniform_bucket_level_access_enabled(self, value): - ubla = self.setdefault("uniformBucketLevelAccess", {}) - ubla["enabled"] = bool(value) - self.bucket._patch_property("iamConfiguration", self) - - @property - def uniform_bucket_level_access_locked_time(self): - """Deadline for changing :attr:`uniform_bucket_level_access_enabled` from true to false. - - If the bucket's :attr:`uniform_bucket_level_access_enabled` is true, this property - is time time after which that setting becomes immutable. - - If the bucket's :attr:`uniform_bucket_level_access_enabled` is false, this property - is ``None``. - - :rtype: Union[:class:`datetime.datetime`, None] - :returns: (readonly) Time after which :attr:`uniform_bucket_level_access_enabled` will - be frozen as true. - """ - ubla = self.get("uniformBucketLevelAccess", {}) - stamp = ubla.get("lockedTime") - if stamp is not None: - stamp = _rfc3339_to_datetime(stamp) - return stamp - - @property - def bucket_policy_only_enabled(self): - """Deprecated alias for :attr:`uniform_bucket_level_access_enabled`. - - :rtype: bool - :returns: whether the bucket is configured to allow only IAM. - """ - return self.uniform_bucket_level_access_enabled - - @bucket_policy_only_enabled.setter - def bucket_policy_only_enabled(self, value): - warnings.warn(_BPO_ENABLED_MESSAGE, DeprecationWarning, stacklevel=2) - self.uniform_bucket_level_access_enabled = value - - @property - def bucket_policy_only_locked_time(self): - """Deprecated alias for :attr:`uniform_bucket_level_access_locked_time`. - - :rtype: Union[:class:`datetime.datetime`, None] - :returns: - (readonly) Time after which :attr:`bucket_policy_only_enabled` will - be frozen as true. - """ - return self.uniform_bucket_level_access_locked_time - - -class Bucket(_PropertyMixin): - """A class representing a Bucket on Cloud Storage. - - :type client: :class:`google.cloud.storage.client.Client` - :param client: A client which holds credentials and project configuration - for the bucket (which requires a project). - - :type name: str - :param name: The name of the bucket. Bucket names must start and end with a - number or letter. - - :type user_project: str - :param user_project: (Optional) the project ID to be billed for API - requests made via this instance. - """ - - _MAX_OBJECTS_FOR_ITERATION = 256 - """Maximum number of existing objects allowed in iteration. - - This is used in Bucket.delete() and Bucket.make_public(). - """ - - STORAGE_CLASSES = ( - STANDARD_STORAGE_CLASS, - NEARLINE_STORAGE_CLASS, - COLDLINE_STORAGE_CLASS, - ARCHIVE_STORAGE_CLASS, - MULTI_REGIONAL_LEGACY_STORAGE_CLASS, # legacy - REGIONAL_LEGACY_STORAGE_CLASS, # legacy - DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, # legacy - ) - """Allowed values for :attr:`storage_class`. - - Default value is :attr:`STANDARD_STORAGE_CLASS`. 
- - See - https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass - https://cloud.google.com/storage/docs/storage-classes - """ - - _LOCATION_TYPES = ( - MULTI_REGION_LOCATION_TYPE, - REGION_LOCATION_TYPE, - DUAL_REGION_LOCATION_TYPE, - ) - """Allowed values for :attr:`location_type`.""" - - def __init__(self, client, name=None, user_project=None): - name = _validate_name(name) - super(Bucket, self).__init__(name=name) - self._client = client - self._acl = BucketACL(self) - self._default_object_acl = DefaultObjectACL(self) - self._label_removals = set() - self._user_project = user_project - - def __repr__(self): - return "" % (self.name,) - - @property - def client(self): - """The client bound to this bucket.""" - return self._client - - def _set_properties(self, value): - """Set the properties for the current object. - - :type value: dict or :class:`google.cloud.storage.batch._FutureDict` - :param value: The properties to be set. - """ - self._label_removals.clear() - return super(Bucket, self)._set_properties(value) - - @property - def user_project(self): - """Project ID to be billed for API requests made via this bucket. - - If unset, API requests are billed to the bucket owner. - - :rtype: str - """ - return self._user_project - - @classmethod - def from_string(cls, uri, client=None): - """Get a constructor for bucket object by URI. - - :type uri: str - :param uri: The bucket uri pass to get bucket object. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. - - :rtype: :class:`google.cloud.storage.bucket.Bucket` - :returns: The bucket object created. - - Example: - Get a constructor for bucket object by URI.. - - >>> from google.cloud import storage - >>> from google.cloud.storage.bucket import Bucket - >>> client = storage.Client() - >>> bucket = Bucket.from_string("gs://bucket",client) - """ - scheme, netloc, path, query, frag = urlsplit(uri) - - if scheme != "gs": - raise ValueError("URI scheme must be gs") - - return cls(client, name=netloc) - - def blob( - self, - blob_name, - chunk_size=None, - encryption_key=None, - kms_key_name=None, - generation=None, - ): - """Factory constructor for blob object. - - .. note:: - This will not make an HTTP request; it simply instantiates - a blob object owned by this bucket. - - :type blob_name: str - :param blob_name: The name of the blob to be instantiated. - - :type chunk_size: int - :param chunk_size: The size of a chunk of data whenever iterating - (in bytes). This must be a multiple of 256 KB per - the API specification. - - :type encryption_key: bytes - :param encryption_key: - Optional 32 byte encryption key for customer-supplied encryption. - - :type kms_key_name: str - :param kms_key_name: - Optional resource name of KMS key used to encrypt blob's content. - - :type generation: long - :param generation: Optional. If present, selects a specific revision of - this object. - - :rtype: :class:`google.cloud.storage.blob.Blob` - :returns: The blob object created. - """ - return Blob( - name=blob_name, - bucket=self, - chunk_size=chunk_size, - encryption_key=encryption_key, - kms_key_name=kms_key_name, - generation=generation, - ) - - def notification( - self, - topic_name, - topic_project=None, - custom_attributes=None, - event_types=None, - blob_name_prefix=None, - payload_format=NONE_PAYLOAD_FORMAT, - ): - """Factory: create a notification resource for the bucket. - - See: :class:`.BucketNotification` for parameters. 
- - :rtype: :class:`.BucketNotification` - """ - return BucketNotification( - self, - topic_name, - topic_project=topic_project, - custom_attributes=custom_attributes, - event_types=event_types, - blob_name_prefix=blob_name_prefix, - payload_format=payload_format, - ) - - def exists(self, client=None): - """Determines whether or not this bucket exists. - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: bool - :returns: True if the bucket exists in Cloud Storage. - """ - client = self._require_client(client) - # We only need the status code (200 or not) so we seek to - # minimize the returned payload. - query_params = {"fields": "name"} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - try: - # We intentionally pass `_target_object=None` since fields=name - # would limit the local properties. - client._connection.api_request( - method="GET", - path=self.path, - query_params=query_params, - _target_object=None, - ) - # NOTE: This will not fail immediately in a batch. However, when - # Batch.finish() is called, the resulting `NotFound` will be - # raised. - return True - except NotFound: - return False - - def create( - self, - client=None, - project=None, - location=None, - predefined_acl=None, - predefined_default_object_acl=None, - ): - """Creates current bucket. - - If the bucket already exists, will raise - :class:`google.cloud.exceptions.Conflict`. - - This implements "storage.buckets.insert". - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :type project: str - :param project: Optional. The project under which the bucket is to - be created. If not passed, uses the project set on - the client. - :raises ValueError: if :attr:`user_project` is set. - :raises ValueError: if ``project`` is None and client's - :attr:`project` is also None. - - :type location: str - :param location: Optional. The location of the bucket. If not passed, - the default location, US, will be used. See - https://cloud.google.com/storage/docs/bucket-locations - - :type predefined_acl: str - :param predefined_acl: - Optional. Name of predefined ACL to apply to bucket. See: - https://cloud.google.com/storage/docs/access-control/lists#predefined-acl - - :type predefined_default_object_acl: str - :param predefined_default_object_acl: - Optional. Name of predefined ACL to apply to bucket's objects. 
See: - https://cloud.google.com/storage/docs/access-control/lists#predefined-acl - """ - if self.user_project is not None: - raise ValueError("Cannot create bucket with 'user_project' set.") - - client = self._require_client(client) - - if project is None: - project = client.project - - if project is None: - raise ValueError("Client project not set: pass an explicit project.") - - query_params = {"project": project} - - if predefined_acl is not None: - predefined_acl = BucketACL.validate_predefined(predefined_acl) - query_params["predefinedAcl"] = predefined_acl - - if predefined_default_object_acl is not None: - predefined_default_object_acl = DefaultObjectACL.validate_predefined( - predefined_default_object_acl - ) - query_params["predefinedDefaultObjectAcl"] = predefined_default_object_acl - - properties = {key: self._properties[key] for key in self._changes} - properties["name"] = self.name - - if location is not None: - properties["location"] = location - - api_response = client._connection.api_request( - method="POST", - path="/b", - query_params=query_params, - data=properties, - _target_object=self, - ) - self._set_properties(api_response) - - def patch(self, client=None): - """Sends all changed properties in a PATCH request. - - Updates the ``_properties`` with the response from the backend. - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current object. - """ - # Special case: For buckets, it is possible that labels are being - # removed; this requires special handling. - if self._label_removals: - self._changes.add("labels") - self._properties.setdefault("labels", {}) - for removed_label in self._label_removals: - self._properties["labels"][removed_label] = None - - # Call the superclass method. - return super(Bucket, self).patch(client=client) - - @property - def acl(self): - """Create our ACL on demand.""" - return self._acl - - @property - def default_object_acl(self): - """Create our defaultObjectACL on demand.""" - return self._default_object_acl - - @staticmethod - def path_helper(bucket_name): - """Relative URL path for a bucket. - - :type bucket_name: str - :param bucket_name: The bucket name in the path. - - :rtype: str - :returns: The relative URL path for ``bucket_name``. - """ - return "/b/" + bucket_name - - @property - def path(self): - """The URL path to this bucket.""" - if not self.name: - raise ValueError("Cannot determine path without bucket name.") - - return self.path_helper(self.name) - - def get_blob( - self, blob_name, client=None, encryption_key=None, generation=None, **kwargs - ): - """Get a blob object by name. - - This will return None if the blob doesn't exist: - - .. literalinclude:: snippets.py - :start-after: [START get_blob] - :end-before: [END get_blob] - - If :attr:`user_project` is set, bills the API request to that project. - - :type blob_name: str - :param blob_name: The name of the blob to retrieve. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :type encryption_key: bytes - :param encryption_key: - Optional 32 byte encryption key for customer-supplied encryption. - See - https://cloud.google.com/storage/docs/encryption#customer-supplied. 
- - :type generation: long - :param generation: Optional. If present, selects a specific revision of - this object. - - :param kwargs: Keyword arguments to pass to the - :class:`~google.cloud.storage.blob.Blob` constructor. - - :rtype: :class:`google.cloud.storage.blob.Blob` or None - :returns: The blob object if it exists, otherwise None. - """ - blob = Blob( - bucket=self, - name=blob_name, - encryption_key=encryption_key, - generation=generation, - **kwargs - ) - try: - # NOTE: This will not fail immediately in a batch. However, when - # Batch.finish() is called, the resulting `NotFound` will be - # raised. - blob.reload(client=client) - except NotFound: - return None - else: - return blob - - def list_blobs( - self, - max_results=None, - page_token=None, - prefix=None, - delimiter=None, - versions=None, - projection="noAcl", - fields=None, - client=None, - ): - """Return an iterator used to find blobs in the bucket. - - .. note:: - Direct use of this method is deprecated. Use ``Client.list_blobs`` instead. - - If :attr:`user_project` is set, bills the API request to that project. - - :type max_results: int - :param max_results: - (Optional) The maximum number of blobs in each page of results - from this request. Non-positive values are ignored. Defaults to - a sensible value set by the API. - - :type page_token: str - :param page_token: - (Optional) If present, return the next batch of blobs, using the - value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing the - token. - - :type prefix: str - :param prefix: (Optional) prefix used to filter blobs. - - :type delimiter: str - :param delimiter: (Optional) Delimiter, used with ``prefix`` to - emulate hierarchy. - - :type versions: bool - :param versions: (Optional) Whether object versions should be returned - as separate blobs. - - :type projection: str - :param projection: (Optional) If used, must be 'full' or 'noAcl'. - Defaults to ``'noAcl'``. Specifies the set of - properties to return. - - :type fields: str - :param fields: - (Optional) Selector specifying which fields to include - in a partial response. Must be a list of fields. For - example to get a partial response with just the next - page token and the name and language of each blob returned: - ``'items(name,contentLanguage),nextPageToken'``. - See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of all :class:`~google.cloud.storage.blob.Blob` - in this bucket matching the arguments. 
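A sketch of the hierarchy emulation that the ``prefix``/``delimiter`` parameters provide (bucket name hypothetical; note the docstring above prefers ``Client.list_blobs`` for new code):

from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket("example-bucket")

# "Directory listing" of images/: the delimiter stops recursion.
iterator = bucket.list_blobs(prefix="images/", delimiter="/")
for blob in iterator:
    print(blob.name, blob.size)

# "Subdirectories" are collected on the iterator as it is consumed.
print(sorted(iterator.prefixes))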
- """ - extra_params = {"projection": projection} - - if prefix is not None: - extra_params["prefix"] = prefix - - if delimiter is not None: - extra_params["delimiter"] = delimiter - - if versions is not None: - extra_params["versions"] = versions - - if fields is not None: - extra_params["fields"] = fields - - if self.user_project is not None: - extra_params["userProject"] = self.user_project - - client = self._require_client(client) - path = self.path + "/o" - iterator = page_iterator.HTTPIterator( - client=client, - api_request=client._connection.api_request, - path=path, - item_to_value=_item_to_blob, - page_token=page_token, - max_results=max_results, - extra_params=extra_params, - page_start=_blobs_page_start, - ) - iterator.bucket = self - iterator.prefixes = set() - return iterator - - def list_notifications(self, client=None): - """List Pub / Sub notifications for this bucket. - - See: - https://cloud.google.com/storage/docs/json_api/v1/notifications/list - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: list of :class:`.BucketNotification` - :returns: notification instances - """ - client = self._require_client(client) - path = self.path + "/notificationConfigs" - iterator = page_iterator.HTTPIterator( - client=client, - api_request=client._connection.api_request, - path=path, - item_to_value=_item_to_notification, - ) - iterator.bucket = self - return iterator - - def delete(self, force=False, client=None): - """Delete this bucket. - - The bucket **must** be empty in order to submit a delete request. If - ``force=True`` is passed, this will first attempt to delete all the - objects / blobs in the bucket (i.e. try to empty the bucket). - - If the bucket doesn't exist, this will raise - :class:`google.cloud.exceptions.NotFound`. If the bucket is not empty - (and ``force=False``), will raise - :class:`google.cloud.exceptions.Conflict`. - - If ``force=True`` and the bucket contains more than 256 objects / blobs - this will cowardly refuse to delete the objects (or the bucket). This - is to prevent accidental bucket deletion and to prevent extremely long - runtime of this method. - - If :attr:`user_project` is set, bills the API request to that project. - - :type force: bool - :param force: If True, empties the bucket's objects then deletes it. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket - contains more than 256 objects / blobs. - """ - client = self._require_client(client) - query_params = {} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - if force: - blobs = list( - self.list_blobs( - max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, client=client - ) - ) - if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: - message = ( - "Refusing to delete bucket with more than " - "%d objects. If you actually want to delete " - "this bucket, please delete the objects " - "yourself before calling Bucket.delete()." - ) % (self._MAX_OBJECTS_FOR_ITERATION,) - raise ValueError(message) - - # Ignore 404 errors on delete. 
- self.delete_blobs(blobs, on_error=lambda blob: None, client=client) - - # We intentionally pass `_target_object=None` since a DELETE - # request has no response value (whether in a standard request or - # in a batch request). - client._connection.api_request( - method="DELETE", - path=self.path, - query_params=query_params, - _target_object=None, - ) - - def delete_blob(self, blob_name, client=None, generation=None): - """Deletes a blob from the current bucket. - - If the blob isn't found (backend 404), raises a - :class:`google.cloud.exceptions.NotFound`. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - - If :attr:`user_project` is set, bills the API request to that project. - - :type blob_name: str - :param blob_name: A blob name to delete. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :type generation: long - :param generation: Optional. If present, permanently deletes a specific - revision of this object. - - :raises: :class:`google.cloud.exceptions.NotFound` (to suppress - the exception, call ``delete_blobs``, passing a no-op - ``on_error`` callback, e.g.: - - .. literalinclude:: snippets.py - :start-after: [START delete_blobs] - :end-before: [END delete_blobs] - - """ - client = self._require_client(client) - blob = Blob(blob_name, bucket=self, generation=generation) - - # We intentionally pass `_target_object=None` since a DELETE - # request has no response value (whether in a standard request or - # in a batch request). - client._connection.api_request( - method="DELETE", - path=blob.path, - query_params=blob._query_params, - _target_object=None, - ) - - def delete_blobs(self, blobs, on_error=None, client=None): - """Deletes a list of blobs from the current bucket. - - Uses :meth:`delete_blob` to delete each individual blob. - - If :attr:`user_project` is set, bills the API request to that project. - - :type blobs: list - :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or - blob names to delete. - - :type on_error: callable - :param on_error: (Optional) Takes single argument: ``blob``. Called - called once for each blob raising - :class:`~google.cloud.exceptions.NotFound`; - otherwise, the exception is propagated. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :raises: :class:`~google.cloud.exceptions.NotFound` (if - `on_error` is not passed). - """ - for blob in blobs: - try: - blob_name = blob - if not isinstance(blob_name, six.string_types): - blob_name = blob.name - self.delete_blob(blob_name, client=client) - except NotFound: - if on_error is not None: - on_error(blob) - else: - raise - - def copy_blob( - self, - blob, - destination_bucket, - new_name=None, - client=None, - preserve_acl=True, - source_generation=None, - ): - """Copy the given blob to the given bucket, optionally with a new name. - - If :attr:`user_project` is set, bills the API request to that project. - - :type blob: :class:`google.cloud.storage.blob.Blob` - :param blob: The blob to be copied. - - :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket` - :param destination_bucket: The bucket into which the blob should be - copied. 
- - :type new_name: str - :param new_name: (optional) the new name for the copied file. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :type preserve_acl: bool - :param preserve_acl: Optional. Copies ACL from old blob to new blob. - Default: True. - - :type source_generation: long - :param source_generation: Optional. The generation of the blob to be - copied. - - :rtype: :class:`google.cloud.storage.blob.Blob` - :returns: The new Blob. - """ - client = self._require_client(client) - query_params = {} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - if source_generation is not None: - query_params["sourceGeneration"] = source_generation - - if new_name is None: - new_name = blob.name - - new_blob = Blob(bucket=destination_bucket, name=new_name) - api_path = blob.path + "/copyTo" + new_blob.path - copy_result = client._connection.api_request( - method="POST", - path=api_path, - query_params=query_params, - _target_object=new_blob, - ) - - if not preserve_acl: - new_blob.acl.save(acl={}, client=client) - - new_blob._set_properties(copy_result) - return new_blob - - def rename_blob(self, blob, new_name, client=None): - """Rename the given blob using copy and delete operations. - - If :attr:`user_project` is set, bills the API request to that project. - - Effectively, copies blob to the same bucket with a new name, then - deletes the blob. - - .. warning:: - - This method will first duplicate the data and then delete the - old blob. This means that with very large objects renaming - could be a very (temporarily) costly or a very slow operation. - - :type blob: :class:`google.cloud.storage.blob.Blob` - :param blob: The blob to be renamed. - - :type new_name: str - :param new_name: The new name for this blob. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`Blob` - :returns: The newly-renamed blob. - """ - same_name = blob.name == new_name - - new_blob = self.copy_blob(blob, self, new_name, client=client) - - if not same_name: - blob.delete(client=client) - - return new_blob - - @property - def cors(self): - """Retrieve or set CORS policies configured for this bucket. - - See http://www.w3.org/TR/cors/ and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - .. note:: - - The getter for this property returns a list which contains - *copies* of the bucket's CORS policy mappings. Mutating the list - or one of its dicts has no effect unless you then re-assign the - dict via the setter. E.g.: - - >>> policies = bucket.cors - >>> policies.append({'origin': '/foo', ...}) - >>> policies[1]['maxAgeSeconds'] = 3600 - >>> del policies[0] - >>> bucket.cors = policies - >>> bucket.update() - - :setter: Set CORS policies for this bucket. - :getter: Gets the CORS policies for this bucket. - - :rtype: list of dictionaries - :returns: A sequence of mappings describing each CORS policy. - """ - return [copy.deepcopy(policy) for policy in self._properties.get("cors", ())] - - @cors.setter - def cors(self, entries): - """Set CORS policies configured for this bucket. 
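Because the ``cors`` getter returns copies, updates follow the copy, mutate, and re-assign pattern described in the docstring above. A sketch with placeholder values:

from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket("my-example-bucket")  # placeholder name

policies = bucket.cors
policies.append(
    {
        "origin": ["https://example.com"],
        "method": ["GET"],
        "responseHeader": ["Content-Type"],
        "maxAgeSeconds": 3600,
    }
)
bucket.cors = policies
bucket.update()  # persist the new policy list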
- - See http://www.w3.org/TR/cors/ and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - :type entries: list of dictionaries - :param entries: A sequence of mappings describing each CORS policy. - """ - self._patch_property("cors", entries) - - default_event_based_hold = _scalar_property("defaultEventBasedHold") - """Are uploaded objects automatically placed under an even-based hold? - - If True, uploaded objects will be placed under an event-based hold to - be released at a future time. When released an object will then begin - the retention period determined by the policy retention period for the - object bucket. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets - - If the property is not set locally, returns ``None``. - - :rtype: bool or ``NoneType`` - """ - - @property - def default_kms_key_name(self): - """Retrieve / set default KMS encryption key for objects in the bucket. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets - - :setter: Set default KMS encryption key for items in this bucket. - :getter: Get default KMS encryption key for items in this bucket. - - :rtype: str - :returns: Default KMS encryption key, or ``None`` if not set. - """ - encryption_config = self._properties.get("encryption", {}) - return encryption_config.get("defaultKmsKeyName") - - @default_kms_key_name.setter - def default_kms_key_name(self, value): - """Set default KMS encryption key for objects in the bucket. - - :type value: str or None - :param value: new KMS key name (None to clear any existing key). - """ - encryption_config = self._properties.get("encryption", {}) - encryption_config["defaultKmsKeyName"] = value - self._patch_property("encryption", encryption_config) - - @property - def labels(self): - """Retrieve or set labels assigned to this bucket. - - See - https://cloud.google.com/storage/docs/json_api/v1/buckets#labels - - .. note:: - - The getter for this property returns a dict which is a *copy* - of the bucket's labels. Mutating that dict has no effect unless - you then re-assign the dict via the setter. E.g.: - - >>> labels = bucket.labels - >>> labels['new_key'] = 'some-label' - >>> del labels['old_key'] - >>> bucket.labels = labels - >>> bucket.update() - - :setter: Set labels for this bucket. - :getter: Gets the labels for this bucket. - - :rtype: :class:`dict` - :returns: Name-value pairs (string->string) labelling the bucket. - """ - labels = self._properties.get("labels") - if labels is None: - return {} - return copy.deepcopy(labels) - - @labels.setter - def labels(self, mapping): - """Set labels assigned to this bucket. - - See - https://cloud.google.com/storage/docs/json_api/v1/buckets#labels - - :type mapping: :class:`dict` - :param mapping: Name-value pairs (string->string) labelling the bucket. - """ - # If any labels have been expressly removed, we need to track this - # so that a future .patch() call can do the correct thing. - existing = set([k for k in self.labels.keys()]) - incoming = set([k for k in mapping.keys()]) - self._label_removals = self._label_removals.union(existing.difference(incoming)) - mapping = {k: str(v) for k, v in mapping.items()} - - # Actually update the labels on the object. - self._patch_property("labels", copy.deepcopy(mapping)) - - @property - def etag(self): - """Retrieve the ETag for the bucket. 
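The ``labels`` getter likewise returns a copy, so edits must be re-assigned before patching; the label keys and KMS key path below are placeholders:

from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket("my-example-bucket")  # placeholder name

labels = bucket.labels
labels["env"] = "staging"
labels.pop("obsolete-key", None)  # removals are tracked for the next patch()
bucket.labels = labels

# Default CMEK key applied to newly written objects (path is a placeholder).
bucket.default_kms_key_name = (
    "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key"
)
bucket.patch()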
- - See https://tools.ietf.org/html/rfc2616#section-3.11 and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: str or ``NoneType`` - :returns: The bucket etag or ``None`` if the bucket's - resource has not been loaded from the server. - """ - return self._properties.get("etag") - - @property - def id(self): - """Retrieve the ID for the bucket. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: str or ``NoneType`` - :returns: The ID of the bucket or ``None`` if the bucket's - resource has not been loaded from the server. - """ - return self._properties.get("id") - - @property - def iam_configuration(self): - """Retrieve IAM configuration for this bucket. - - :rtype: :class:`IAMConfiguration` - :returns: an instance for managing the bucket's IAM configuration. - """ - info = self._properties.get("iamConfiguration", {}) - return IAMConfiguration.from_api_repr(info, self) - - @property - def lifecycle_rules(self): - """Retrieve or set lifecycle rules configured for this bucket. - - See https://cloud.google.com/storage/docs/lifecycle and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - .. note:: - - The getter for this property returns a list which contains - *copies* of the bucket's lifecycle rules mappings. Mutating the - list or one of its dicts has no effect unless you then re-assign - the dict via the setter. E.g.: - - >>> rules = bucket.lifecycle_rules - >>> rules.append({'origin': '/foo', ...}) - >>> rules[1]['rule']['action']['type'] = 'Delete' - >>> del rules[0] - >>> bucket.lifecycle_rules = rules - >>> bucket.update() - - :setter: Set lifestyle rules for this bucket. - :getter: Gets the lifestyle rules for this bucket. - - :rtype: generator(dict) - :returns: A sequence of mappings describing each lifecycle rule. - """ - info = self._properties.get("lifecycle", {}) - for rule in info.get("rule", ()): - action_type = rule["action"]["type"] - if action_type == "Delete": - yield LifecycleRuleDelete.from_api_repr(rule) - elif action_type == "SetStorageClass": - yield LifecycleRuleSetStorageClass.from_api_repr(rule) - else: - raise ValueError("Unknown lifecycle rule: {}".format(rule)) - - @lifecycle_rules.setter - def lifecycle_rules(self, rules): - """Set lifestyle rules configured for this bucket. - - See https://cloud.google.com/storage/docs/lifecycle and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - :type entries: list of dictionaries - :param entries: A sequence of mappings describing each lifecycle rule. - """ - rules = [dict(rule) for rule in rules] # Convert helpers if needed - self._patch_property("lifecycle", {"rule": rules}) - - def clear_lifecyle_rules(self): - """Set lifestyle rules configured for this bucket. - - See https://cloud.google.com/storage/docs/lifecycle and - https://cloud.google.com/storage/docs/json_api/v1/buckets - """ - self.lifecycle_rules = [] - - def add_lifecycle_delete_rule(self, **kw): - """Add a "delete" rule to lifestyle rules configured for this bucket. - - See https://cloud.google.com/storage/docs/lifecycle and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - .. literalinclude:: snippets.py - :start-after: [START add_lifecycle_delete_rule] - :end-before: [END add_lifecycle_delete_rule] - - :type kw: dict - :params kw: arguments passed to :class:`LifecycleRuleConditions`. 
- """ - rules = list(self.lifecycle_rules) - rules.append(LifecycleRuleDelete(**kw)) - self.lifecycle_rules = rules - - def add_lifecycle_set_storage_class_rule(self, storage_class, **kw): - """Add a "delete" rule to lifestyle rules configured for this bucket. - - See https://cloud.google.com/storage/docs/lifecycle and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - .. literalinclude:: snippets.py - :start-after: [START add_lifecycle_set_storage_class_rule] - :end-before: [END add_lifecycle_set_storage_class_rule] - - :type storage_class: str, one of :attr:`STORAGE_CLASSES`. - :param storage_class: new storage class to assign to matching items. - - :type kw: dict - :params kw: arguments passed to :class:`LifecycleRuleConditions`. - """ - rules = list(self.lifecycle_rules) - rules.append(LifecycleRuleSetStorageClass(storage_class, **kw)) - self.lifecycle_rules = rules - - _location = _scalar_property("location") - - @property - def location(self): - """Retrieve location configured for this bucket. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets and - https://cloud.google.com/storage/docs/bucket-locations - - Returns ``None`` if the property has not been set before creation, - or if the bucket's resource has not been loaded from the server. - :rtype: str or ``NoneType`` - """ - return self._location - - @location.setter - def location(self, value): - """(Deprecated) Set `Bucket.location` - - This can only be set at bucket **creation** time. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets and - https://cloud.google.com/storage/docs/bucket-locations - - .. warning:: - - Assignment to 'Bucket.location' is deprecated, as it is only - valid before the bucket is created. Instead, pass the location - to `Bucket.create`. - """ - warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2) - self._location = value - - @property - def location_type(self): - """Retrieve or set the location type for the bucket. - - See https://cloud.google.com/storage/docs/storage-classes - - :setter: Set the location type for this bucket. - :getter: Gets the the location type for this bucket. - - :rtype: str or ``NoneType`` - :returns: - If set, one of - :attr:`~google.cloud.storage.constants.MULTI_REGION_LOCATION_TYPE`, - :attr:`~google.cloud.storage.constants.REGION_LOCATION_TYPE`, or - :attr:`~google.cloud.storage.constants.DUAL_REGION_LOCATION_TYPE`, - else ``None``. - """ - return self._properties.get("locationType") - - def get_logging(self): - """Return info about access logging for this bucket. - - See https://cloud.google.com/storage/docs/access-logs#status - - :rtype: dict or None - :returns: a dict w/ keys, ``logBucket`` and ``logObjectPrefix`` - (if logging is enabled), or None (if not). - """ - info = self._properties.get("logging") - return copy.deepcopy(info) - - def enable_logging(self, bucket_name, object_prefix=""): - """Enable access logging for this bucket. - - See https://cloud.google.com/storage/docs/access-logs - - :type bucket_name: str - :param bucket_name: name of bucket in which to store access logs - - :type object_prefix: str - :param object_prefix: prefix for access log filenames - """ - info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix} - self._patch_property("logging", info) - - def disable_logging(self): - """Disable access logging for this bucket. 
- - See https://cloud.google.com/storage/docs/access-logs#disabling - """ - self._patch_property("logging", None) - - @property - def metageneration(self): - """Retrieve the metageneration for the bucket. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: int or ``NoneType`` - :returns: The metageneration of the bucket or ``None`` if the bucket's - resource has not been loaded from the server. - """ - metageneration = self._properties.get("metageneration") - if metageneration is not None: - return int(metageneration) - - @property - def owner(self): - """Retrieve info about the owner of the bucket. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: dict or ``NoneType`` - :returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's - resource has not been loaded from the server. - """ - return copy.deepcopy(self._properties.get("owner")) - - @property - def project_number(self): - """Retrieve the number of the project to which the bucket is assigned. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: int or ``NoneType`` - :returns: The project number that owns the bucket or ``None`` if - the bucket's resource has not been loaded from the server. - """ - project_number = self._properties.get("projectNumber") - if project_number is not None: - return int(project_number) - - @property - def retention_policy_effective_time(self): - """Retrieve the effective time of the bucket's retention policy. - - :rtype: datetime.datetime or ``NoneType`` - :returns: point-in time at which the bucket's retention policy is - effective, or ``None`` if the property is not - set locally. - """ - policy = self._properties.get("retentionPolicy") - if policy is not None: - timestamp = policy.get("effectiveTime") - if timestamp is not None: - return _rfc3339_to_datetime(timestamp) - - @property - def retention_policy_locked(self): - """Retrieve whthere the bucket's retention policy is locked. - - :rtype: bool - :returns: True if the bucket's policy is locked, or else False - if the policy is not locked, or the property is not - set locally. - """ - policy = self._properties.get("retentionPolicy") - if policy is not None: - return policy.get("isLocked") - - @property - def retention_period(self): - """Retrieve or set the retention period for items in the bucket. - - :rtype: int or ``NoneType`` - :returns: number of seconds to retain items after upload or release - from event-based lock, or ``None`` if the property is not - set locally. - """ - policy = self._properties.get("retentionPolicy") - if policy is not None: - period = policy.get("retentionPeriod") - if period is not None: - return int(period) - - @retention_period.setter - def retention_period(self, value): - """Set the retention period for items in the bucket. - - :type value: int - :param value: - number of seconds to retain items after upload or release from - event-based lock. - - :raises ValueError: if the bucket's retention policy is locked. - """ - policy = self._properties.setdefault("retentionPolicy", {}) - if value is not None: - policy["retentionPeriod"] = str(value) - else: - policy = None - self._patch_property("retentionPolicy", policy) - - @property - def self_link(self): - """Retrieve the URI for the bucket. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: str or ``NoneType`` - :returns: The self link for the bucket or ``None`` if - the bucket's resource has not been loaded from the server. 
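Retention-policy usage for the properties above, assuming the bucket's policy is not locked; the duration is arbitrary:

from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket("my-example-bucket")  # placeholder name

bucket.retention_period = 30 * 86400  # seconds to retain objects after upload
bucket.patch()

bucket.reload()
print(bucket.retention_policy_effective_time, bucket.retention_policy_locked)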
- """ - return self._properties.get("selfLink") - - @property - def storage_class(self): - """Retrieve or set the storage class for the bucket. - - See https://cloud.google.com/storage/docs/storage-classes - - :setter: Set the storage class for this bucket. - :getter: Gets the the storage class for this bucket. - - :rtype: str or ``NoneType`` - :returns: - If set, one of - :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, - or - :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`, - else ``None``. - """ - return self._properties.get("storageClass") - - @storage_class.setter - def storage_class(self, value): - """Set the storage class for the bucket. - - See https://cloud.google.com/storage/docs/storage-classes - - :type value: str - :param value: - One of - :attr:`~google.cloud.storage.constants.NEARLINE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.COLDLINE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.ARCHIVE_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.STANDARD_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.MULTI_REGIONAL_LEGACY_STORAGE_CLASS`, - :attr:`~google.cloud.storage.constants.REGIONAL_LEGACY_STORAGE_CLASS`, - or - :attr:`~google.cloud.storage.constants.DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS`, - """ - if value not in self.STORAGE_CLASSES: - raise ValueError("Invalid storage class: %s" % (value,)) - self._patch_property("storageClass", value) - - @property - def time_created(self): - """Retrieve the timestamp at which the bucket was created. - - See https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the bucket's resource has not been loaded - from the server. - """ - value = self._properties.get("timeCreated") - if value is not None: - return _rfc3339_to_datetime(value) - - @property - def versioning_enabled(self): - """Is versioning enabled for this bucket? - - See https://cloud.google.com/storage/docs/object-versioning for - details. - - :setter: Update whether versioning is enabled for this bucket. - :getter: Query whether versioning is enabled for this bucket. - - :rtype: bool - :returns: True if enabled, else False. - """ - versioning = self._properties.get("versioning", {}) - return versioning.get("enabled", False) - - @versioning_enabled.setter - def versioning_enabled(self, value): - """Enable versioning for this bucket. - - See https://cloud.google.com/storage/docs/object-versioning for - details. - - :type value: convertible to boolean - :param value: should versioning be enabled for the bucket? - """ - self._patch_property("versioning", {"enabled": bool(value)}) - - @property - def requester_pays(self): - """Does the requester pay for API requests for this bucket? - - See https://cloud.google.com/storage/docs/requester-pays for - details. - - :setter: Update whether requester pays for this bucket. - :getter: Query whether requester pays for this bucket. - - :rtype: bool - :returns: True if requester pays for API requests for the bucket, - else False. 
- """ - versioning = self._properties.get("billing", {}) - return versioning.get("requesterPays", False) - - @requester_pays.setter - def requester_pays(self, value): - """Update whether requester pays for API requests for this bucket. - - See https://cloud.google.com/storage/docs/ for - details. - - :type value: convertible to boolean - :param value: should requester pay for API requests for the bucket? - """ - self._patch_property("billing", {"requesterPays": bool(value)}) - - def configure_website(self, main_page_suffix=None, not_found_page=None): - """Configure website-related properties. - - See https://cloud.google.com/storage/docs/hosting-static-website - - .. note:: - This (apparently) only works - if your bucket name is a domain name - (and to do that, you need to get approved somehow...). - - If you want this bucket to host a website, just provide the name - of an index page and a page to use when a blob isn't found: - - .. literalinclude:: snippets.py - :start-after: [START configure_website] - :end-before: [END configure_website] - - You probably should also make the whole bucket public: - - .. literalinclude:: snippets.py - :start-after: [START make_public] - :end-before: [END make_public] - - This says: "Make the bucket public, and all the stuff already in - the bucket, and anything else I add to the bucket. Just make it - all public." - - :type main_page_suffix: str - :param main_page_suffix: The page to use as the main page - of a directory. - Typically something like index.html. - - :type not_found_page: str - :param not_found_page: The file to use when a page isn't found. - """ - data = {"mainPageSuffix": main_page_suffix, "notFoundPage": not_found_page} - self._patch_property("website", data) - - def disable_website(self): - """Disable the website configuration for this bucket. - - This is really just a shortcut for setting the website-related - attributes to ``None``. - """ - return self.configure_website(None, None) - - def get_iam_policy(self, client=None, requested_policy_version=None): - """Retrieve the IAM policy for the bucket. - - See - https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy - - If :attr:`user_project` is set, bills the API request to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :type requested_policy_version: int or ``NoneType`` - :param requested_policy_version: Optional. The version of IAM policies to request. - If a policy with a condition is requested without - setting this, the server will return an error. - This must be set to a value of 3 to retrieve IAM - policies containing conditions. This is to prevent - client code that isn't aware of IAM conditions from - interpreting and modifying policies incorrectly. - The service might return a policy with version lower - than the one that was requested, based on the - feature syntax in the policy fetched. - - :rtype: :class:`google.api_core.iam.Policy` - :returns: the policy instance, based on the resource returned from - the ``getIamPolicy`` API request. - - Example: - - .. 
code-block:: python - - from google.cloud.storage.iam import STORAGE_OBJECT_VIEWER_ROLE - - policy = bucket.get_iam_policy(requested_policy_version=3) - - policy.version = 3 - - # Add a binding to the policy via it's bindings property - policy.bindings.append({ - "role": STORAGE_OBJECT_VIEWER_ROLE, - "members": {"serviceAccount:account@project.iam.gserviceaccount.com", ...}, - # Optional: - "condition": { - "title": "prefix" - "description": "Objects matching prefix" - "expression": "resource.name.startsWith(\"projects/project-name/buckets/bucket-name/objects/prefix\")" - } - }) - - bucket.set_iam_policy(policy) - """ - client = self._require_client(client) - query_params = {} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - if requested_policy_version is not None: - query_params["optionsRequestedPolicyVersion"] = requested_policy_version - - info = client._connection.api_request( - method="GET", - path="%s/iam" % (self.path,), - query_params=query_params, - _target_object=None, - ) - return Policy.from_api_repr(info) - - def set_iam_policy(self, policy, client=None): - """Update the IAM policy for the bucket. - - See - https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy - - If :attr:`user_project` is set, bills the API request to that project. - - :type policy: :class:`google.api_core.iam.Policy` - :param policy: policy instance used to update bucket's IAM policy. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`google.api_core.iam.Policy` - :returns: the policy instance, based on the resource returned from - the ``setIamPolicy`` API request. - """ - client = self._require_client(client) - query_params = {} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - resource = policy.to_api_repr() - resource["resourceId"] = self.path - info = client._connection.api_request( - method="PUT", - path="%s/iam" % (self.path,), - query_params=query_params, - data=resource, - _target_object=None, - ) - return Policy.from_api_repr(info) - - def test_iam_permissions(self, permissions, client=None): - """API call: test permissions - - See - https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions - - If :attr:`user_project` is set, bills the API request to that project. - - :type permissions: list of string - :param permissions: the permissions to check - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: list of string - :returns: the permissions returned by the ``testIamPermissions`` API - request. - """ - client = self._require_client(client) - query_params = {"permissions": permissions} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - path = "%s/iam/testPermissions" % (self.path,) - resp = client._connection.api_request( - method="GET", path=path, query_params=query_params - ) - return resp.get("permissions", []) - - def make_public(self, recursive=False, future=False, client=None): - """Update bucket's ACL, granting read access to anonymous users. - - :type recursive: bool - :param recursive: If True, this will make all blobs inside the bucket - public as well. 
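A runnable variant of the ``get_iam_policy`` example above (the docstring version is missing commas inside the ``condition`` dict); the member, bucket, and prefix are placeholders:

from google.cloud import storage
from google.cloud.storage.iam import STORAGE_OBJECT_VIEWER_ROLE

client = storage.Client()
bucket = client.get_bucket("my-example-bucket")  # placeholder name

policy = bucket.get_iam_policy(requested_policy_version=3)
policy.version = 3  # required when any binding carries a condition
policy.bindings.append(
    {
        "role": STORAGE_OBJECT_VIEWER_ROLE,
        "members": {"serviceAccount:reader@my-project.iam.gserviceaccount.com"},
        "condition": {
            "title": "prefix",
            "description": "Objects matching prefix",
            "expression": 'resource.name.startsWith("projects/_/buckets/my-example-bucket/objects/public/")',
        },
    }
)
bucket.set_iam_policy(policy)

print(bucket.test_iam_permissions(["storage.objects.get"]))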
- - :type future: bool - :param future: If True, this will make all objects created in the - future public as well. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :raises ValueError: - If ``recursive`` is True, and the bucket contains more than 256 - blobs. This is to prevent extremely long runtime of this - method. For such buckets, iterate over the blobs returned by - :meth:`list_blobs` and call - :meth:`~google.cloud.storage.blob.Blob.make_public` - for each blob. - """ - self.acl.all().grant_read() - self.acl.save(client=client) - - if future: - doa = self.default_object_acl - if not doa.loaded: - doa.reload(client=client) - doa.all().grant_read() - doa.save(client=client) - - if recursive: - blobs = list( - self.list_blobs( - projection="full", - max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, - client=client, - ) - ) - if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: - message = ( - "Refusing to make public recursively with more than " - "%d objects. If you actually want to make every object " - "in this bucket public, iterate through the blobs " - "returned by 'Bucket.list_blobs()' and call " - "'make_public' on each one." - ) % (self._MAX_OBJECTS_FOR_ITERATION,) - raise ValueError(message) - - for blob in blobs: - blob.acl.all().grant_read() - blob.acl.save(client=client) - - def make_private(self, recursive=False, future=False, client=None): - """Update bucket's ACL, revoking read access for anonymous users. - - :type recursive: bool - :param recursive: If True, this will make all blobs inside the bucket - private as well. - - :type future: bool - :param future: If True, this will make all objects created in the - future private as well. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :raises ValueError: - If ``recursive`` is True, and the bucket contains more than 256 - blobs. This is to prevent extremely long runtime of this - method. For such buckets, iterate over the blobs returned by - :meth:`list_blobs` and call - :meth:`~google.cloud.storage.blob.Blob.make_private` - for each blob. - """ - self.acl.all().revoke_read() - self.acl.save(client=client) - - if future: - doa = self.default_object_acl - if not doa.loaded: - doa.reload(client=client) - doa.all().revoke_read() - doa.save(client=client) - - if recursive: - blobs = list( - self.list_blobs( - projection="full", - max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, - client=client, - ) - ) - if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: - message = ( - "Refusing to make private recursively with more than " - "%d objects. If you actually want to make every object " - "in this bucket private, iterate through the blobs " - "returned by 'Bucket.list_blobs()' and call " - "'make_private' on each one." - ) % (self._MAX_OBJECTS_FOR_ITERATION,) - raise ValueError(message) - - for blob in blobs: - blob.acl.all().revoke_read() - blob.acl.save(client=client) - - def generate_upload_policy(self, conditions, expiration=None, client=None): - """Create a signed upload policy for uploading objects. - - This method generates and signs a policy document. You can use - `policy documents`_ to allow visitors to a website to upload files to - Google Cloud Storage without giving them direct write access. - - For example: - - .. 
literalinclude:: snippets.py - :start-after: [START policy_document] - :end-before: [END policy_document] - - .. _policy documents: - https://cloud.google.com/storage/docs/xml-api\ - /post-object#policydocument - - :type expiration: datetime - :param expiration: Optional expiration in UTC. If not specified, the - policy will expire in 1 hour. - - :type conditions: list - :param conditions: A list of conditions as described in the - `policy documents`_ documentation. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: dict - :returns: A dictionary of (form field name, form field value) of form - fields that should be added to your HTML upload form in order - to attach the signature. - """ - client = self._require_client(client) - credentials = client._base_connection.credentials - _signing.ensure_signed_credentials(credentials) - - if expiration is None: - expiration = _NOW() + datetime.timedelta(hours=1) - - conditions = conditions + [{"bucket": self.name}] - - policy_document = { - "expiration": _datetime_to_rfc3339(expiration), - "conditions": conditions, - } - - encoded_policy_document = base64.b64encode( - json.dumps(policy_document).encode("utf-8") - ) - signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document)) - - fields = { - "bucket": self.name, - "GoogleAccessId": credentials.signer_email, - "policy": encoded_policy_document.decode("utf-8"), - "signature": signature.decode("utf-8"), - } - - return fields - - def lock_retention_policy(self, client=None): - """Lock the bucket's retention policy. - - :raises ValueError: - if the bucket has no metageneration (i.e., new or never reloaded); - if the bucket has no retention policy assigned; - if the bucket's retention policy is already locked. - """ - if "metageneration" not in self._properties: - raise ValueError("Bucket has no retention policy assigned: try 'reload'?") - - policy = self._properties.get("retentionPolicy") - - if policy is None: - raise ValueError("Bucket has no retention policy assigned: try 'reload'?") - - if policy.get("isLocked"): - raise ValueError("Bucket's retention policy is already locked.") - - client = self._require_client(client) - - query_params = {"ifMetagenerationMatch": self.metageneration} - - if self.user_project is not None: - query_params["userProject"] = self.user_project - - path = "/b/{}/lockRetentionPolicy".format(self.name) - api_response = client._connection.api_request( - method="POST", path=path, query_params=query_params, _target_object=self - ) - self._set_properties(api_response) - - def generate_signed_url( - self, - expiration=None, - api_access_endpoint=_API_ACCESS_ENDPOINT, - method="GET", - headers=None, - query_parameters=None, - client=None, - credentials=None, - version=None, - ): - """Generates a signed URL for this bucket. - - .. note:: - - If you are on Google Compute Engine, you can't generate a signed - URL using GCE service account. Follow `Issue 50`_ for updates on - this. If you'd like to be able to generate a signed URL from GCE, - you can use a standard service account from a JSON file rather - than a GCE service account. - - .. _Issue 50: https://github.com/GoogleCloudPlatform/\ - google-auth-library-python/issues/50 - - If you have a bucket that you want to allow access to for a set - amount of time, you can use this method to generate a URL that - is only valid within a certain time period. 
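A sketch of ``generate_upload_policy``; the conditions follow the XML API policy-document format, and signing assumes service-account credentials that expose ``sign_bytes`` (all names are placeholders):

import datetime

from google.cloud import storage

client = storage.Client()  # assumes signing-capable (service account) credentials
bucket = client.get_bucket("my-example-bucket")  # placeholder name

conditions = [["starts-with", "$key", "uploads/"], {"acl": "private"}]
fields = bucket.generate_upload_policy(
    conditions,
    expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=2),
)
# `fields` contains bucket, GoogleAccessId, policy, and signature values to
# embed in an HTML POST form next to your own "key" and "file" fields.
print(sorted(fields))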
- - This is particularly useful if you don't want publicly - accessible buckets, but don't want to require users to explicitly - log in. - - :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] - :param expiration: Point in time when the signed URL should expire. - - :type api_access_endpoint: str - :param api_access_endpoint: Optional URI base. - - :type method: str - :param method: The HTTP verb that will be used when requesting the URL. - - :type headers: dict - :param headers: - (Optional) Additional HTTP headers to be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers - Requests using the signed URL *must* pass the specified header - (name and value) with each request for the URL. - - :type query_parameters: dict - :param query_parameters: - (Optional) Additional query paramtersto be included as part of the - signed URLs. See: - https://cloud.google.com/storage/docs/xml-api/reference-headers#query - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - - :type credentials: :class:`google.auth.credentials.Credentials` or - :class:`NoneType` - :param credentials: The authorization credentials to attach to requests. - These credentials identify this application to the service. - If none are specified, the client will attempt to ascertain - the credentials from the environment. - - :type version: str - :param version: (Optional) The version of signed credential to create. - Must be one of 'v2' | 'v4'. - - :raises: :exc:`ValueError` when version is invalid. - :raises: :exc:`TypeError` when expiration is not a valid type. - :raises: :exc:`AttributeError` if credentials is not an instance - of :class:`google.auth.credentials.Signing`. - - :rtype: str - :returns: A signed URL you can use to access the resource - until expiration. - """ - if version is None: - version = "v2" - elif version not in ("v2", "v4"): - raise ValueError("'version' must be either 'v2' or 'v4'") - - resource = "/{bucket_name}".format(bucket_name=self.name) - - if credentials is None: - client = self._require_client(client) - credentials = client._credentials - - if version == "v2": - helper = generate_signed_url_v2 - else: - helper = generate_signed_url_v4 - - return helper( - credentials, - resource=resource, - expiration=expiration, - api_access_endpoint=api_access_endpoint, - method=method.upper(), - headers=headers, - query_parameters=query_parameters, - ) diff --git a/storage/google/cloud/storage/client.py b/storage/google/cloud/storage/client.py deleted file mode 100644 index 9c89b342df4d..000000000000 --- a/storage/google/cloud/storage/client.py +++ /dev/null @@ -1,789 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
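And a sketch of ``generate_signed_url`` for the bucket itself, again assuming signing-capable credentials; the expiration is arbitrary:

import datetime

from google.cloud import storage

client = storage.Client()  # assumes signing-capable (service account) credentials
bucket = client.get_bucket("my-example-bucket")  # placeholder name

url = bucket.generate_signed_url(
    expiration=datetime.timedelta(hours=1),  # int, datetime, or timedelta
    method="GET",
    version="v4",
)
print(url)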
- -"""Client for interacting with the Google Cloud Storage API.""" - -import google.api_core.client_options - -from google.auth.credentials import AnonymousCredentials - -from google.api_core import page_iterator -from google.cloud._helpers import _LocalStack -from google.cloud.client import ClientWithProject -from google.cloud.exceptions import NotFound -from google.cloud.storage._helpers import _get_storage_host -from google.cloud.storage._http import Connection -from google.cloud.storage.batch import Batch -from google.cloud.storage.bucket import Bucket -from google.cloud.storage.blob import Blob -from google.cloud.storage.hmac_key import HMACKeyMetadata -from google.cloud.storage.acl import BucketACL -from google.cloud.storage.acl import DefaultObjectACL - - -_marker = object() - - -class Client(ClientWithProject): - """Client to bundle configuration needed for API requests. - - :type project: str or None - :param project: the project which the client acts on behalf of. Will be - passed when creating a topic. If not passed, - falls back to the default inferred from the environment. - - :type credentials: :class:`~google.auth.credentials.Credentials` - :param credentials: (Optional) The OAuth2 Credentials to use for this - client. If not passed (and if no ``_http`` object is - passed), falls back to the default inferred from the - environment. - - :type _http: :class:`~requests.Session` - :param _http: (Optional) HTTP object to make requests. Can be any object - that defines ``request()`` with the same interface as - :meth:`requests.Session.request`. If not passed, an - ``_http`` object is created that is bound to the - ``credentials`` for the current object. - This parameter should be considered private, and could - change in the future. - - :type client_info: :class:`~google.api_core.client_info.ClientInfo` - :param client_info: - The client info used to send a user-agent string along with API - requests. If ``None``, then default info will be used. Generally, - you only need to set this if you're developing your own library - or partner tool. - :type client_options: :class:`~google.api_core.client_options.ClientOptions` or :class:`dict` - :param client_options: (Optional) Client options used to set user options on the client. - API Endpoint should be set through client_options. 
- """ - - SCOPE = ( - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write", - ) - """The scopes required for authenticating as a Cloud Storage consumer.""" - - def __init__( - self, - project=_marker, - credentials=None, - _http=None, - client_info=None, - client_options=None, - ): - self._base_connection = None - if project is None: - no_project = True - project = "" - else: - no_project = False - if project is _marker: - project = None - super(Client, self).__init__( - project=project, credentials=credentials, _http=_http - ) - - kw_args = {"client_info": client_info} - - kw_args["api_endpoint"] = _get_storage_host() - - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - kw_args["api_endpoint"] = api_endpoint - - if no_project: - self.project = None - self._connection = Connection(self, **kw_args) - self._batch_stack = _LocalStack() - - @classmethod - def create_anonymous_client(cls): - """Factory: return client with anonymous credentials. - - .. note:: - - Such a client has only limited access to "public" buckets: - listing their contents and downloading their blobs. - - :rtype: :class:`google.cloud.storage.client.Client` - :returns: Instance w/ anonymous credentials and no project. - """ - client = cls(project="", credentials=AnonymousCredentials()) - client.project = None - return client - - @property - def _connection(self): - """Get connection or batch on the client. - - :rtype: :class:`google.cloud.storage._http.Connection` - :returns: The connection set on the client, or the batch - if one is set. - """ - if self.current_batch is not None: - return self.current_batch - else: - return self._base_connection - - @_connection.setter - def _connection(self, value): - """Set connection on the client. - - Intended to be used by constructor (since the base class calls) - self._connection = connection - Will raise if the connection is set more than once. - - :type value: :class:`google.cloud.storage._http.Connection` - :param value: The connection set on the client. - - :raises: :class:`ValueError` if connection has already been set. - """ - if self._base_connection is not None: - raise ValueError("Connection already set on client") - self._base_connection = value - - def _push_batch(self, batch): - """Push a batch onto our stack. - - "Protected", intended for use by batch context mgrs. - - :type batch: :class:`google.cloud.storage.batch.Batch` - :param batch: newly-active batch - """ - self._batch_stack.push(batch) - - def _pop_batch(self): - """Pop a batch from our stack. - - "Protected", intended for use by batch context mgrs. - - :raises: IndexError if the stack is empty. - :rtype: :class:`google.cloud.storage.batch.Batch` - :returns: the top-most batch/transaction, after removing it. - """ - return self._batch_stack.pop() - - def _bucket_arg_to_bucket(self, bucket_or_name): - """Helper to return given bucket or create new by name. - - Args: - bucket_or_name (Union[ \ - :class:`~google.cloud.storage.bucket.Bucket`, \ - str, \ - ]): - The bucket resource to pass or name to create. - - Returns: - google.cloud.storage.bucket.Bucket - The newly created bucket or the given one. 
- """ - if isinstance(bucket_or_name, Bucket): - bucket = bucket_or_name - else: - bucket = Bucket(self, name=bucket_or_name) - return bucket - - @property - def current_batch(self): - """Currently-active batch. - - :rtype: :class:`google.cloud.storage.batch.Batch` or ``NoneType`` (if - no batch is active). - :returns: The batch at the top of the batch stack. - """ - return self._batch_stack.top - - def get_service_account_email(self, project=None): - """Get the email address of the project's GCS service account - - :type project: str - :param project: - (Optional) Project ID to use for retreiving GCS service account - email address. Defaults to the client's project. - - :rtype: str - :returns: service account email address - """ - if project is None: - project = self.project - path = "/projects/%s/serviceAccount" % (project,) - api_response = self._base_connection.api_request(method="GET", path=path) - return api_response["email_address"] - - def bucket(self, bucket_name, user_project=None): - """Factory constructor for bucket object. - - .. note:: - This will not make an HTTP request; it simply instantiates - a bucket object owned by this client. - - :type bucket_name: str - :param bucket_name: The name of the bucket to be instantiated. - - :type user_project: str - :param user_project: (Optional) the project ID to be billed for API - requests made via the bucket. - - :rtype: :class:`google.cloud.storage.bucket.Bucket` - :returns: The bucket object created. - """ - return Bucket(client=self, name=bucket_name, user_project=user_project) - - def batch(self): - """Factory constructor for batch object. - - .. note:: - This will not make an HTTP request; it simply instantiates - a batch object owned by this client. - - :rtype: :class:`google.cloud.storage.batch.Batch` - :returns: The batch object created. - """ - return Batch(client=self) - - def get_bucket(self, bucket_or_name): - """API call: retrieve a bucket via a GET request. - - See - https://cloud.google.com/storage/docs/json_api/v1/buckets/get - - Args: - bucket_or_name (Union[ \ - :class:`~google.cloud.storage.bucket.Bucket`, \ - str, \ - ]): - The bucket resource to pass or name to create. - - Returns: - google.cloud.storage.bucket.Bucket - The bucket matching the name provided. - - Raises: - google.cloud.exceptions.NotFound - If the bucket is not found. - - Examples: - Retrieve a bucket using a string. - - .. literalinclude:: snippets.py - :start-after: [START get_bucket] - :end-before: [END get_bucket] - - Get a bucket using a resource. - - >>> from google.cloud import storage - >>> client = storage.Client() - - >>> # Set properties on a plain resource object. - >>> bucket = client.get_bucket("my-bucket-name") - - >>> # Time passes. Another program may have modified the bucket - ... # in the meantime, so you want to get the latest state. - >>> bucket = client.get_bucket(bucket) # API request. - - """ - bucket = self._bucket_arg_to_bucket(bucket_or_name) - - bucket.reload(client=self) - return bucket - - def lookup_bucket(self, bucket_name): - """Get a bucket by name, returning None if not found. - - You can use this if you would rather check for a None value - than catching an exception: - - .. literalinclude:: snippets.py - :start-after: [START lookup_bucket] - :end-before: [END lookup_bucket] - - :type bucket_name: str - :param bucket_name: The name of the bucket to get. - - :rtype: :class:`google.cloud.storage.bucket.Bucket` - :returns: The bucket matching the name provided or None if not found. 
- """ - try: - return self.get_bucket(bucket_name) - except NotFound: - return None - - def create_bucket( - self, - bucket_or_name, - requester_pays=None, - project=None, - user_project=None, - location=None, - predefined_acl=None, - predefined_default_object_acl=None, - ): - """API call: create a new bucket via a POST request. - - See - https://cloud.google.com/storage/docs/json_api/v1/buckets/insert - - Args: - bucket_or_name (Union[ \ - :class:`~google.cloud.storage.bucket.Bucket`, \ - str, \ - ]): - The bucket resource to pass or name to create. - requester_pays (bool): - Optional. Whether requester pays for API requests for this - bucket and its blobs. - project (str): - Optional. The project under which the bucket is to be created. - If not passed, uses the project set on the client. - user_project (str): - Optional. The project ID to be billed for API requests - made via created bucket. - location (str): - Optional. The location of the bucket. If not passed, - the default location, US, will be used. See - https://cloud.google.com/storage/docs/bucket-locations - predefined_acl (str): - Optional. Name of predefined ACL to apply to bucket. See: - https://cloud.google.com/storage/docs/access-control/lists#predefined-acl - predefined_default_object_acl (str): - Optional. Name of predefined ACL to apply to bucket's objects. See: - https://cloud.google.com/storage/docs/access-control/lists#predefined-acl - - Returns: - google.cloud.storage.bucket.Bucket - The newly created bucket. - - Raises: - google.cloud.exceptions.Conflict - If the bucket already exists. - - Examples: - Create a bucket using a string. - - .. literalinclude:: snippets.py - :start-after: [START create_bucket] - :end-before: [END create_bucket] - - Create a bucket using a resource. - - >>> from google.cloud import storage - >>> client = storage.Client() - - >>> # Set properties on a plain resource object. - >>> bucket = storage.Bucket("my-bucket-name") - >>> bucket.location = "europe-west6" - >>> bucket.storage_class = "COLDLINE" - - >>> # Pass that resource object to the client. - >>> bucket = client.create_bucket(bucket) # API request. - - """ - bucket = self._bucket_arg_to_bucket(bucket_or_name) - - if project is None: - project = self.project - - if project is None: - raise ValueError("Client project not set: pass an explicit project.") - - if requester_pays is not None: - bucket.requester_pays = requester_pays - - query_params = {"project": project} - - if predefined_acl is not None: - predefined_acl = BucketACL.validate_predefined(predefined_acl) - query_params["predefinedAcl"] = predefined_acl - - if predefined_default_object_acl is not None: - predefined_default_object_acl = DefaultObjectACL.validate_predefined( - predefined_default_object_acl - ) - query_params["predefinedDefaultObjectAcl"] = predefined_default_object_acl - - if user_project is not None: - query_params["userProject"] = user_project - - properties = {key: bucket._properties[key] for key in bucket._changes} - properties["name"] = bucket.name - - if location is not None: - properties["location"] = location - - api_response = self._connection.api_request( - method="POST", - path="/b", - query_params=query_params, - data=properties, - _target_object=bucket, - ) - - bucket._set_properties(api_response) - return bucket - - def download_blob_to_file(self, blob_or_uri, file_obj, start=None, end=None): - """Download the contents of a blob object or blob URI into a file-like object. 
- - Args: - blob_or_uri (Union[ \ - :class:`~google.cloud.storage.blob.Blob`, \ - str, \ - ]): - The blob resource to pass or URI to download. - file_obj (file): - A file handle to which to write the blob's data. - start (int): - Optional. The first byte in a range to be downloaded. - end (int): - Optional. The last byte in a range to be downloaded. - - Examples: - Download a blob using using a blob resource. - - >>> from google.cloud import storage - >>> client = storage.Client() - - >>> bucket = client.get_bucket('my-bucket-name') - >>> blob = storage.Blob('path/to/blob', bucket) - - >>> with open('file-to-download-to') as file_obj: - >>> client.download_blob_to_file(blob, file_obj) # API request. - - - Download a blob using a URI. - - >>> from google.cloud import storage - >>> client = storage.Client() - - >>> with open('file-to-download-to') as file_obj: - >>> client.download_blob_to_file( - >>> 'gs://bucket_name/path/to/blob', file_obj) - - - """ - try: - blob_or_uri.download_to_file(file_obj, client=self, start=start, end=end) - except AttributeError: - blob = Blob.from_string(blob_or_uri) - blob.download_to_file(file_obj, client=self, start=start, end=end) - - def list_blobs( - self, - bucket_or_name, - max_results=None, - page_token=None, - prefix=None, - delimiter=None, - versions=None, - projection="noAcl", - fields=None, - ): - """Return an iterator used to find blobs in the bucket. - - If :attr:`user_project` is set, bills the API request to that project. - - Args: - bucket_or_name (Union[ \ - :class:`~google.cloud.storage.bucket.Bucket`, \ - str, \ - ]): - The bucket resource to pass or name to create. - - max_results (int): - (Optional) The maximum number of blobs in each page of results - from this request. Non-positive values are ignored. Defaults to - a sensible value set by the API. - - page_token (str): - (Optional) If present, return the next batch of blobs, using the - value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing the - token. - - prefix (str): - (Optional) prefix used to filter blobs. - - delimiter (str): - (Optional) Delimiter, used with ``prefix`` to - emulate hierarchy. - - versions (bool): - (Optional) Whether object versions should be returned - as separate blobs. - - projection (str): - (Optional) If used, must be 'full' or 'noAcl'. - Defaults to ``'noAcl'``. Specifies the set of - properties to return. - - fields (str): - (Optional) Selector specifying which fields to include - in a partial response. Must be a list of fields. For - example to get a partial response with just the next - page token and the name and language of each blob returned: - ``'items(name,contentLanguage),nextPageToken'``. - See: https://cloud.google.com/storage/docs/json_api/v1/parameters#fields - - Returns: - Iterator of all :class:`~google.cloud.storage.blob.Blob` - in this bucket matching the arguments. - """ - bucket = self._bucket_arg_to_bucket(bucket_or_name) - return bucket.list_blobs( - max_results=max_results, - page_token=page_token, - prefix=prefix, - delimiter=delimiter, - versions=versions, - projection=projection, - fields=fields, - client=self, - ) - - def list_buckets( - self, - max_results=None, - page_token=None, - prefix=None, - projection="noAcl", - fields=None, - project=None, - ): - """Get all buckets in the project associated to the client. - - This will not populate the list of blobs available in each - bucket. - - .. 
literalinclude:: snippets.py - :start-after: [START list_buckets] - :end-before: [END list_buckets] - - This implements "storage.buckets.list". - - :type max_results: int - :param max_results: Optional. The maximum number of buckets to return. - - :type page_token: str - :param page_token: - Optional. If present, return the next batch of buckets, using the - value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing the - token. - - :type prefix: str - :param prefix: Optional. Filter results to buckets whose names begin - with this prefix. - - :type projection: str - :param projection: - (Optional) Specifies the set of properties to return. If used, must - be 'full' or 'noAcl'. Defaults to 'noAcl'. - - :type fields: str - :param fields: - (Optional) Selector specifying which fields to include in a partial - response. Must be a list of fields. For example to get a partial - response with just the next page token and the language of each - bucket returned: 'items/id,nextPageToken' - - :type project: str - :param project: (Optional) the project whose buckets are to be listed. - If not passed, uses the project set on the client. - - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :raises ValueError: if both ``project`` is ``None`` and the client's - project is also ``None``. - :returns: Iterator of all :class:`~google.cloud.storage.bucket.Bucket` - belonging to this project. - """ - if project is None: - project = self.project - - if project is None: - raise ValueError("Client project not set: pass an explicit project.") - - extra_params = {"project": project} - - if prefix is not None: - extra_params["prefix"] = prefix - - extra_params["projection"] = projection - - if fields is not None: - extra_params["fields"] = fields - - return page_iterator.HTTPIterator( - client=self, - api_request=self._connection.api_request, - path="/b", - item_to_value=_item_to_bucket, - page_token=page_token, - max_results=max_results, - extra_params=extra_params, - ) - - def create_hmac_key( - self, service_account_email, project_id=None, user_project=None - ): - """Create an HMAC key for a service account. - - :type service_account_email: str - :param service_account_email: e-mail address of the service account - - :type project_id: str - :param project_id: (Optional) explicit project ID for the key. - Defaults to the client's project. - - :type user_project: str - :param user_project: (Optional) This parameter is currently ignored. - - :rtype: - Tuple[:class:`~google.cloud.storage.hmac_key.HMACKeyMetadata`, str] - :returns: metadata for the created key, plus the bytes of the key's secret, which is an 40-character base64-encoded string. - """ - if project_id is None: - project_id = self.project - - path = "/projects/{}/hmacKeys".format(project_id) - qs_params = {"serviceAccountEmail": service_account_email} - - if user_project is not None: - qs_params["userProject"] = user_project - - api_response = self._connection.api_request( - method="POST", path=path, query_params=qs_params - ) - metadata = HMACKeyMetadata(self) - metadata._properties = api_response["metadata"] - secret = api_response["secret"] - return metadata, secret - - def list_hmac_keys( - self, - max_results=None, - service_account_email=None, - show_deleted_keys=None, - project_id=None, - user_project=None, - ): - """List HMAC keys for a project. 
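An HMAC-key sketch covering ``create_hmac_key``, ``list_hmac_keys``, and ``get_hmac_key_metadata``; the service-account e-mail and access ID are placeholders, and the ``access_id``/``state`` attributes are assumed from the ``HMACKeyMetadata`` resource:

from google.cloud import storage

client = storage.Client()

# The secret is returned only at creation time; store it securely.
metadata, secret = client.create_hmac_key("svc@my-project.iam.gserviceaccount.com")
print(metadata.access_id, len(secret))

for key in client.list_hmac_keys(show_deleted_keys=False):
    print(key.access_id, key.state)

metadata = client.get_hmac_key_metadata("GOOG1EXAMPLEACCESSID")  # placeholder ID
print(metadata.state)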
- - :type max_results: int - :param max_results: - (Optional) max number of keys to return in a given page. - - :type service_account_email: str - :param service_account_email: - (Optional) limit keys to those created by the given service account. - - :type show_deleted_keys: bool - :param show_deleted_keys: - (Optional) include deleted keys in the list. Default is to - exclude them. - - :type project_id: str - :param project_id: (Optional) explicit project ID for the key. - Defaults to the client's project. - - :type user_project: str - :param user_project: (Optional) This parameter is currently ignored. - - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of - :class:`~google.cloud.storage.hmac_key.HMACKeyMetadata` instances - for HMAC keys in the project. - """ - if project_id is None: - project_id = self.project - - path = "/projects/{}/hmacKeys".format(project_id) - extra_params = {} - - if service_account_email is not None: - extra_params["serviceAccountEmail"] = service_account_email - - if show_deleted_keys is not None: - extra_params["showDeletedKeys"] = show_deleted_keys - - if user_project is not None: - extra_params["userProject"] = user_project - - return page_iterator.HTTPIterator( - client=self, - api_request=self._connection.api_request, - path=path, - item_to_value=_item_to_hmac_key_metadata, - max_results=max_results, - extra_params=extra_params, - ) - - def get_hmac_key_metadata(self, access_id, project_id=None, user_project=None): - """Return a metadata instance for the given HMAC key. - - :type access_id: str - :param access_id: Unique ID of an existing key. - - :type project_id: str - :param project_id: (Optional) project ID of an existing key. - Defaults to client's project. - - :type user_project: str - :param user_project: (Optional) This parameter is currently ignored. - """ - metadata = HMACKeyMetadata(self, access_id, project_id, user_project) - metadata.reload() # raises NotFound for missing key - return metadata - - -def _item_to_bucket(iterator, item): - """Convert a JSON bucket to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that has retrieved the item. - - :type item: dict - :param item: An item to be converted to a bucket. - - :rtype: :class:`.Bucket` - :returns: The next bucket in the page. - """ - name = item.get("name") - bucket = Bucket(iterator.client, name) - bucket._set_properties(item) - return bucket - - -def _item_to_hmac_key_metadata(iterator, item): - """Convert a JSON key metadata resource to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that has retrieved the item. - - :type item: dict - :param item: An item to be converted to a key metadata instance. - - :rtype: :class:`~google.cloud.storage.hmac_key.HMACKeyMetadata` - :returns: The next key metadata instance in the page. - """ - metadata = HMACKeyMetadata(iterator.client) - metadata._properties = item - return metadata diff --git a/storage/google/cloud/storage/constants.py b/storage/google/cloud/storage/constants.py deleted file mode 100644 index faadff1f0702..000000000000 --- a/storage/google/cloud/storage/constants.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Constants used across google.cloud.storage modules.""" - -# Storage classes - -STANDARD_STORAGE_CLASS = "STANDARD" -"""Storage class for objects accessed more than once per month. - -See: https://cloud.google.com/storage/docs/storage-classes -""" - -NEARLINE_STORAGE_CLASS = "NEARLINE" -"""Storage class for objects accessed at most once per month. - -See: https://cloud.google.com/storage/docs/storage-classes -""" - -COLDLINE_STORAGE_CLASS = "COLDLINE" -"""Storage class for objects accessed at most once per year. - -See: https://cloud.google.com/storage/docs/storage-classes -""" - -ARCHIVE_STORAGE_CLASS = "ARCHIVE" -"""Storage class for objects accessed less frequently than once per year. - -See: https://cloud.google.com/storage/docs/storage-classes -""" - -MULTI_REGIONAL_LEGACY_STORAGE_CLASS = "MULTI_REGIONAL" -"""Legacy storage class. - -Alias for :attr:`STANDARD_STORAGE_CLASS`. - -Can only be used for objects in buckets whose -:attr:`~google.cloud.storage.bucket.Bucket.location_type` is -:attr:`~google.cloud.storage.bucket.Bucket.MULTI_REGION_LOCATION_TYPE`. - -See: https://cloud.google.com/storage/docs/storage-classes -""" - -REGIONAL_LEGACY_STORAGE_CLASS = "REGIONAL" -"""Legacy storage class. - -Alias for :attr:`STANDARD_STORAGE_CLASS`. - -Can only be used for objects in buckets whose -:attr:`~google.cloud.storage.bucket.Bucket.location_type` is -:attr:`~google.cloud.storage.bucket.Bucket.REGION_LOCATION_TYPE`. - -See: https://cloud.google.com/storage/docs/storage-classes -""" - -DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS = "DURABLE_REDUCED_AVAILABILITY" -"""Legacy storage class. - -Similar to :attr:`NEARLINE_STORAGE_CLASS`. -""" - - -# Location types - -MULTI_REGION_LOCATION_TYPE = "multi-region" -"""Location type: data will be replicated across regions in a multi-region. - -Provides highest availability across largest area. -""" - -REGION_LOCATION_TYPE = "region" -"""Location type: data will be stored within a single region. - -Provides lowest latency within a single region. -""" - -DUAL_REGION_LOCATION_TYPE = "dual-region" -"""Location type: data will be stored within two primary regions. - -Provides high availability and low latency across two regions. -""" diff --git a/storage/google/cloud/storage/hmac_key.py b/storage/google/cloud/storage/hmac_key.py deleted file mode 100644 index 96ccbcaed910..000000000000 --- a/storage/google/cloud/storage/hmac_key.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
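As a rough illustration of how the storage-class and location-type constants above are meant to be used (a minimal sketch only; the bucket name and location are placeholders, and the calls shown mirror the ``Bucket.storage_class`` / ``Bucket.create(location=...)`` usage exercised in the system tests further down):

```python
from google.cloud import storage
from google.cloud.storage import constants

client = storage.Client()

# Create a Nearline bucket in a single region (placeholder names).
bucket = client.bucket("my-example-bucket")
bucket.storage_class = constants.NEARLINE_STORAGE_CLASS
bucket.create(location="us-central1")

# Inspect the server-reported location type; for a regional bucket this
# is expected to match constants.REGION_LOCATION_TYPE.
bucket.reload()
print(bucket.storage_class, bucket.location_type)
```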
- -from google.cloud.exceptions import NotFound -from google.cloud._helpers import _rfc3339_to_datetime - - -class HMACKeyMetadata(object): - """Metadata about an HMAC service account key within Cloud Storage. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: client associated with the key metadata. - - :type access_id: str - :param access_id: (Optional) unique ID of an existing key. - - :type project_id: str - :param project_id: (Optional) project ID of an existing key. - Defaults to client's project. - - :type user_project: str - :param user_project: (Optional) This parameter is currently ignored. - """ - - ACTIVE_STATE = "ACTIVE" - """Key is active, and may be used to sign requests.""" - INACTIVE_STATE = "INACTIVE" - """Key is inactive, and may not be used to sign requests. - - It can be re-activated via :meth:`update`. - """ - DELETED_STATE = "DELETED" - """Key is deleted. It cannot be re-activated.""" - - _SETTABLE_STATES = (ACTIVE_STATE, INACTIVE_STATE) - - def __init__(self, client, access_id=None, project_id=None, user_project=None): - self._client = client - self._properties = {} - - if access_id is not None: - self._properties["accessId"] = access_id - - if project_id is not None: - self._properties["projectId"] = project_id - - self._user_project = user_project - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - - return self._client == other._client and self.access_id == other.access_id - - def __hash__(self): - return hash(self._client) + hash(self.access_id) - - @property - def access_id(self): - """Access ID of the key. - - :rtype: str or None - :returns: unique identifier of the key within a project. - """ - return self._properties.get("accessId") - - @property - def etag(self): - """ETag identifying the version of the key metadata. - - :rtype: str or None - :returns: ETag for the version of the key's metadata. - """ - return self._properties.get("etag") - - @property - def id(self): - """ID of the key, including the Project ID and the Access ID. - - :rtype: str or None - :returns: ID of the key. - """ - return self._properties.get("id") - - @property - def project(self): - """Project ID associated with the key. - - :rtype: str or None - :returns: project identifier for the key. - """ - return self._properties.get("projectId") - - @property - def service_account_email(self): - """Service account e-mail address associated with the key. - - :rtype: str or None - :returns: e-mail address for the service account which created the key. - """ - return self._properties.get("serviceAccountEmail") - - @property - def state(self): - """Get / set key's state. - - One of: - - ``ACTIVE`` - - ``INACTIVE`` - - ``DELETED`` - - :rtype: str or None - :returns: key's current state. - """ - return self._properties.get("state") - - @state.setter - def state(self, value): - if value not in self._SETTABLE_STATES: - raise ValueError( - "State may only be set to one of: {}".format( - ", ".join(self._SETTABLE_STATES) - ) - ) - - self._properties["state"] = value - - @property - def time_created(self): - """Retrieve the timestamp at which the HMAC key was created. - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the key's resource has not been loaded - from the server. 
- """ - value = self._properties.get("timeCreated") - if value is not None: - return _rfc3339_to_datetime(value) - - @property - def updated(self): - """Retrieve the timestamp at which the HMAC key was created. - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the bucket's resource has not been loaded - from the server. - """ - value = self._properties.get("updated") - if value is not None: - return _rfc3339_to_datetime(value) - - @property - def path(self): - """Resource path for the metadata's key.""" - - if self.access_id is None: - raise ValueError("No 'access_id' set.") - - project = self.project - if project is None: - project = self._client.project - - return "/projects/{}/hmacKeys/{}".format(project, self.access_id) - - @property - def user_project(self): - """Project ID to be billed for API requests made via this bucket. - - This property is currently ignored by the server. - - :rtype: str - """ - return self._user_project - - def exists(self): - """Determine whether or not the key for this metadata exists. - - :rtype: bool - :returns: True if the key exists in Cloud Storage. - """ - try: - qs_params = {} - - if self.user_project is not None: - qs_params["userProject"] = self.user_project - - self._client._connection.api_request( - method="GET", path=self.path, query_params=qs_params - ) - except NotFound: - return False - else: - return True - - def reload(self): - """Reload properties from Cloud Storage. - - :raises :class:`~google.api_core.exceptions.NotFound`: - if the key does not exist on the back-end. - """ - qs_params = {} - - if self.user_project is not None: - qs_params["userProject"] = self.user_project - - self._properties = self._client._connection.api_request( - method="GET", path=self.path, query_params=qs_params - ) - - def update(self): - """Save writable properties to Cloud Storage. - - :raises :class:`~google.api_core.exceptions.NotFound`: - if the key does not exist on the back-end. - """ - qs_params = {} - if self.user_project is not None: - qs_params["userProject"] = self.user_project - - payload = {"state": self.state} - self._properties = self._client._connection.api_request( - method="PUT", path=self.path, data=payload, query_params=qs_params - ) - - def delete(self): - """Delete the key from Cloud Storage. - - :raises :class:`~google.api_core.exceptions.NotFound`: - if the key does not exist on the back-end. - """ - if self.state != self.INACTIVE_STATE: - raise ValueError("Cannot delete key if not in 'INACTIVE' state.") - - qs_params = {} - if self.user_project is not None: - qs_params["userProject"] = self.user_project - - self._client._connection.api_request( - method="DELETE", path=self.path, query_params=qs_params - ) diff --git a/storage/google/cloud/storage/iam.py b/storage/google/cloud/storage/iam.py deleted file mode 100644 index fb7e9e4ede3a..000000000000 --- a/storage/google/cloud/storage/iam.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Storage API IAM policy definitions - -For allowed roles / permissions, see: -https://cloud.google.com/storage/docs/access-control/iam -""" - -# Storage-specific IAM roles - -STORAGE_OBJECT_CREATOR_ROLE = "roles/storage.objectCreator" -"""Role implying rights to create objects, but not delete or overwrite them.""" - -STORAGE_OBJECT_VIEWER_ROLE = "roles/storage.objectViewer" -"""Role implying rights to view object properties, excluding ACLs.""" - -STORAGE_OBJECT_ADMIN_ROLE = "roles/storage.objectAdmin" -"""Role implying full control of objects.""" - -STORAGE_ADMIN_ROLE = "roles/storage.admin" -"""Role implying full control of objects and buckets.""" - -STORAGE_VIEWER_ROLE = "Viewer" -"""Can list buckets.""" - -STORAGE_EDITOR_ROLE = "Editor" -"""Can create, list, and delete buckets.""" - -STORAGE_OWNER_ROLE = "Owners" -"""Can create, list, and delete buckets.""" - - -# Storage-specific permissions - -STORAGE_BUCKETS_CREATE = "storage.buckets.create" -"""Permission: create buckets.""" - -STORAGE_BUCKETS_DELETE = "storage.buckets.delete" -"""Permission: delete buckets.""" - -STORAGE_BUCKETS_GET = "storage.buckets.get" -"""Permission: read bucket metadata, excluding ACLs.""" - -STORAGE_BUCKETS_GET_IAM_POLICY = "storage.buckets.getIamPolicy" -"""Permission: read bucket ACLs.""" - -STORAGE_BUCKETS_LIST = "storage.buckets.list" -"""Permission: list buckets.""" - -STORAGE_BUCKETS_SET_IAM_POLICY = "storage.buckets.setIamPolicy" -"""Permission: update bucket ACLs.""" - -STORAGE_BUCKETS_UPDATE = "storage.buckets.update" -"""Permission: update buckets, excluding ACLs.""" - -STORAGE_OBJECTS_CREATE = "storage.objects.create" -"""Permission: add new objects to a bucket.""" - -STORAGE_OBJECTS_DELETE = "storage.objects.delete" -"""Permission: delete objects.""" - -STORAGE_OBJECTS_GET = "storage.objects.get" -"""Permission: read object data / metadata, excluding ACLs.""" - -STORAGE_OBJECTS_GET_IAM_POLICY = "storage.objects.getIamPolicy" -"""Permission: read object ACLs.""" - -STORAGE_OBJECTS_LIST = "storage.objects.list" -"""Permission: list objects in a bucket.""" - -STORAGE_OBJECTS_SET_IAM_POLICY = "storage.objects.setIamPolicy" -"""Permission: update object ACLs.""" - -STORAGE_OBJECTS_UPDATE = "storage.objects.update" -"""Permission: update object metadata, excluding ACLs.""" diff --git a/storage/google/cloud/storage/notification.py b/storage/google/cloud/storage/notification.py deleted file mode 100644 index 982dc16c04d6..000000000000 --- a/storage/google/cloud/storage/notification.py +++ /dev/null @@ -1,389 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
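The role and permission constants above are typically consumed through the bucket IAM helpers; a minimal sketch, assuming a placeholder bucket name and service-account member, and following the same ``test_iam_permissions`` / ``get_iam_policy`` / ``set_iam_policy`` pattern used by the system tests later in this change:

```python
from google.cloud import storage
from google.cloud.storage import iam

client = storage.Client()
bucket = client.bucket("my-example-bucket")  # placeholder name

# Check which of the listed permissions the caller actually holds.
granted = bucket.test_iam_permissions(
    [iam.STORAGE_BUCKETS_GET, iam.STORAGE_OBJECTS_LIST]
)
print(granted)

# Grant a service account read access to objects in the bucket
# (placeholder member string).
policy = bucket.get_iam_policy()
viewers = policy.setdefault(iam.STORAGE_OBJECT_VIEWER_ROLE, set())
viewers.add("serviceAccount:example@example.iam.gserviceaccount.com")
bucket.set_iam_policy(policy)
```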
- -"""Support for bucket notification resources.""" - -import re - -from google.api_core.exceptions import NotFound - - -OBJECT_FINALIZE_EVENT_TYPE = "OBJECT_FINALIZE" -OBJECT_METADATA_UPDATE_EVENT_TYPE = "OBJECT_METADATA_UPDATE" -OBJECT_DELETE_EVENT_TYPE = "OBJECT_DELETE" -OBJECT_ARCHIVE_EVENT_TYPE = "OBJECT_ARCHIVE" - -JSON_API_V1_PAYLOAD_FORMAT = "JSON_API_V1" -NONE_PAYLOAD_FORMAT = "NONE" - -_TOPIC_REF_FMT = "//pubsub.googleapis.com/projects/{}/topics/{}" -_PROJECT_PATTERN = r"(?P[a-z][a-z0-9-]{4,28}[a-z0-9])" -_TOPIC_NAME_PATTERN = r"(?P[A-Za-z](\w|[-_.~+%])+)" -_TOPIC_REF_PATTERN = _TOPIC_REF_FMT.format(_PROJECT_PATTERN, _TOPIC_NAME_PATTERN) -_TOPIC_REF_RE = re.compile(_TOPIC_REF_PATTERN) -_BAD_TOPIC = ( - "Resource has invalid topic: {}; see " - "https://cloud.google.com/storage/docs/json_api/v1/" - "notifications/insert#topic" -) - - -class BucketNotification(object): - """Represent a single notification resource for a bucket. - - See: https://cloud.google.com/storage/docs/json_api/v1/notifications - - :type bucket: :class:`google.cloud.storage.bucket.Bucket` - :param bucket: Bucket to which the notification is bound. - - :type topic_name: str - :param topic_name: Topic name to which notifications are published. - - :type topic_project: str - :param topic_project: - (Optional) project ID of topic to which notifications are published. - If not passed, uses the project ID of the bucket's client. - - :type custom_attributes: dict - :param custom_attributes: - (Optional) additional attributes passed with notification events. - - :type event_types: list(str) - :param event_types: - (Optional) event types for which notificatin events are published. - - :type blob_name_prefix: str - :param blob_name_prefix: - (Optional) prefix of blob names for which notification events are - published.. - - :type payload_format: str - :param payload_format: - (Optional) format of payload for notification events. - """ - - def __init__( - self, - bucket, - topic_name, - topic_project=None, - custom_attributes=None, - event_types=None, - blob_name_prefix=None, - payload_format=NONE_PAYLOAD_FORMAT, - ): - self._bucket = bucket - self._topic_name = topic_name - - if topic_project is None: - topic_project = bucket.client.project - - if topic_project is None: - raise ValueError("Client project not set: pass an explicit topic_project.") - - self._topic_project = topic_project - - self._properties = {} - - if custom_attributes is not None: - self._properties["custom_attributes"] = custom_attributes - - if event_types is not None: - self._properties["event_types"] = event_types - - if blob_name_prefix is not None: - self._properties["object_name_prefix"] = blob_name_prefix - - self._properties["payload_format"] = payload_format - - @classmethod - def from_api_repr(cls, resource, bucket): - """Construct an instance from the JSON repr returned by the server. - - See: https://cloud.google.com/storage/docs/json_api/v1/notifications - - :type resource: dict - :param resource: JSON repr of the notification - - :type bucket: :class:`google.cloud.storage.bucket.Bucket` - :param bucket: Bucket to which the notification is bound. 
- - :rtype: :class:`BucketNotification` - :returns: the new notification instance - """ - topic_path = resource.get("topic") - if topic_path is None: - raise ValueError("Resource has no topic") - - name, project = _parse_topic_path(topic_path) - instance = cls(bucket, name, topic_project=project) - instance._properties = resource - - return instance - - @property - def bucket(self): - """Bucket to which the notification is bound.""" - return self._bucket - - @property - def topic_name(self): - """Topic name to which notifications are published.""" - return self._topic_name - - @property - def topic_project(self): - """Project ID of topic to which notifications are published. - """ - return self._topic_project - - @property - def custom_attributes(self): - """Custom attributes passed with notification events. - """ - return self._properties.get("custom_attributes") - - @property - def event_types(self): - """Event types for which notification events are published. - """ - return self._properties.get("event_types") - - @property - def blob_name_prefix(self): - """Prefix of blob names for which notification events are published. - """ - return self._properties.get("object_name_prefix") - - @property - def payload_format(self): - """Format of payload of notification events.""" - return self._properties.get("payload_format") - - @property - def notification_id(self): - """Server-set ID of notification resource.""" - return self._properties.get("id") - - @property - def etag(self): - """Server-set ETag of notification resource.""" - return self._properties.get("etag") - - @property - def self_link(self): - """Server-set ETag of notification resource.""" - return self._properties.get("selfLink") - - @property - def client(self): - """The client bound to this notfication.""" - return self.bucket.client - - @property - def path(self): - """The URL path for this notification.""" - return "/b/{}/notificationConfigs/{}".format( - self.bucket.name, self.notification_id - ) - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: the client to use. - - :rtype: :class:`google.cloud.storage.client.Client` - :returns: The client passed in or the bucket's client. - """ - if client is None: - client = self.client - return client - - def _set_properties(self, response): - """Helper for :meth:`reload`. - - :type response: dict - :param response: resource mapping from server - """ - self._properties.clear() - self._properties.update(response) - - def create(self, client=None): - """API wrapper: create the notification. - - See: - https://cloud.google.com/storage/docs/json_api/v1/notifications/insert - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type client: :class:`~google.cloud.storage.client.Client` - :param client: (Optional) the client to use. If not passed, falls back - to the ``client`` stored on the notification's bucket. 
- """ - if self.notification_id is not None: - raise ValueError( - "Notification already exists w/ id: {}".format(self.notification_id) - ) - - client = self._require_client(client) - - query_params = {} - if self.bucket.user_project is not None: - query_params["userProject"] = self.bucket.user_project - - path = "/b/{}/notificationConfigs".format(self.bucket.name) - properties = self._properties.copy() - properties["topic"] = _TOPIC_REF_FMT.format(self.topic_project, self.topic_name) - self._properties = client._connection.api_request( - method="POST", path=path, query_params=query_params, data=properties - ) - - def exists(self, client=None): - """Test whether this notification exists. - - See: - https://cloud.google.com/storage/docs/json_api/v1/notifications/get - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: bool - :returns: True, if the notification exists, else False. - :raises ValueError: if the notification has no ID. - """ - if self.notification_id is None: - raise ValueError("Notification not intialized by server") - - client = self._require_client(client) - - query_params = {} - if self.bucket.user_project is not None: - query_params["userProject"] = self.bucket.user_project - - try: - client._connection.api_request( - method="GET", path=self.path, query_params=query_params - ) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """Update this notification from the server configuration. - - See: - https://cloud.google.com/storage/docs/json_api/v1/notifications/get - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: bool - :returns: True, if the notification exists, else False. - :raises ValueError: if the notification has no ID. - """ - if self.notification_id is None: - raise ValueError("Notification not intialized by server") - - client = self._require_client(client) - - query_params = {} - if self.bucket.user_project is not None: - query_params["userProject"] = self.bucket.user_project - - response = client._connection.api_request( - method="GET", path=self.path, query_params=query_params - ) - self._set_properties(response) - - def delete(self, client=None): - """Delete this notification. - - See: - https://cloud.google.com/storage/docs/json_api/v1/notifications/delete - - If :attr:`user_project` is set on the bucket, bills the API request - to that project. - - :type client: :class:`~google.cloud.storage.client.Client` or - ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :raises: :class:`google.api_core.exceptions.NotFound`: - if the notification does not exist. - :raises ValueError: if the notification has no ID. 
- """ - if self.notification_id is None: - raise ValueError("Notification not intialized by server") - - client = self._require_client(client) - - query_params = {} - if self.bucket.user_project is not None: - query_params["userProject"] = self.bucket.user_project - - client._connection.api_request( - method="DELETE", path=self.path, query_params=query_params - ) - - -def _parse_topic_path(topic_path): - """Verify that a topic path is in the correct format. - - .. _resource manager docs: https://cloud.google.com/resource-manager/\ - reference/rest/v1beta1/projects#\ - Project.FIELDS.project_id - .. _topic spec: https://cloud.google.com/storage/docs/json_api/v1/\ - notifications/insert#topic - - Expected to be of the form: - - //pubsub.googleapis.com/projects/{project}/topics/{topic} - - where the ``project`` value must be "6 to 30 lowercase letters, digits, - or hyphens. It must start with a letter. Trailing hyphens are prohibited." - (see `resource manager docs`_) and ``topic`` must have length at least two, - must start with a letter and may only contain alphanumeric characters or - ``-``, ``_``, ``.``, ``~``, ``+`` or ``%`` (i.e characters used for URL - encoding, see `topic spec`_). - - Args: - topic_path (str): The topic path to be verified. - - Returns: - Tuple[str, str]: The ``project`` and ``topic`` parsed from the - ``topic_path``. - - Raises: - ValueError: If the topic path is invalid. - """ - match = _TOPIC_REF_RE.match(topic_path) - if match is None: - raise ValueError(_BAD_TOPIC.format(topic_path)) - - return match.group("name"), match.group("project") diff --git a/storage/noxfile.py b/storage/noxfile.py deleted file mode 100644 index d4df55d0b1dc..000000000000 --- a/storage/noxfile.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import - -import os -import shutil - -import nox - - -LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core")) -BLACK_VERSION = "black==19.3b0" -BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] - -if os.path.exists("samples"): - BLACK_PATHS.append("samples") - - -@nox.session(python="3.7") -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. - """ - session.install("flake8", BLACK_VERSION, *LOCAL_DEPS) - session.run("black", "--check", *BLACK_PATHS) - session.run("flake8", "google", "tests") - - -@nox.session(python="3.6") -def blacken(session): - """Run black. - - Format code to uniform standard. - - This currently uses Python 3.6 due to the automated Kokoro run of synthtool. - That run uses an image that doesn't have 3.6 installed. Before updating this - check the state of the `gcp_ubuntu_config` we use for that Kokoro run. 
- """ - session.install(BLACK_VERSION) - session.run("black", *BLACK_PATHS) - - -@nox.session(python="3.7") -def lint_setup_py(session): - """Verify that setup.py is valid (including RST check).""" - session.install("docutils", "Pygments") - session.run("python", "setup.py", "check", "--restructuredtext", "--strict") - - -@nox.session -def default(session): - """Default unit test session. - - This is intended to be run **without** an interpreter set, so - that the current ``python`` (on the ``PATH``) or the version of - Python corresponding to the ``nox`` binary the ``PATH`` can - run the tests. - """ - # Install all test dependencies, then install local packages in-place. - session.install("mock", "pytest", "pytest-cov") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", ".") - - # Run py.test against the unit tests. - session.run( - "py.test", - "--quiet", - "--cov=google.cloud.storage", - "--cov=tests.unit", - "--cov-append", - "--cov-config=.coveragerc", - "--cov-report=", - "--cov-fail-under=97", - "tests/unit", - *session.posargs - ) - - -@nox.session(python=["2.7", "3.5", "3.6", "3.7"]) -def unit(session): - """Run the unit test suite.""" - default(session) - - -@nox.session(python=["2.7", "3.6"]) -def system(session): - """Run the system test suite.""" - - # Sanity check: Only run system tests if the environment variable is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable.") - - # Use pre-release gRPC for system tests. - session.install("--pre", "grpcio") - - # Install all test dependencies, then install local packages in-place. - session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - systest_deps = ["../test_utils/", "../pubsub", "../kms", "../iam"] - for systest_dep in systest_deps: - session.install("-e", systest_dep) - session.install("-e", ".") - - # Run py.test against the system tests. - session.run("py.test", "--quiet", "tests/system.py", *session.posargs) - - -@nox.session(python="3.6") -def cover(session): - """Run the final coverage report. - - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - session.run("coverage", "erase") - - -@nox.session(python="3.7") -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/storage/pylint.config.py b/storage/pylint.config.py deleted file mode 100644 index 5d64b9d2f256..000000000000 --- a/storage/pylint.config.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This module is used to configure gcp-devrel-py-tools run-pylint.""" - -# Library configuration - -# library_additions = {} -# library_replacements = {} - -# Test configuration - -# test_additions = copy.deepcopy(library_additions) -# test_replacements = copy.deepcopy(library_replacements) diff --git a/storage/setup.cfg b/storage/setup.cfg deleted file mode 100644 index 2a9acf13daa9..000000000000 --- a/storage/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[bdist_wheel] -universal = 1 diff --git a/storage/setup.py b/storage/setup.py deleted file mode 100644 index 50d526f2ae4e..000000000000 --- a/storage/setup.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import io -import os - -import setuptools - - -# Package metadata. - -name = "google-cloud-storage" -description = "Google Cloud Storage API client library" -version = "1.25.0" -# Should be one of: -# 'Development Status :: 3 - Alpha' -# 'Development Status :: 4 - Beta' -# 'Development Status :: 5 - Production/Stable' -release_status = "Development Status :: 5 - Production/Stable" -dependencies = [ - "google-auth >= 1.9.0, < 2.0dev", - "google-cloud-core >= 1.2.0, < 2.0dev", - "google-resumable-media >= 0.5.0, < 0.6dev", -] -extras = {} - - -# Setup boilerplate below this line. - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, "README.rst") -with io.open(readme_filename, encoding="utf-8") as readme_file: - readme = readme_file.read() - -# Only include packages under the 'google' namespace. Do not include tests, -# benchmarks, etc. -packages = [ - package for package in setuptools.find_packages() if package.startswith("google") -] - -# Determine which namespaces are needed. 
-namespaces = ["google"] -if "google.cloud" in packages: - namespaces.append("google.cloud") - - -setuptools.setup( - name=name, - version=version, - description=description, - long_description=readme, - author="Google LLC", - author_email="googleapis-packages@google.com", - license="Apache 2.0", - url="https://github.com/GoogleCloudPlatform/google-cloud-python", - classifiers=[ - release_status, - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Operating System :: OS Independent", - "Topic :: Internet", - ], - platforms="Posix; MacOS X; Windows", - packages=packages, - namespace_packages=namespaces, - install_requires=dependencies, - extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", - include_package_data=True, - zip_safe=False, -) diff --git a/storage/tests/__init__.py b/storage/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/storage/tests/data/CloudPlatform_128px_Retina.png b/storage/tests/data/CloudPlatform_128px_Retina.png deleted file mode 100644 index 86c04e4b44f4..000000000000 Binary files a/storage/tests/data/CloudPlatform_128px_Retina.png and /dev/null differ diff --git a/storage/tests/data/five-point-one-mb-file.zip b/storage/tests/data/five-point-one-mb-file.zip deleted file mode 100644 index a399081e8dd2..000000000000 Binary files a/storage/tests/data/five-point-one-mb-file.zip and /dev/null differ diff --git a/storage/tests/data/simple.txt b/storage/tests/data/simple.txt deleted file mode 100644 index 8a03e0e55f2b..000000000000 --- a/storage/tests/data/simple.txt +++ /dev/null @@ -1 +0,0 @@ -This is a simple text file. diff --git a/storage/tests/perf/README.md b/storage/tests/perf/README.md deleted file mode 100644 index e77589f610d3..000000000000 --- a/storage/tests/perf/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# storage benchwrapp - -main.py is a gRPC wrapper around the storage library for benchmarking purposes. - -## Running - -```bash -$ export STORAGE_EMULATOR_HOST=http://localhost:8080 -$ pip install grpcio -$ cd storage -$ pip install -e . # install google.cloud.storage locally -$ cd tests/perf -$ python3 benchwrapper.py --port 8081 -``` - -## Re-generating protos - -```bash -$ pip install grpcio-tools -$ python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. *.proto -``` diff --git a/storage/tests/perf/benchwrapper.py b/storage/tests/perf/benchwrapper.py deleted file mode 100644 index 9ebb3f455839..000000000000 --- a/storage/tests/perf/benchwrapper.py +++ /dev/null @@ -1,54 +0,0 @@ -import argparse -import sys -import time -import grpc -import os -from concurrent import futures -import storage_pb2_grpc -import storage_pb2 -from google.cloud import storage - -_ONE_DAY_IN_SECONDS = 60 * 60 * 24 - -parser = argparse.ArgumentParser() - -if os.environ.get("STORAGE_EMULATOR_HOST") is None: - sys.exit( - "This benchmarking server only works when connected to an emulator. Please set STORAGE_EMULATOR_HOST." 
- ) - -parser.add_argument("--port", help="The port to run on.") - -args = parser.parse_args() - -if args.port is None: - sys.exit("Usage: python3 main.py --port 8081") - -client = storage.Client.create_anonymous_client() - - -class StorageBenchWrapperServicer(storage_pb2_grpc.StorageBenchWrapperServicer): - def Write(self, request, context): - # TODO(deklerk): implement this - return storage_pb2.EmptyResponse() - - def Read(self, request, context): - bucket = client.bucket(request.bucketName) - blob = storage.Blob(request.objectName, bucket) - blob.download_as_string() - return storage_pb2.EmptyResponse() - - -server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) -storage_pb2_grpc.add_StorageBenchWrapperServicer_to_server( - StorageBenchWrapperServicer(), server -) - -print("listening on localhost:" + args.port) -server.add_insecure_port("[::]:" + args.port) -server.start() -try: - while True: - time.sleep(_ONE_DAY_IN_SECONDS) -except KeyboardInterrupt: - server.stop(0) diff --git a/storage/tests/perf/storage.proto b/storage/tests/perf/storage.proto deleted file mode 100644 index 055e7e7867c0..000000000000 --- a/storage/tests/perf/storage.proto +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package storage_bench; - -message ObjectRead{ - // The bucket string identifier. - string bucketName = 1; - // The object/blob string identifier. - string objectName = 2; -} - -message ObjectWrite{ - // The bucket string identifier. - string bucketName = 1; - // The object/blob string identifiers. - string objectName = 2; - // The string containing the upload file path. - string destination = 3; -} - -message EmptyResponse{ -} - -service StorageBenchWrapper{ - // Performs an upload from a specific object. - rpc Write(ObjectWrite) returns (EmptyResponse) {} - // Read a specific object. - rpc Read(ObjectRead) returns (EmptyResponse){} -} \ No newline at end of file diff --git a/storage/tests/perf/storage_pb2.py b/storage/tests/perf/storage_pb2.py deleted file mode 100644 index 59ea52f919d0..000000000000 --- a/storage/tests/perf/storage_pb2.py +++ /dev/null @@ -1,252 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: storage.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="storage.proto", - package="storage_bench", - syntax="proto3", - serialized_options=None, - serialized_pb=_b( - '\n\rstorage.proto\x12\rstorage_bench"4\n\nObjectRead\x12\x12\n\nbucketName\x18\x01 \x01(\t\x12\x12\n\nobjectName\x18\x02 \x01(\t"J\n\x0bObjectWrite\x12\x12\n\nbucketName\x18\x01 \x01(\t\x12\x12\n\nobjectName\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65stination\x18\x03 \x01(\t"\x0f\n\rEmptyResponse2\x9d\x01\n\x13StorageBenchWrapper\x12\x43\n\x05Write\x12\x1a.storage_bench.ObjectWrite\x1a\x1c.storage_bench.EmptyResponse"\x00\x12\x41\n\x04Read\x12\x19.storage_bench.ObjectRead\x1a\x1c.storage_bench.EmptyResponse"\x00\x62\x06proto3' - ), -) - - -_OBJECTREAD = _descriptor.Descriptor( - name="ObjectRead", - full_name="storage_bench.ObjectRead", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="bucketName", - full_name="storage_bench.ObjectRead.bucketName", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="objectName", - full_name="storage_bench.ObjectRead.objectName", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=32, - serialized_end=84, -) - - -_OBJECTWRITE = _descriptor.Descriptor( - name="ObjectWrite", - full_name="storage_bench.ObjectWrite", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="bucketName", - full_name="storage_bench.ObjectWrite.bucketName", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="objectName", - full_name="storage_bench.ObjectWrite.objectName", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="destination", - full_name="storage_bench.ObjectWrite.destination", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=86, - serialized_end=160, -) - - -_EMPTYRESPONSE = _descriptor.Descriptor( - name="EmptyResponse", - full_name="storage_bench.EmptyResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=162, - serialized_end=177, -) - -DESCRIPTOR.message_types_by_name["ObjectRead"] = _OBJECTREAD -DESCRIPTOR.message_types_by_name["ObjectWrite"] = _OBJECTWRITE -DESCRIPTOR.message_types_by_name["EmptyResponse"] = _EMPTYRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ObjectRead = _reflection.GeneratedProtocolMessageType( - "ObjectRead", - (_message.Message,), - { - "DESCRIPTOR": _OBJECTREAD, - "__module__": "storage_pb2" - # @@protoc_insertion_point(class_scope:storage_bench.ObjectRead) - }, -) -_sym_db.RegisterMessage(ObjectRead) - -ObjectWrite = _reflection.GeneratedProtocolMessageType( - "ObjectWrite", - (_message.Message,), - { - "DESCRIPTOR": _OBJECTWRITE, - "__module__": "storage_pb2" - # @@protoc_insertion_point(class_scope:storage_bench.ObjectWrite) - }, -) -_sym_db.RegisterMessage(ObjectWrite) - -EmptyResponse = _reflection.GeneratedProtocolMessageType( - "EmptyResponse", - (_message.Message,), - { - "DESCRIPTOR": _EMPTYRESPONSE, - "__module__": "storage_pb2" - # @@protoc_insertion_point(class_scope:storage_bench.EmptyResponse) - }, -) -_sym_db.RegisterMessage(EmptyResponse) - - -_STORAGEBENCHWRAPPER = _descriptor.ServiceDescriptor( - name="StorageBenchWrapper", - full_name="storage_bench.StorageBenchWrapper", - file=DESCRIPTOR, - index=0, - serialized_options=None, - serialized_start=180, - serialized_end=337, - methods=[ - _descriptor.MethodDescriptor( - name="Write", - full_name="storage_bench.StorageBenchWrapper.Write", - index=0, - containing_service=None, - input_type=_OBJECTWRITE, - output_type=_EMPTYRESPONSE, - serialized_options=None, - ), - _descriptor.MethodDescriptor( - name="Read", - full_name="storage_bench.StorageBenchWrapper.Read", - index=1, - containing_service=None, - input_type=_OBJECTREAD, - output_type=_EMPTYRESPONSE, - serialized_options=None, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_STORAGEBENCHWRAPPER) - -DESCRIPTOR.services_by_name["StorageBenchWrapper"] = _STORAGEBENCHWRAPPER - -# @@protoc_insertion_point(module_scope) diff --git a/storage/tests/perf/storage_pb2_grpc.py b/storage/tests/perf/storage_pb2_grpc.py deleted file mode 100644 index 1b3a2c82f50b..000000000000 --- a/storage/tests/perf/storage_pb2_grpc.py +++ /dev/null @@ -1,64 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -import storage_pb2 as storage__pb2 - - -class StorageBenchWrapperStub(object): - # missing associated documentation comment in .proto file - pass - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.Write = channel.unary_unary( - "/storage_bench.StorageBenchWrapper/Write", - request_serializer=storage__pb2.ObjectWrite.SerializeToString, - response_deserializer=storage__pb2.EmptyResponse.FromString, - ) - self.Read = channel.unary_unary( - "/storage_bench.StorageBenchWrapper/Read", - request_serializer=storage__pb2.ObjectRead.SerializeToString, - response_deserializer=storage__pb2.EmptyResponse.FromString, - ) - - -class StorageBenchWrapperServicer(object): - # missing associated documentation comment in .proto file - pass - - def Write(self, request, context): - """Performs an upload from a specific object. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Read(self, request, context): - """Read a specific object. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_StorageBenchWrapperServicer_to_server(servicer, server): - rpc_method_handlers = { - "Write": grpc.unary_unary_rpc_method_handler( - servicer.Write, - request_deserializer=storage__pb2.ObjectWrite.FromString, - response_serializer=storage__pb2.EmptyResponse.SerializeToString, - ), - "Read": grpc.unary_unary_rpc_method_handler( - servicer.Read, - request_deserializer=storage__pb2.ObjectRead.FromString, - response_serializer=storage__pb2.EmptyResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "storage_bench.StorageBenchWrapper", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/storage/tests/system.py b/storage/tests/system.py deleted file mode 100644 index 66c565cdfc42..000000000000 --- a/storage/tests/system.py +++ /dev/null @@ -1,1935 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -import datetime -import gzip -import hashlib -import io -import os -import re -import tempfile -import time -import unittest - -import requests -import six - -from google.cloud import exceptions -from google.cloud import iam_credentials_v1 -from google.cloud import storage -from google.cloud.storage._helpers import _base64_md5hash -from google.cloud.storage.bucket import LifecycleRuleDelete -from google.cloud.storage.bucket import LifecycleRuleSetStorageClass -from google.cloud import kms -import google.oauth2 -from test_utils.retry import RetryErrors -from test_utils.system import unique_resource_id -from test_utils.vpcsc_config import vpcsc_config - - -USER_PROJECT = os.environ.get("GOOGLE_CLOUD_TESTS_USER_PROJECT") - - -def _bad_copy(bad_request): - """Predicate: pass only exceptions for a failed copyTo.""" - err_msg = bad_request.message - return err_msg.startswith("No file found in request. 
(POST") and "copyTo" in err_msg - - -retry_429 = RetryErrors(exceptions.TooManyRequests, max_tries=6) -retry_429_harder = RetryErrors(exceptions.TooManyRequests, max_tries=10) -retry_429_503 = RetryErrors( - [exceptions.TooManyRequests, exceptions.ServiceUnavailable], max_tries=6 -) -retry_bad_copy = RetryErrors(exceptions.BadRequest, error_predicate=_bad_copy) - - -def _empty_bucket(bucket): - """Empty a bucket of all existing blobs (including multiple versions).""" - for blob in list(bucket.list_blobs(versions=True)): - try: - blob.delete() - except exceptions.NotFound: - pass - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. - """ - - CLIENT = None - TEST_BUCKET = None - - -def setUpModule(): - Config.CLIENT = storage.Client() - bucket_name = "new" + unique_resource_id() - # In the **very** rare case the bucket name is reserved, this - # fails with a ConnectionError. - Config.TEST_BUCKET = Config.CLIENT.bucket(bucket_name) - Config.TEST_BUCKET.versioning_enabled = True - retry_429_503(Config.TEST_BUCKET.create)() - - -def tearDownModule(): - errors = (exceptions.Conflict, exceptions.TooManyRequests) - retry = RetryErrors(errors, max_tries=15) - retry(_empty_bucket)(Config.TEST_BUCKET) - retry(Config.TEST_BUCKET.delete)(force=True) - - -class TestClient(unittest.TestCase): - def setUp(self): - self.case_hmac_keys_to_delete = [] - - def tearDown(self): - from google.cloud.storage.hmac_key import HMACKeyMetadata - - for hmac_key in self.case_hmac_keys_to_delete: - if hmac_key.state == HMACKeyMetadata.ACTIVE_STATE: - hmac_key.state = HMACKeyMetadata.INACTIVE_STATE - hmac_key.update() - if hmac_key.state == HMACKeyMetadata.INACTIVE_STATE: - retry_429_harder(hmac_key.delete)() - - def test_get_service_account_email(self): - domain = "gs-project-accounts.iam.gserviceaccount.com" - email = Config.CLIENT.get_service_account_email() - - new_style = re.compile(r"service-(?P[^@]+)@" + domain) - old_style = re.compile(r"{}@{}".format(Config.CLIENT.project, domain)) - patterns = [new_style, old_style] - matches = [pattern.match(email) for pattern in patterns] - - self.assertTrue(any(match for match in matches if match is not None)) - - def test_hmac_key_crud(self): - from google.cloud.storage.hmac_key import HMACKeyMetadata - - credentials = Config.CLIENT._credentials - email = credentials.service_account_email - - before_keys = set(Config.CLIENT.list_hmac_keys()) - - metadata, secret = Config.CLIENT.create_hmac_key(email) - self.case_hmac_keys_to_delete.append(metadata) - - self.assertIsInstance(secret, six.text_type) - self.assertEqual(len(secret), 40) - - after_keys = set(Config.CLIENT.list_hmac_keys()) - self.assertFalse(metadata in before_keys) - self.assertTrue(metadata in after_keys) - - another = HMACKeyMetadata(Config.CLIENT) - - another._properties["accessId"] = "nonesuch" - self.assertFalse(another.exists()) - - another._properties["accessId"] = metadata.access_id - self.assertTrue(another.exists()) - - another.reload() - - self.assertEqual(another._properties, metadata._properties) - - metadata.state = HMACKeyMetadata.INACTIVE_STATE - metadata.update() - - metadata.delete() - self.case_hmac_keys_to_delete.remove(metadata) - - -class TestStorageBuckets(unittest.TestCase): - def setUp(self): - self.case_buckets_to_delete = [] - - def tearDown(self): - for bucket_name in self.case_buckets_to_delete: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_harder(bucket.delete)() - - 
def test_create_bucket(self): - new_bucket_name = "a-new-bucket" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - created = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - - def test_bucket_create_w_alt_storage_class(self): - from google.cloud.storage import constants - - new_bucket_name = "bucket-w-archive" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = Config.CLIENT.bucket(new_bucket_name) - bucket.storage_class = constants.ARCHIVE_STORAGE_CLASS - retry_429_503(bucket.create)() - self.case_buckets_to_delete.append(new_bucket_name) - created = Config.CLIENT.get_bucket(new_bucket_name) - self.assertEqual(created.storage_class, constants.ARCHIVE_STORAGE_CLASS) - - def test_lifecycle_rules(self): - from google.cloud.storage import constants - - new_bucket_name = "w-lifcycle-rules" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = Config.CLIENT.bucket(new_bucket_name) - bucket.add_lifecycle_delete_rule(age=42) - bucket.add_lifecycle_set_storage_class_rule( - constants.COLDLINE_STORAGE_CLASS, - is_live=False, - matches_storage_class=[constants.NEARLINE_STORAGE_CLASS], - ) - - expected_rules = [ - LifecycleRuleDelete(age=42), - LifecycleRuleSetStorageClass( - constants.COLDLINE_STORAGE_CLASS, - is_live=False, - matches_storage_class=[constants.NEARLINE_STORAGE_CLASS], - ), - ] - - retry_429_503(bucket.create)(location="us") - - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(bucket.name, new_bucket_name) - self.assertEqual(list(bucket.lifecycle_rules), expected_rules) - - bucket.clear_lifecyle_rules() - bucket.patch() - - self.assertEqual(list(bucket.lifecycle_rules), []) - - def test_list_buckets(self): - buckets_to_create = [ - "new" + unique_resource_id(), - "newer" + unique_resource_id(), - "newest" + unique_resource_id(), - ] - created_buckets = [] - for bucket_name in buckets_to_create: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_503(bucket.create)() - self.case_buckets_to_delete.append(bucket_name) - - # Retrieve the buckets. 
- all_buckets = Config.CLIENT.list_buckets() - created_buckets = [ - bucket for bucket in all_buckets if bucket.name in buckets_to_create - ] - self.assertEqual(len(created_buckets), len(buckets_to_create)) - - def test_bucket_update_labels(self): - bucket_name = "update-labels" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(bucket_name) - self.case_buckets_to_delete.append(bucket_name) - self.assertTrue(bucket.exists()) - - updated_labels = {"test-label": "label-value"} - bucket.labels = updated_labels - bucket.update() - self.assertEqual(bucket.labels, updated_labels) - - new_labels = {"another-label": "another-value"} - bucket.labels = new_labels - bucket.patch() - self.assertEqual(bucket.labels, new_labels) - - bucket.labels = {} - bucket.update() - self.assertEqual(bucket.labels, {}) - - def test_get_set_iam_policy(self): - import pytest - from google.cloud.storage.iam import STORAGE_OBJECT_VIEWER_ROLE - from google.api_core.exceptions import BadRequest, PreconditionFailed - - bucket_name = "iam-policy" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(bucket_name) - self.case_buckets_to_delete.append(bucket_name) - self.assertTrue(bucket.exists()) - - policy_no_version = bucket.get_iam_policy() - self.assertEqual(policy_no_version.version, 1) - - policy = bucket.get_iam_policy(requested_policy_version=3) - self.assertEqual(policy, policy_no_version) - - member = "serviceAccount:{}".format(Config.CLIENT.get_service_account_email()) - - BINDING_W_CONDITION = { - "role": STORAGE_OBJECT_VIEWER_ROLE, - "members": {member}, - "condition": { - "title": "always-true", - "description": "test condition always-true", - "expression": "true", - }, - } - policy.bindings.append(BINDING_W_CONDITION) - - with pytest.raises( - PreconditionFailed, match="enable uniform bucket-level access" - ): - bucket.set_iam_policy(policy) - - bucket.iam_configuration.uniform_bucket_level_access_enabled = True - bucket.patch() - - policy = bucket.get_iam_policy(requested_policy_version=3) - policy.bindings.append(BINDING_W_CONDITION) - - with pytest.raises(BadRequest, match="at least 3"): - bucket.set_iam_policy(policy) - - policy.version = 3 - returned_policy = bucket.set_iam_policy(policy) - self.assertEqual(returned_policy.version, 3) - self.assertEqual(returned_policy.bindings, policy.bindings) - - with pytest.raises( - BadRequest, match="cannot be less than the existing policy version" - ): - bucket.get_iam_policy() - with pytest.raises( - BadRequest, match="cannot be less than the existing policy version" - ): - bucket.get_iam_policy(requested_policy_version=2) - - fetched_policy = bucket.get_iam_policy(requested_policy_version=3) - self.assertEqual(fetched_policy.bindings, returned_policy.bindings) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_crud_bucket_with_requester_pays(self): - new_bucket_name = "w-requester-pays" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - self.assertTrue(created.requester_pays) - - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - # Bucket will be deleted in-line below. - self.case_buckets_to_delete.remove(new_bucket_name) - - try: - # Exercise 'buckets.get' w/ userProject. 
- self.assertTrue(with_user_project.exists()) - with_user_project.reload() - self.assertTrue(with_user_project.requester_pays) - - # Exercise 'buckets.patch' w/ userProject. - with_user_project.configure_website( - main_page_suffix="index.html", not_found_page="404.html" - ) - with_user_project.patch() - self.assertEqual( - with_user_project._properties["website"], - {"mainPageSuffix": "index.html", "notFoundPage": "404.html"}, - ) - - # Exercise 'buckets.update' w/ userProject. - new_labels = {"another-label": "another-value"} - with_user_project.labels = new_labels - with_user_project.update() - self.assertEqual(with_user_project.labels, new_labels) - - finally: - # Exercise 'buckets.delete' w/ userProject. - with_user_project.delete() - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_bucket_acls_iam_with_user_project(self): - new_bucket_name = "acl-w-user-project" + unique_resource_id("-") - retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name, requester_pays=True) - self.case_buckets_to_delete.append(new_bucket_name) - - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - # Exercise bucket ACL w/ userProject - acl = with_user_project.acl - acl.reload() - acl.all().grant_read() - acl.save() - self.assertIn("READER", acl.all().get_roles()) - del acl.entities["allUsers"] - acl.save() - self.assertFalse(acl.has_entity("allUsers")) - - # Exercise default object ACL w/ userProject - doa = with_user_project.default_object_acl - doa.reload() - doa.all().grant_read() - doa.save() - self.assertIn("READER", doa.all().get_roles()) - - # Exercise IAM w/ userProject - test_permissions = ["storage.buckets.get"] - self.assertEqual( - with_user_project.test_iam_permissions(test_permissions), test_permissions - ) - - policy = with_user_project.get_iam_policy() - viewers = policy.setdefault("roles/storage.objectViewer", set()) - viewers.add(policy.all_users()) - with_user_project.set_iam_policy(policy) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_copy_existing_file_with_user_project(self): - new_bucket_name = "copy-w-requester-pays" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - self.assertTrue(created.requester_pays) - - to_delete = [] - blob = storage.Blob("simple", bucket=created) - blob.upload_from_string(b"DEADBEEF") - to_delete.append(blob) - try: - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - new_blob = retry_bad_copy(with_user_project.copy_blob)( - blob, with_user_project, "simple-copy" - ) - to_delete.append(new_blob) - - base_contents = blob.download_as_string() - copied_contents = new_blob.download_as_string() - self.assertEqual(base_contents, copied_contents) - finally: - for blob in to_delete: - retry_429_harder(blob.delete)() - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_bucket_get_blob_with_user_project(self): - new_bucket_name = "w-requester-pays" + unique_resource_id("-") - data = b"DEADBEEF" - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(created.name, new_bucket_name) - self.assertTrue(created.requester_pays) - - with_user_project = Config.CLIENT.bucket( - 
new_bucket_name, user_project=USER_PROJECT - ) - - self.assertIsNone(with_user_project.get_blob("nonesuch")) - to_add = created.blob("blob-name") - to_add.upload_from_string(data) - try: - found = with_user_project.get_blob("blob-name") - self.assertEqual(found.download_as_string(), data) - finally: - to_add.delete() - - -class TestStorageFiles(unittest.TestCase): - - DIRNAME = os.path.realpath(os.path.dirname(__file__)) - FILES = { - "logo": {"path": DIRNAME + "/data/CloudPlatform_128px_Retina.png"}, - "big": {"path": DIRNAME + "/data/five-point-one-mb-file.zip"}, - "simple": {"path": DIRNAME + "/data/simple.txt"}, - } - - @classmethod - def setUpClass(cls): - super(TestStorageFiles, cls).setUpClass() - for file_data in cls.FILES.values(): - with open(file_data["path"], "rb") as file_obj: - file_data["hash"] = _base64_md5hash(file_obj) - cls.bucket = Config.TEST_BUCKET - - def setUp(self): - self.case_blobs_to_delete = [] - - def tearDown(self): - errors = (exceptions.TooManyRequests, exceptions.ServiceUnavailable) - retry = RetryErrors(errors, max_tries=6) - for blob in self.case_blobs_to_delete: - retry(blob.delete)() - - -class TestStorageWriteFiles(TestStorageFiles): - ENCRYPTION_KEY = "b23ff11bba187db8c37077e6af3b25b8" - - def test_large_file_write_from_stream(self): - blob = self.bucket.blob("LargeFile") - - file_data = self.FILES["big"] - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file(file_obj) - self.case_blobs_to_delete.append(blob) - - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - def test_large_encrypted_file_write_from_stream(self): - blob = self.bucket.blob("LargeFile", encryption_key=self.ENCRYPTION_KEY) - - file_data = self.FILES["big"] - with open(file_data["path"], "rb") as file_obj: - blob.upload_from_file(file_obj) - self.case_blobs_to_delete.append(blob) - - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - temp_filename = tempfile.mktemp() - with open(temp_filename, "wb") as file_obj: - blob.download_to_file(file_obj) - - with open(temp_filename, "rb") as file_obj: - md5_temp_hash = _base64_md5hash(file_obj) - - self.assertEqual(md5_temp_hash, file_data["hash"]) - - def test_small_file_write_from_filename(self): - blob = self.bucket.blob("SmallFile") - - file_data = self.FILES["simple"] - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - - md5_hash = blob.md5_hash - if not isinstance(md5_hash, six.binary_type): - md5_hash = md5_hash.encode("utf-8") - self.assertEqual(md5_hash, file_data["hash"]) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_crud_blob_w_user_project(self): - with_user_project = Config.CLIENT.bucket( - self.bucket.name, user_project=USER_PROJECT - ) - blob = with_user_project.blob("SmallFile") - - file_data = self.FILES["simple"] - with open(file_data["path"], mode="rb") as to_read: - file_contents = to_read.read() - - # Exercise 'objects.insert' w/ userProject. 
- blob.upload_from_filename(file_data["path"]) - gen0 = blob.generation - - # Upload a second generation of the blob - blob.upload_from_string(b"gen1") - gen1 = blob.generation - - blob0 = with_user_project.blob("SmallFile", generation=gen0) - blob1 = with_user_project.blob("SmallFile", generation=gen1) - - # Exercise 'objects.get' w/ generation - self.assertEqual(with_user_project.get_blob(blob.name).generation, gen1) - self.assertEqual( - with_user_project.get_blob(blob.name, generation=gen0).generation, gen0 - ) - - try: - # Exercise 'objects.get' (metadata) w/ userProject. - self.assertTrue(blob.exists()) - blob.reload() - - # Exercise 'objects.get' (media) w/ userProject. - self.assertEqual(blob0.download_as_string(), file_contents) - self.assertEqual(blob1.download_as_string(), b"gen1") - - # Exercise 'objects.patch' w/ userProject. - blob0.content_language = "en" - blob0.patch() - self.assertEqual(blob0.content_language, "en") - self.assertIsNone(blob1.content_language) - - # Exercise 'objects.update' w/ userProject. - metadata = {"foo": "Foo", "bar": "Bar"} - blob0.metadata = metadata - blob0.update() - self.assertEqual(blob0.metadata, metadata) - self.assertIsNone(blob1.metadata) - finally: - # Exercise 'objects.delete' (metadata) w/ userProject. - blobs = with_user_project.list_blobs(prefix=blob.name, versions=True) - self.assertEqual([each.generation for each in blobs], [gen0, gen1]) - - blob0.delete() - blobs = with_user_project.list_blobs(prefix=blob.name, versions=True) - self.assertEqual([each.generation for each in blobs], [gen1]) - - blob1.delete() - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_blob_acl_w_user_project(self): - with_user_project = Config.CLIENT.bucket( - self.bucket.name, user_project=USER_PROJECT - ) - blob = with_user_project.blob("SmallFile") - - file_data = self.FILES["simple"] - - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - - # Exercise bucket ACL w/ userProject - acl = blob.acl - acl.reload() - acl.all().grant_read() - acl.save() - self.assertIn("READER", acl.all().get_roles()) - del acl.entities["allUsers"] - acl.save() - self.assertFalse(acl.has_entity("allUsers")) - - def test_upload_blob_acl(self): - control = self.bucket.blob("logo") - control_data = self.FILES["logo"] - - blob = self.bucket.blob("SmallFile") - file_data = self.FILES["simple"] - - try: - control.upload_from_filename(control_data["path"]) - blob.upload_from_filename(file_data["path"], predefined_acl="publicRead") - finally: - self.case_blobs_to_delete.append(blob) - self.case_blobs_to_delete.append(control) - - control_acl = control.acl - self.assertNotIn("READER", control_acl.all().get_roles()) - acl = blob.acl - self.assertIn("READER", acl.all().get_roles()) - acl.all().revoke_read() - self.assertSequenceEqual(acl.all().get_roles(), set([])) - self.assertEqual(control_acl.all().get_roles(), acl.all().get_roles()) - - def test_write_metadata(self): - filename = self.FILES["logo"]["path"] - blob_name = os.path.basename(filename) - - blob = storage.Blob(blob_name, bucket=self.bucket) - blob.upload_from_filename(filename) - self.case_blobs_to_delete.append(blob) - - # NOTE: This should not be necessary. We should be able to pass - # it in to upload_file and also to upload_from_string. 
- blob.content_type = "image/png" - self.assertEqual(blob.content_type, "image/png") - - def test_direct_write_and_read_into_file(self): - blob = self.bucket.blob("MyBuffer") - file_contents = b"Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - same_blob = self.bucket.blob("MyBuffer") - same_blob.reload() # Initialize properties. - temp_filename = tempfile.mktemp() - with open(temp_filename, "wb") as file_obj: - same_blob.download_to_file(file_obj) - - with open(temp_filename, "rb") as file_obj: - stored_contents = file_obj.read() - - self.assertEqual(file_contents, stored_contents) - - def test_copy_existing_file(self): - filename = self.FILES["logo"]["path"] - blob = storage.Blob("CloudLogo", bucket=self.bucket) - blob.upload_from_filename(filename) - self.case_blobs_to_delete.append(blob) - - new_blob = retry_bad_copy(self.bucket.copy_blob)( - blob, self.bucket, "CloudLogoCopy" - ) - self.case_blobs_to_delete.append(new_blob) - - base_contents = blob.download_as_string() - copied_contents = new_blob.download_as_string() - self.assertEqual(base_contents, copied_contents) - - def test_download_blob_w_uri(self): - blob = self.bucket.blob("MyBuffer") - file_contents = b"Hello World" - blob.upload_from_string(file_contents) - self.case_blobs_to_delete.append(blob) - - temp_filename = tempfile.mktemp() - with open(temp_filename, "wb") as file_obj: - Config.CLIENT.download_blob_to_file( - "gs://" + self.bucket.name + "/MyBuffer", file_obj - ) - - with open(temp_filename, "rb") as file_obj: - stored_contents = file_obj.read() - - self.assertEqual(file_contents, stored_contents) - - def test_upload_gzip_encoded_download_raw(self): - payload = b"DEADBEEF" * 1000 - raw_stream = io.BytesIO() - with gzip.GzipFile(fileobj=raw_stream, mode="wb") as gzip_stream: - gzip_stream.write(payload) - zipped = raw_stream.getvalue() - - blob = self.bucket.blob("test_gzipped.gz") - blob.content_encoding = "gzip" - blob.upload_from_file(raw_stream, rewind=True) - - expanded = blob.download_as_string() - self.assertEqual(expanded, payload) - - raw = blob.download_as_string(raw_download=True) - self.assertEqual(raw, zipped) - - -class TestUnicode(unittest.TestCase): - @vpcsc_config.skip_if_inside_vpcsc - def test_fetch_object_and_check_content(self): - client = storage.Client() - bucket = client.bucket("storage-library-test-bucket") - - # Note: These files are public. - # Normalization form C: a single character for e-acute; - # URL should end with Cafe%CC%81 - # Normalization Form D: an ASCII e followed by U+0301 combining - # character; URL should end with Caf%C3%A9 - test_data = { - u"Caf\u00e9": b"Normalization Form C", - u"Cafe\u0301": b"Normalization Form D", - } - for blob_name, file_contents in test_data.items(): - blob = bucket.blob(blob_name) - self.assertEqual(blob.name, blob_name) - self.assertEqual(blob.download_as_string(), file_contents) - - -class TestStorageListFiles(TestStorageFiles): - - FILENAMES = ("CloudLogo1", "CloudLogo2", "CloudLogo3") - - @classmethod - def setUpClass(cls): - super(TestStorageListFiles, cls).setUpClass() - # Make sure bucket empty before beginning. - _empty_bucket(cls.bucket) - - logo_path = cls.FILES["logo"]["path"] - blob = storage.Blob(cls.FILENAMES[0], bucket=cls.bucket) - blob.upload_from_filename(logo_path) - cls.suite_blobs_to_delete = [blob] - - # Copy main blob onto remaining in FILENAMES. 
- for filename in cls.FILENAMES[1:]: - new_blob = retry_bad_copy(cls.bucket.copy_blob)(blob, cls.bucket, filename) - cls.suite_blobs_to_delete.append(new_blob) - - @classmethod - def tearDownClass(cls): - errors = (exceptions.TooManyRequests, exceptions.ServiceUnavailable) - retry = RetryErrors(errors, max_tries=6) - for blob in cls.suite_blobs_to_delete: - retry(blob.delete)() - - @RetryErrors(unittest.TestCase.failureException) - def test_list_files(self): - all_blobs = list(self.bucket.list_blobs()) - self.assertEqual( - sorted(blob.name for blob in all_blobs), sorted(self.FILENAMES) - ) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - @RetryErrors(unittest.TestCase.failureException) - def test_list_files_with_user_project(self): - with_user_project = Config.CLIENT.bucket( - self.bucket.name, user_project=USER_PROJECT - ) - all_blobs = list(with_user_project.list_blobs()) - self.assertEqual( - sorted(blob.name for blob in all_blobs), sorted(self.FILENAMES) - ) - - @RetryErrors(unittest.TestCase.failureException) - def test_paginate_files(self): - truncation_size = 1 - count = len(self.FILENAMES) - truncation_size - iterator = self.bucket.list_blobs(max_results=count) - page_iter = iterator.pages - - page1 = six.next(page_iter) - blobs = list(page1) - self.assertEqual(len(blobs), count) - self.assertIsNotNone(iterator.next_page_token) - # Technically the iterator is exhausted. - self.assertEqual(iterator.num_results, iterator.max_results) - # But we modify the iterator to continue paging after - # articially stopping after ``count`` items. - iterator.max_results = None - - page2 = six.next(page_iter) - last_blobs = list(page2) - self.assertEqual(len(last_blobs), truncation_size) - - -class TestStoragePseudoHierarchy(TestStorageFiles): - - FILENAMES = ( - "file01.txt", - "parent/file11.txt", - "parent/child/file21.txt", - "parent/child/file22.txt", - "parent/child/grand/file31.txt", - "parent/child/other/file32.txt", - ) - - @classmethod - def setUpClass(cls): - super(TestStoragePseudoHierarchy, cls).setUpClass() - # Make sure bucket empty before beginning. 
- _empty_bucket(cls.bucket) - - cls.suite_blobs_to_delete = [] - simple_path = cls.FILES["simple"]["path"] - for filename in cls.FILENAMES: - blob = storage.Blob(filename, bucket=cls.bucket) - blob.upload_from_filename(simple_path) - cls.suite_blobs_to_delete.append(blob) - - @classmethod - def tearDownClass(cls): - errors = (exceptions.TooManyRequests, exceptions.ServiceUnavailable) - retry = RetryErrors(errors, max_tries=6) - for blob in cls.suite_blobs_to_delete: - retry(blob.delete)() - - @RetryErrors(unittest.TestCase.failureException) - def test_blob_get_w_delimiter(self): - for filename in self.FILENAMES: - blob = self.bucket.blob(filename) - self.assertTrue(blob.exists(), filename) - - @RetryErrors(unittest.TestCase.failureException) - def test_root_level_w_delimiter(self): - iterator = self.bucket.list_blobs(delimiter="/") - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual([blob.name for blob in blobs], ["file01.txt"]) - self.assertIsNone(iterator.next_page_token) - self.assertEqual(iterator.prefixes, set(["parent/"])) - - @RetryErrors(unittest.TestCase.failureException) - def test_first_level(self): - iterator = self.bucket.list_blobs(delimiter="/", prefix="parent/") - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual([blob.name for blob in blobs], ["parent/file11.txt"]) - self.assertIsNone(iterator.next_page_token) - self.assertEqual(iterator.prefixes, set(["parent/child/"])) - - @RetryErrors(unittest.TestCase.failureException) - def test_second_level(self): - expected_names = ["parent/child/file21.txt", "parent/child/file22.txt"] - - iterator = self.bucket.list_blobs(delimiter="/", prefix="parent/child/") - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual([blob.name for blob in blobs], expected_names) - self.assertIsNone(iterator.next_page_token) - self.assertEqual( - iterator.prefixes, set(["parent/child/grand/", "parent/child/other/"]) - ) - - @RetryErrors(unittest.TestCase.failureException) - def test_third_level(self): - # Pseudo-hierarchy can be arbitrarily deep, subject to the limit - # of 1024 characters in the UTF-8 encoded name: - # https://cloud.google.com/storage/docs/bucketnaming#objectnames - # Exercise a layer deeper to illustrate this. - iterator = self.bucket.list_blobs(delimiter="/", prefix="parent/child/grand/") - page = six.next(iterator.pages) - blobs = list(page) - self.assertEqual( - [blob.name for blob in blobs], ["parent/child/grand/file31.txt"] - ) - self.assertIsNone(iterator.next_page_token) - self.assertEqual(iterator.prefixes, set()) - - -class TestStorageSignURLs(unittest.TestCase): - BLOB_CONTENT = b"This time for sure, Rocky!" 
- - @classmethod - def setUpClass(cls): - if ( - type(Config.CLIENT._credentials) - is not google.oauth2.service_account.Credentials - ): - cls.skipTest("Signing tests requires a service account credential") - - bucket_name = "gcp-signing" + unique_resource_id() - cls.bucket = retry_429_503(Config.CLIENT.create_bucket)(bucket_name) - cls.blob = cls.bucket.blob("README.txt") - cls.blob.upload_from_string(cls.BLOB_CONTENT) - - @classmethod - def tearDownClass(cls): - _empty_bucket(cls.bucket) - errors = (exceptions.Conflict, exceptions.TooManyRequests) - retry = RetryErrors(errors, max_tries=6) - retry(cls.bucket.delete)(force=True) - - @staticmethod - def _morph_expiration(version, expiration): - if expiration is not None: - return expiration - - if version == "v2": - return int(time.time()) + 10 - - return 10 - - def _create_signed_list_blobs_url_helper( - self, version, expiration=None, method="GET" - ): - expiration = self._morph_expiration(version, expiration) - - signed_url = self.bucket.generate_signed_url( - expiration=expiration, method=method, client=Config.CLIENT, version=version - ) - - response = requests.get(signed_url) - self.assertEqual(response.status_code, 200) - - def test_create_signed_list_blobs_url_v2(self): - self._create_signed_list_blobs_url_helper(version="v2") - - def test_create_signed_list_blobs_url_v2_w_expiration(self): - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=10) - - self._create_signed_list_blobs_url_helper(expiration=now + delta, version="v2") - - def test_create_signed_list_blobs_url_v4(self): - self._create_signed_list_blobs_url_helper(version="v4") - - def test_create_signed_list_blobs_url_v4_w_expiration(self): - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=10) - self._create_signed_list_blobs_url_helper(expiration=now + delta, version="v4") - - def _create_signed_read_url_helper( - self, - blob_name="LogoToSign.jpg", - method="GET", - version="v2", - payload=None, - expiration=None, - encryption_key=None, - service_account_email=None, - access_token=None, - ): - expiration = self._morph_expiration(version, expiration) - - if payload is not None: - blob = self.bucket.blob(blob_name, encryption_key=encryption_key) - blob.upload_from_string(payload) - else: - blob = self.blob - - signed_url = blob.generate_signed_url( - expiration=expiration, - method=method, - client=Config.CLIENT, - version=version, - service_account_email=None, - access_token=None, - ) - - headers = {} - - if encryption_key is not None: - headers["x-goog-encryption-algorithm"] = "AES256" - encoded_key = base64.b64encode(encryption_key).decode("utf-8") - headers["x-goog-encryption-key"] = encoded_key - key_hash = hashlib.sha256(encryption_key).digest() - key_hash = base64.b64encode(key_hash).decode("utf-8") - headers["x-goog-encryption-key-sha256"] = key_hash - - response = requests.get(signed_url, headers=headers) - self.assertEqual(response.status_code, 200) - if payload is not None: - self.assertEqual(response.content, payload) - else: - self.assertEqual(response.content, self.BLOB_CONTENT) - - def test_create_signed_read_url_v2(self): - self._create_signed_read_url_helper() - - def test_create_signed_read_url_v4(self): - self._create_signed_read_url_helper(version="v4") - - def test_create_signed_read_url_v2_w_expiration(self): - now = datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=10) - - self._create_signed_read_url_helper(expiration=now + delta) - - def test_create_signed_read_url_v4_w_expiration(self): - now 
= datetime.datetime.utcnow() - delta = datetime.timedelta(seconds=10) - self._create_signed_read_url_helper(expiration=now + delta, version="v4") - - def test_create_signed_read_url_v2_lowercase_method(self): - self._create_signed_read_url_helper(method="get") - - def test_create_signed_read_url_v4_lowercase_method(self): - self._create_signed_read_url_helper(method="get", version="v4") - - def test_create_signed_read_url_v2_w_non_ascii_name(self): - self._create_signed_read_url_helper( - blob_name=u"Caf\xe9.txt", - payload=b"Test signed URL for blob w/ non-ASCII name", - ) - - def test_create_signed_read_url_v4_w_non_ascii_name(self): - self._create_signed_read_url_helper( - blob_name=u"Caf\xe9.txt", - payload=b"Test signed URL for blob w/ non-ASCII name", - version="v4", - ) - - def test_create_signed_read_url_v2_w_csek(self): - encryption_key = os.urandom(32) - self._create_signed_read_url_helper( - blob_name="v2-w-csek.txt", - payload=b"Test signed URL for blob w/ CSEK", - encryption_key=encryption_key, - ) - - def test_create_signed_read_url_v4_w_csek(self): - encryption_key = os.urandom(32) - self._create_signed_read_url_helper( - blob_name="v2-w-csek.txt", - payload=b"Test signed URL for blob w/ CSEK", - encryption_key=encryption_key, - version="v4", - ) - - def test_create_signed_read_url_v2_w_access_token(self): - client = iam_credentials_v1.IAMCredentialsClient() - service_account_email = Config.CLIENT._credentials.service_account_email - name = client.service_account_path("-", service_account_email) - scope = ["https://www.googleapis.com/auth/devstorage.read_write"] - response = client.generate_access_token(name, scope) - self._create_signed_read_url_helper( - service_account_email=service_account_email, - access_token=response.access_token, - ) - - def test_create_signed_read_url_v4_w_access_token(self): - client = iam_credentials_v1.IAMCredentialsClient() - service_account_email = Config.CLIENT._credentials.service_account_email - name = client.service_account_path("-", service_account_email) - scope = ["https://www.googleapis.com/auth/devstorage.read_write"] - response = client.generate_access_token(name, scope) - self._create_signed_read_url_helper( - version="v4", - service_account_email=service_account_email, - access_token=response.access_token, - ) - - def _create_signed_delete_url_helper(self, version="v2", expiration=None): - expiration = self._morph_expiration(version, expiration) - - blob = self.bucket.blob("DELETE_ME.txt") - blob.upload_from_string(b"DELETE ME!") - - signed_delete_url = blob.generate_signed_url( - expiration=expiration, - method="DELETE", - client=Config.CLIENT, - version=version, - ) - - response = requests.request("DELETE", signed_delete_url) - self.assertEqual(response.status_code, 204) - self.assertEqual(response.content, b"") - - self.assertFalse(blob.exists()) - - def test_create_signed_delete_url_v2(self): - self._create_signed_delete_url_helper() - - def test_create_signed_delete_url_v4(self): - self._create_signed_delete_url_helper(version="v4") - - def _signed_resumable_upload_url_helper(self, version="v2", expiration=None): - expiration = self._morph_expiration(version, expiration) - blob = self.bucket.blob("cruddy.txt") - payload = b"DEADBEEF" - - # Initiate the upload using a signed URL. 
- signed_resumable_upload_url = blob.generate_signed_url( - expiration=expiration, - method="RESUMABLE", - client=Config.CLIENT, - version=version, - ) - - post_headers = {"x-goog-resumable": "start"} - post_response = requests.post(signed_resumable_upload_url, headers=post_headers) - self.assertEqual(post_response.status_code, 201) - - # Finish uploading the body. - location = post_response.headers["Location"] - put_headers = {"content-length": str(len(payload))} - put_response = requests.put(location, headers=put_headers, data=payload) - self.assertEqual(put_response.status_code, 200) - - # Download using a signed URL and verify. - signed_download_url = blob.generate_signed_url( - expiration=expiration, method="GET", client=Config.CLIENT, version=version - ) - - get_response = requests.get(signed_download_url) - self.assertEqual(get_response.status_code, 200) - self.assertEqual(get_response.content, payload) - - # Finally, delete the blob using a signed URL. - signed_delete_url = blob.generate_signed_url( - expiration=expiration, - method="DELETE", - client=Config.CLIENT, - version=version, - ) - - delete_response = requests.delete(signed_delete_url) - self.assertEqual(delete_response.status_code, 204) - - def test_signed_resumable_upload_url_v2(self): - self._signed_resumable_upload_url_helper(version="v2") - - def test_signed_resumable_upload_url_v4(self): - self._signed_resumable_upload_url_helper(version="v4") - - -class TestStorageCompose(TestStorageFiles): - - FILES = {} - - def test_compose_create_new_blob(self): - SOURCE_1 = b"AAA\n" - source_1 = self.bucket.blob("source-1") - source_1.upload_from_string(SOURCE_1) - self.case_blobs_to_delete.append(source_1) - - SOURCE_2 = b"BBB\n" - source_2 = self.bucket.blob("source-2") - source_2.upload_from_string(SOURCE_2) - self.case_blobs_to_delete.append(source_2) - - destination = self.bucket.blob("destination") - destination.content_type = "text/plain" - destination.compose([source_1, source_2]) - self.case_blobs_to_delete.append(destination) - - composed = destination.download_as_string() - self.assertEqual(composed, SOURCE_1 + SOURCE_2) - - def test_compose_create_new_blob_wo_content_type(self): - SOURCE_1 = b"AAA\n" - source_1 = self.bucket.blob("source-1") - source_1.upload_from_string(SOURCE_1) - self.case_blobs_to_delete.append(source_1) - - SOURCE_2 = b"BBB\n" - source_2 = self.bucket.blob("source-2") - source_2.upload_from_string(SOURCE_2) - self.case_blobs_to_delete.append(source_2) - - destination = self.bucket.blob("destination") - - destination.compose([source_1, source_2]) - self.case_blobs_to_delete.append(destination) - - self.assertIsNone(destination.content_type) - composed = destination.download_as_string() - self.assertEqual(composed, SOURCE_1 + SOURCE_2) - - def test_compose_replace_existing_blob(self): - BEFORE = b"AAA\n" - original = self.bucket.blob("original") - original.content_type = "text/plain" - original.upload_from_string(BEFORE) - self.case_blobs_to_delete.append(original) - - TO_APPEND = b"BBB\n" - to_append = self.bucket.blob("to_append") - to_append.upload_from_string(TO_APPEND) - self.case_blobs_to_delete.append(to_append) - - original.compose([original, to_append]) - - composed = original.download_as_string() - self.assertEqual(composed, BEFORE + TO_APPEND) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_compose_with_user_project(self): - new_bucket_name = "compose-user-project" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( 
- new_bucket_name, requester_pays=True - ) - try: - SOURCE_1 = b"AAA\n" - source_1 = created.blob("source-1") - source_1.upload_from_string(SOURCE_1) - - SOURCE_2 = b"BBB\n" - source_2 = created.blob("source-2") - source_2.upload_from_string(SOURCE_2) - - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - destination = with_user_project.blob("destination") - destination.content_type = "text/plain" - destination.compose([source_1, source_2]) - - composed = destination.download_as_string() - self.assertEqual(composed, SOURCE_1 + SOURCE_2) - finally: - retry_429_harder(created.delete)(force=True) - - -class TestStorageRewrite(TestStorageFiles): - - FILENAMES = ("file01.txt",) - - def test_rewrite_create_new_blob_add_encryption_key(self): - file_data = self.FILES["simple"] - - source = self.bucket.blob("source") - source.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(source) - source_data = source.download_as_string() - - KEY = os.urandom(32) - dest = self.bucket.blob("dest", encryption_key=KEY) - token, rewritten, total = dest.rewrite(source) - self.case_blobs_to_delete.append(dest) - - self.assertEqual(token, None) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(source.download_as_string(), dest.download_as_string()) - - def test_rewrite_rotate_encryption_key(self): - BLOB_NAME = "rotating-keys" - file_data = self.FILES["simple"] - - SOURCE_KEY = os.urandom(32) - source = self.bucket.blob(BLOB_NAME, encryption_key=SOURCE_KEY) - source.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(source) - source_data = source.download_as_string() - - DEST_KEY = os.urandom(32) - dest = self.bucket.blob(BLOB_NAME, encryption_key=DEST_KEY) - token, rewritten, total = dest.rewrite(source) - # Not adding 'dest' to 'self.case_blobs_to_delete': it is the - # same object as 'source'. 
- - self.assertIsNone(token) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(dest.download_as_string(), source_data) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_rewrite_add_key_with_user_project(self): - file_data = self.FILES["simple"] - new_bucket_name = "rewrite-key-up" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - try: - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - source = with_user_project.blob("source") - source.upload_from_filename(file_data["path"]) - source_data = source.download_as_string() - - KEY = os.urandom(32) - dest = with_user_project.blob("dest", encryption_key=KEY) - token, rewritten, total = dest.rewrite(source) - - self.assertEqual(token, None) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(source.download_as_string(), dest.download_as_string()) - finally: - retry_429_harder(created.delete)(force=True) - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_rewrite_rotate_with_user_project(self): - BLOB_NAME = "rotating-keys" - file_data = self.FILES["simple"] - new_bucket_name = "rewrite-rotate-up" + unique_resource_id("-") - created = retry_429_503(Config.CLIENT.create_bucket)( - new_bucket_name, requester_pays=True - ) - try: - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - - SOURCE_KEY = os.urandom(32) - source = with_user_project.blob(BLOB_NAME, encryption_key=SOURCE_KEY) - source.upload_from_filename(file_data["path"]) - source_data = source.download_as_string() - - DEST_KEY = os.urandom(32) - dest = with_user_project.blob(BLOB_NAME, encryption_key=DEST_KEY) - token, rewritten, total = dest.rewrite(source) - - self.assertEqual(token, None) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(dest.download_as_string(), source_data) - finally: - retry_429_harder(created.delete)(force=True) - - -class TestStorageUpdateStorageClass(TestStorageFiles): - def test_update_storage_class_small_file(self): - from google.cloud.storage import constants - - blob = self.bucket.blob("SmallFile") - - file_data = self.FILES["simple"] - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - - blob.update_storage_class(constants.NEARLINE_STORAGE_CLASS) - blob.reload() - self.assertEqual(blob.storage_class, constants.NEARLINE_STORAGE_CLASS) - - blob.update_storage_class(constants.COLDLINE_STORAGE_CLASS) - blob.reload() - self.assertEqual(blob.storage_class, constants.COLDLINE_STORAGE_CLASS) - - def test_update_storage_class_large_file(self): - from google.cloud.storage import constants - - blob = self.bucket.blob("BigFile") - - file_data = self.FILES["big"] - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - - blob.update_storage_class(constants.NEARLINE_STORAGE_CLASS) - blob.reload() - self.assertEqual(blob.storage_class, constants.NEARLINE_STORAGE_CLASS) - - blob.update_storage_class(constants.COLDLINE_STORAGE_CLASS) - blob.reload() - self.assertEqual(blob.storage_class, constants.COLDLINE_STORAGE_CLASS) - - -class TestStorageNotificationCRUD(unittest.TestCase): - - topic = None - TOPIC_NAME = "notification" + unique_resource_id("-") - CUSTOM_ATTRIBUTES = {"attr1": 
"value1", "attr2": "value2"} - BLOB_NAME_PREFIX = "blob-name-prefix/" - - @property - def topic_path(self): - return "projects/{}/topics/{}".format(Config.CLIENT.project, self.TOPIC_NAME) - - def _initialize_topic(self): - try: - from google.cloud.pubsub_v1 import PublisherClient - except ImportError: - raise unittest.SkipTest("Cannot import pubsub") - self.publisher_client = PublisherClient() - retry_429(self.publisher_client.create_topic)(self.topic_path) - policy = self.publisher_client.get_iam_policy(self.topic_path) - binding = policy.bindings.add() - binding.role = "roles/pubsub.publisher" - binding.members.append( - "serviceAccount:{}".format(Config.CLIENT.get_service_account_email()) - ) - self.publisher_client.set_iam_policy(self.topic_path, policy) - - def setUp(self): - self.case_buckets_to_delete = [] - self._initialize_topic() - - def tearDown(self): - retry_429(self.publisher_client.delete_topic)(self.topic_path) - with Config.CLIENT.batch(): - for bucket_name in self.case_buckets_to_delete: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_harder(bucket.delete)() - - @staticmethod - def event_types(): - from google.cloud.storage.notification import ( - OBJECT_FINALIZE_EVENT_TYPE, - OBJECT_DELETE_EVENT_TYPE, - ) - - return [OBJECT_FINALIZE_EVENT_TYPE, OBJECT_DELETE_EVENT_TYPE] - - @staticmethod - def payload_format(): - from google.cloud.storage.notification import JSON_API_V1_PAYLOAD_FORMAT - - return JSON_API_V1_PAYLOAD_FORMAT - - def test_notification_minimal(self): - new_bucket_name = "notification-minimal" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - self.assertEqual(list(bucket.list_notifications()), []) - notification = bucket.notification(self.TOPIC_NAME) - retry_429_503(notification.create)() - try: - self.assertTrue(notification.exists()) - self.assertIsNotNone(notification.notification_id) - notifications = list(bucket.list_notifications()) - self.assertEqual(len(notifications), 1) - self.assertEqual(notifications[0].topic_name, self.TOPIC_NAME) - finally: - notification.delete() - - def test_notification_explicit(self): - new_bucket_name = "notification-explicit" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - notification = bucket.notification( - self.TOPIC_NAME, - custom_attributes=self.CUSTOM_ATTRIBUTES, - event_types=self.event_types(), - blob_name_prefix=self.BLOB_NAME_PREFIX, - payload_format=self.payload_format(), - ) - retry_429_503(notification.create)() - try: - self.assertTrue(notification.exists()) - self.assertIsNotNone(notification.notification_id) - self.assertEqual(notification.custom_attributes, self.CUSTOM_ATTRIBUTES) - self.assertEqual(notification.event_types, self.event_types()) - self.assertEqual(notification.blob_name_prefix, self.BLOB_NAME_PREFIX) - self.assertEqual(notification.payload_format, self.payload_format()) - finally: - notification.delete() - - @unittest.skipUnless(USER_PROJECT, "USER_PROJECT not set in environment.") - def test_notification_w_user_project(self): - new_bucket_name = "notification-minimal" + unique_resource_id("-") - retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name, requester_pays=True) - self.case_buckets_to_delete.append(new_bucket_name) - with_user_project = Config.CLIENT.bucket( - new_bucket_name, user_project=USER_PROJECT - ) - 
self.assertEqual(list(with_user_project.list_notifications()), []) - notification = with_user_project.notification(self.TOPIC_NAME) - retry_429(notification.create)() - try: - self.assertTrue(notification.exists()) - self.assertIsNotNone(notification.notification_id) - notifications = list(with_user_project.list_notifications()) - self.assertEqual(len(notifications), 1) - self.assertEqual(notifications[0].topic_name, self.TOPIC_NAME) - finally: - notification.delete() - - -class TestAnonymousClient(unittest.TestCase): - - PUBLIC_BUCKET = "gcp-public-data-landsat" - - @vpcsc_config.skip_if_inside_vpcsc - def test_access_to_public_bucket(self): - anonymous = storage.Client.create_anonymous_client() - bucket = anonymous.bucket(self.PUBLIC_BUCKET) - blob, = retry_429_503(bucket.list_blobs)(max_results=1) - with tempfile.TemporaryFile() as stream: - retry_429_503(blob.download_to_file)(stream) - - -class TestKMSIntegration(TestStorageFiles): - - FILENAMES = ("file01.txt",) - - KEYRING_NAME = "gcs-test" - KEY_NAME = "gcs-test" - ALT_KEY_NAME = "gcs-test-alternate" - - def _kms_key_name(self, key_name=None): - if key_name is None: - key_name = self.KEY_NAME - - return ("projects/{}/" "locations/{}/" "keyRings/{}/" "cryptoKeys/{}").format( - Config.CLIENT.project, - self.bucket.location.lower(), - self.KEYRING_NAME, - key_name, - ) - - @classmethod - def setUpClass(cls): - super(TestKMSIntegration, cls).setUpClass() - _empty_bucket(cls.bucket) - - def setUp(self): - super(TestKMSIntegration, self).setUp() - client = kms.KeyManagementServiceClient() - project = Config.CLIENT.project - location = self.bucket.location.lower() - keyring_name = self.KEYRING_NAME - purpose = kms.enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT - - # If the keyring doesn't exist create it. - keyring_path = client.key_ring_path(project, location, keyring_name) - - try: - client.get_key_ring(keyring_path) - except exceptions.NotFound: - parent = client.location_path(project, location) - client.create_key_ring(parent, keyring_name, {}) - - # Mark this service account as an owner of the new keyring - service_account = Config.CLIENT.get_service_account_email() - policy = { - "bindings": [ - { - "role": "roles/cloudkms.cryptoKeyEncrypterDecrypter", - "members": ["serviceAccount:" + service_account], - } - ] - } - client.set_iam_policy(keyring_path, policy) - - # Populate the keyring with the keys we use in the tests - key_names = [ - "gcs-test", - "gcs-test-alternate", - "explicit-kms-key-name", - "default-kms-key-name", - "override-default-kms-key-name", - "alt-default-kms-key-name", - ] - for key_name in key_names: - key_path = client.crypto_key_path(project, location, keyring_name, key_name) - try: - client.get_crypto_key(key_path) - except exceptions.NotFound: - key = {"purpose": purpose} - client.create_crypto_key(keyring_path, key_name, key) - - def test_blob_w_explicit_kms_key_name(self): - BLOB_NAME = "explicit-kms-key-name" - file_data = self.FILES["simple"] - kms_key_name = self._kms_key_name() - blob = self.bucket.blob(BLOB_NAME, kms_key_name=kms_key_name) - blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(blob) - with open(file_data["path"], "rb") as _file_data: - self.assertEqual(blob.download_as_string(), _file_data.read()) - # We don't know the current version of the key. 
- self.assertTrue(blob.kms_key_name.startswith(kms_key_name)) - - listed, = list(self.bucket.list_blobs()) - self.assertTrue(listed.kms_key_name.startswith(kms_key_name)) - - def test_bucket_w_default_kms_key_name(self): - BLOB_NAME = "default-kms-key-name" - OVERRIDE_BLOB_NAME = "override-default-kms-key-name" - ALT_BLOB_NAME = "alt-default-kms-key-name" - CLEARTEXT_BLOB_NAME = "cleartext" - - file_data = self.FILES["simple"] - - with open(file_data["path"], "rb") as _file_data: - contents = _file_data.read() - - kms_key_name = self._kms_key_name() - self.bucket.default_kms_key_name = kms_key_name - self.bucket.patch() - self.assertEqual(self.bucket.default_kms_key_name, kms_key_name) - - defaulted_blob = self.bucket.blob(BLOB_NAME) - defaulted_blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(defaulted_blob) - - self.assertEqual(defaulted_blob.download_as_string(), contents) - # We don't know the current version of the key. - self.assertTrue(defaulted_blob.kms_key_name.startswith(kms_key_name)) - - alt_kms_key_name = self._kms_key_name(self.ALT_KEY_NAME) - - override_blob = self.bucket.blob( - OVERRIDE_BLOB_NAME, kms_key_name=alt_kms_key_name - ) - override_blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(override_blob) - - self.assertEqual(override_blob.download_as_string(), contents) - # We don't know the current version of the key. - self.assertTrue(override_blob.kms_key_name.startswith(alt_kms_key_name)) - - self.bucket.default_kms_key_name = alt_kms_key_name - self.bucket.patch() - - alt_blob = self.bucket.blob(ALT_BLOB_NAME) - alt_blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(alt_blob) - - self.assertEqual(alt_blob.download_as_string(), contents) - # We don't know the current version of the key. - self.assertTrue(alt_blob.kms_key_name.startswith(alt_kms_key_name)) - - self.bucket.default_kms_key_name = None - self.bucket.patch() - - cleartext_blob = self.bucket.blob(CLEARTEXT_BLOB_NAME) - cleartext_blob.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(cleartext_blob) - - self.assertEqual(cleartext_blob.download_as_string(), contents) - self.assertIsNone(cleartext_blob.kms_key_name) - - def test_rewrite_rotate_csek_to_cmek(self): - BLOB_NAME = "rotating-keys" - file_data = self.FILES["simple"] - - SOURCE_KEY = os.urandom(32) - source = self.bucket.blob(BLOB_NAME, encryption_key=SOURCE_KEY) - source.upload_from_filename(file_data["path"]) - self.case_blobs_to_delete.append(source) - source_data = source.download_as_string() - - kms_key_name = self._kms_key_name() - - # We can't verify it, but ideally we would check that the following - # URL was resolvable with our credentials - # KEY_URL = 'https://cloudkms.googleapis.com/v1/{}'.format( - # kms_key_name) - - dest = self.bucket.blob(BLOB_NAME, kms_key_name=kms_key_name) - token, rewritten, total = dest.rewrite(source) - - while token is not None: - token, rewritten, total = dest.rewrite(source, token=token) - - # Not adding 'dest' to 'self.case_blobs_to_delete': it is the - # same object as 'source'. 
- - self.assertIsNone(token) - self.assertEqual(rewritten, len(source_data)) - self.assertEqual(total, len(source_data)) - - self.assertEqual(dest.download_as_string(), source_data) - - -class TestRetentionPolicy(unittest.TestCase): - def setUp(self): - self.case_buckets_to_delete = [] - - def tearDown(self): - for bucket_name in self.case_buckets_to_delete: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_harder(bucket.delete)() - - def test_bucket_w_retention_period(self): - import datetime - from google.api_core import exceptions - - period_secs = 10 - - new_bucket_name = "w-retention-period" + unique_resource_id("-") - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - bucket.retention_period = period_secs - bucket.default_event_based_hold = False - bucket.patch() - - self.assertEqual(bucket.retention_period, period_secs) - self.assertIsInstance(bucket.retention_policy_effective_time, datetime.datetime) - self.assertFalse(bucket.default_event_based_hold) - self.assertFalse(bucket.retention_policy_locked) - - blob_name = "test-blob" - payload = b"DEADBEEF" - blob = bucket.blob(blob_name) - blob.upload_from_string(payload) - - other = bucket.get_blob(blob_name) - - self.assertFalse(other.event_based_hold) - self.assertFalse(other.temporary_hold) - self.assertIsInstance(other.retention_expiration_time, datetime.datetime) - - with self.assertRaises(exceptions.Forbidden): - other.delete() - - bucket.retention_period = None - bucket.patch() - - self.assertIsNone(bucket.retention_period) - self.assertIsNone(bucket.retention_policy_effective_time) - self.assertFalse(bucket.default_event_based_hold) - self.assertFalse(bucket.retention_policy_locked) - - other.reload() - - self.assertFalse(other.event_based_hold) - self.assertFalse(other.temporary_hold) - self.assertIsNone(other.retention_expiration_time) - - other.delete() - - def test_bucket_w_default_event_based_hold(self): - from google.api_core import exceptions - - new_bucket_name = "w-def-ebh" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - bucket.default_event_based_hold = True - bucket.patch() - - self.assertTrue(bucket.default_event_based_hold) - self.assertIsNone(bucket.retention_period) - self.assertIsNone(bucket.retention_policy_effective_time) - self.assertFalse(bucket.retention_policy_locked) - - blob_name = "test-blob" - payload = b"DEADBEEF" - blob = bucket.blob(blob_name) - blob.upload_from_string(payload) - - other = bucket.get_blob(blob_name) - - self.assertTrue(other.event_based_hold) - self.assertFalse(other.temporary_hold) - self.assertIsNone(other.retention_expiration_time) - - with self.assertRaises(exceptions.Forbidden): - other.delete() - - other.event_based_hold = False - other.patch() - - other.delete() - - bucket.default_event_based_hold = False - bucket.patch() - - self.assertFalse(bucket.default_event_based_hold) - self.assertIsNone(bucket.retention_period) - self.assertIsNone(bucket.retention_policy_effective_time) - self.assertFalse(bucket.retention_policy_locked) - - blob.upload_from_string(payload) - self.assertFalse(other.event_based_hold) - self.assertFalse(other.temporary_hold) - self.assertIsNone(other.retention_expiration_time) - - blob.delete() - - def test_blob_w_temporary_hold(self): - from google.api_core import exceptions - 
- new_bucket_name = "w-tmp-hold" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - blob_name = "test-blob" - payload = b"DEADBEEF" - blob = bucket.blob(blob_name) - blob.upload_from_string(payload) - - other = bucket.get_blob(blob_name) - other.temporary_hold = True - other.patch() - - self.assertTrue(other.temporary_hold) - self.assertFalse(other.event_based_hold) - self.assertIsNone(other.retention_expiration_time) - - with self.assertRaises(exceptions.Forbidden): - other.delete() - - other.temporary_hold = False - other.patch() - - other.delete() - - def test_bucket_lock_retention_policy(self): - import datetime - from google.api_core import exceptions - - period_secs = 10 - - new_bucket_name = "loc-ret-policy" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - bucket.retention_period = period_secs - bucket.patch() - - self.assertEqual(bucket.retention_period, period_secs) - self.assertIsInstance(bucket.retention_policy_effective_time, datetime.datetime) - self.assertFalse(bucket.default_event_based_hold) - self.assertFalse(bucket.retention_policy_locked) - - bucket.lock_retention_policy() - - bucket.reload() - self.assertTrue(bucket.retention_policy_locked) - - bucket.retention_period = None - with self.assertRaises(exceptions.Forbidden): - bucket.patch() - - -class TestIAMConfiguration(unittest.TestCase): - def setUp(self): - self.case_buckets_to_delete = [] - - def tearDown(self): - for bucket_name in self.case_buckets_to_delete: - bucket = Config.CLIENT.bucket(bucket_name) - retry_429_harder(bucket.delete)(force=True) - - def test_new_bucket_w_ubla(self): - new_bucket_name = "new-w-ubla" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = Config.CLIENT.bucket(new_bucket_name) - bucket.iam_configuration.uniform_bucket_level_access_enabled = True - retry_429_503(bucket.create)() - self.case_buckets_to_delete.append(new_bucket_name) - - bucket_acl = bucket.acl - with self.assertRaises(exceptions.BadRequest): - bucket_acl.reload() - - bucket_acl.loaded = True # Fake that we somehow loaded the ACL - bucket_acl.all().grant_read() - with self.assertRaises(exceptions.BadRequest): - bucket_acl.save() - - blob_name = "my-blob.txt" - blob = bucket.blob(blob_name) - payload = b"DEADBEEF" - blob.upload_from_string(payload) - - found = bucket.get_blob(blob_name) - self.assertEqual(found.download_as_string(), payload) - - blob_acl = blob.acl - with self.assertRaises(exceptions.BadRequest): - blob_acl.reload() - - blob_acl.loaded = True # Fake that we somehow loaded the ACL - blob_acl.all().grant_read() - with self.assertRaises(exceptions.BadRequest): - blob_acl.save() - - def test_ubla_set_unset_preserves_acls(self): - new_bucket_name = "ubla-acls" + unique_resource_id("-") - self.assertRaises( - exceptions.NotFound, Config.CLIENT.get_bucket, new_bucket_name - ) - bucket = retry_429_503(Config.CLIENT.create_bucket)(new_bucket_name) - self.case_buckets_to_delete.append(new_bucket_name) - - blob_name = "my-blob.txt" - blob = bucket.blob(blob_name) - payload = b"DEADBEEF" - blob.upload_from_string(payload) - - # Preserve ACLs before setting UBLA 
- bucket_acl_before = list(bucket.acl) - blob_acl_before = list(bucket.acl) - - # Set UBLA - bucket.iam_configuration.uniform_bucket_level_access_enabled = True - bucket.patch() - - self.assertTrue(bucket.iam_configuration.uniform_bucket_level_access_enabled) - - # While UBLA is set, cannot get / set ACLs - with self.assertRaises(exceptions.BadRequest): - bucket.acl.reload() - - # Clear UBLA - bucket.iam_configuration.uniform_bucket_level_access_enabled = False - bucket.patch() - - # Query ACLs after clearing UBLA - bucket.acl.reload() - bucket_acl_after = list(bucket.acl) - blob.acl.reload() - blob_acl_after = list(bucket.acl) - - self.assertEqual(bucket_acl_before, bucket_acl_after) - self.assertEqual(blob_acl_before, blob_acl_after) diff --git a/storage/tests/unit/__init__.py b/storage/tests/unit/__init__.py deleted file mode 100644 index df379f1e9d88..000000000000 --- a/storage/tests/unit/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/storage/tests/unit/test__helpers.py b/storage/tests/unit/test__helpers.py deleted file mode 100644 index 9b75b0e67fbe..000000000000 --- a/storage/tests/unit/test__helpers.py +++ /dev/null @@ -1,380 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import mock - - -class Test__get_storage_host(unittest.TestCase): - @staticmethod - def _call_fut(): - from google.cloud.storage._helpers import _get_storage_host - - return _get_storage_host() - - def test_wo_env_var(self): - from google.cloud.storage._helpers import _DEFAULT_STORAGE_HOST - - with mock.patch("os.environ", {}): - host = self._call_fut() - - self.assertEqual(host, _DEFAULT_STORAGE_HOST) - - def test_w_env_var(self): - from google.cloud.storage._helpers import STORAGE_EMULATOR_ENV_VAR - - HOST = "https://api.example.com" - - with mock.patch("os.environ", {STORAGE_EMULATOR_ENV_VAR: HOST}): - host = self._call_fut() - - self.assertEqual(host, HOST) - - -class Test_PropertyMixin(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage._helpers import _PropertyMixin - - return _PropertyMixin - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def _derivedClass(self, path=None, user_project=None): - class Derived(self._get_target_class()): - - client = None - - @property - def path(self): - return path - - @property - def user_project(self): - return user_project - - return Derived - - def test_path_is_abstract(self): - mixin = self._make_one() - with self.assertRaises(NotImplementedError): - mixin.path - - def test_client_is_abstract(self): - mixin = self._make_one() - with self.assertRaises(NotImplementedError): - mixin.client - - def test_user_project_is_abstract(self): - mixin = self._make_one() - with self.assertRaises(NotImplementedError): - mixin.user_project - - def test__encryption_headers(self): - mixin = self._make_one() - self.assertEqual(mixin._encryption_headers(), {}) - - def test__query_params_wo_user_project(self): - derived = self._derivedClass("/path", None)() - self.assertEqual(derived._query_params, {}) - - def test__query_params_w_user_project(self): - user_project = "user-project-123" - derived = self._derivedClass("/path", user_project)() - self.assertEqual(derived._query_params, {"userProject": user_project}) - - def test_reload(self): - connection = _Connection({"foo": "Foo"}) - client = _Client(connection) - derived = self._derivedClass("/path")() - # Make sure changes is not a set instance before calling reload - # (which will clear / replace it with an empty set), checked below. - derived._changes = object() - derived.reload(client=client) - self.assertEqual(derived._properties, {"foo": "Foo"}) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "GET", - "path": "/path", - "query_params": {"projection": "noAcl"}, - "headers": {}, - "_target_object": derived, - }, - ) - self.assertEqual(derived._changes, set()) - - def test_reload_w_user_project(self): - user_project = "user-project-123" - connection = _Connection({"foo": "Foo"}) - client = _Client(connection) - derived = self._derivedClass("/path", user_project)() - # Make sure changes is not a set instance before calling reload - # (which will clear / replace it with an empty set), checked below. 
- derived._changes = object() - derived.reload(client=client) - self.assertEqual(derived._properties, {"foo": "Foo"}) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "GET", - "path": "/path", - "query_params": {"projection": "noAcl", "userProject": user_project}, - "headers": {}, - "_target_object": derived, - }, - ) - self.assertEqual(derived._changes, set()) - - def test__set_properties(self): - mixin = self._make_one() - self.assertEqual(mixin._properties, {}) - VALUE = object() - mixin._set_properties(VALUE) - self.assertEqual(mixin._properties, VALUE) - - def test__patch_property(self): - derived = self._derivedClass()() - derived._patch_property("foo", "Foo") - self.assertEqual(derived._properties, {"foo": "Foo"}) - - def test_patch(self): - connection = _Connection({"foo": "Foo"}) - client = _Client(connection) - derived = self._derivedClass("/path")() - # Make sure changes is non-empty, so we can observe a change. - BAR = object() - BAZ = object() - derived._properties = {"bar": BAR, "baz": BAZ} - derived._changes = set(["bar"]) # Ignore baz. - derived.patch(client=client) - self.assertEqual(derived._properties, {"foo": "Foo"}) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "PATCH", - "path": "/path", - "query_params": {"projection": "full"}, - # Since changes does not include `baz`, we don't see it sent. - "data": {"bar": BAR}, - "_target_object": derived, - }, - ) - # Make sure changes get reset by patch(). - self.assertEqual(derived._changes, set()) - - def test_patch_w_user_project(self): - user_project = "user-project-123" - connection = _Connection({"foo": "Foo"}) - client = _Client(connection) - derived = self._derivedClass("/path", user_project)() - # Make sure changes is non-empty, so we can observe a change. - BAR = object() - BAZ = object() - derived._properties = {"bar": BAR, "baz": BAZ} - derived._changes = set(["bar"]) # Ignore baz. - derived.patch(client=client) - self.assertEqual(derived._properties, {"foo": "Foo"}) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "PATCH", - "path": "/path", - "query_params": {"projection": "full", "userProject": user_project}, - # Since changes does not include `baz`, we don't see it sent. - "data": {"bar": BAR}, - "_target_object": derived, - }, - ) - # Make sure changes get reset by patch(). - self.assertEqual(derived._changes, set()) - - def test_update(self): - connection = _Connection({"foo": "Foo"}) - client = _Client(connection) - derived = self._derivedClass("/path")() - # Make sure changes is non-empty, so we can observe a change. - BAR = object() - BAZ = object() - derived._properties = {"bar": BAR, "baz": BAZ} - derived._changes = set(["bar"]) # Update sends 'baz' anyway. - derived.update(client=client) - self.assertEqual(derived._properties, {"foo": "Foo"}) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PUT") - self.assertEqual(kw[0]["path"], "/path") - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - self.assertEqual(kw[0]["data"], {"bar": BAR, "baz": BAZ}) - # Make sure changes get reset by patch(). 
- self.assertEqual(derived._changes, set()) - - def test_update_w_user_project(self): - user_project = "user-project-123" - connection = _Connection({"foo": "Foo"}) - client = _Client(connection) - derived = self._derivedClass("/path", user_project)() - # Make sure changes is non-empty, so we can observe a change. - BAR = object() - BAZ = object() - derived._properties = {"bar": BAR, "baz": BAZ} - derived._changes = set(["bar"]) # Update sends 'baz' anyway. - derived.update(client=client) - self.assertEqual(derived._properties, {"foo": "Foo"}) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PUT") - self.assertEqual(kw[0]["path"], "/path") - self.assertEqual( - kw[0]["query_params"], {"projection": "full", "userProject": user_project} - ) - self.assertEqual(kw[0]["data"], {"bar": BAR, "baz": BAZ}) - # Make sure changes get reset by patch(). - self.assertEqual(derived._changes, set()) - - -class Test__scalar_property(unittest.TestCase): - def _call_fut(self, fieldName): - from google.cloud.storage._helpers import _scalar_property - - return _scalar_property(fieldName) - - def test_getter(self): - class Test(object): - def __init__(self, **kw): - self._properties = kw.copy() - - do_re_mi = self._call_fut("solfege") - - test = Test(solfege="Latido") - self.assertEqual(test.do_re_mi, "Latido") - - def test_setter(self): - class Test(object): - def _patch_property(self, name, value): - self._patched = (name, value) - - do_re_mi = self._call_fut("solfege") - - test = Test() - test.do_re_mi = "Latido" - self.assertEqual(test._patched, ("solfege", "Latido")) - - -class Test__base64_md5hash(unittest.TestCase): - def _call_fut(self, bytes_to_sign): - from google.cloud.storage._helpers import _base64_md5hash - - return _base64_md5hash(bytes_to_sign) - - def test_it(self): - from io import BytesIO - - BYTES_TO_SIGN = b"FOO" - BUFFER = BytesIO() - BUFFER.write(BYTES_TO_SIGN) - BUFFER.seek(0) - - SIGNED_CONTENT = self._call_fut(BUFFER) - self.assertEqual(SIGNED_CONTENT, b"kBiQqOnIz21aGlQrIp/r/w==") - - def test_it_with_stubs(self): - import mock - - class _Buffer(object): - def __init__(self, return_vals): - self.return_vals = return_vals - self._block_sizes = [] - - def read(self, block_size): - self._block_sizes.append(block_size) - return self.return_vals.pop() - - BASE64 = _Base64() - DIGEST_VAL = object() - BYTES_TO_SIGN = b"BYTES_TO_SIGN" - BUFFER = _Buffer([b"", BYTES_TO_SIGN]) - MD5 = _MD5(DIGEST_VAL) - - patch = mock.patch.multiple( - "google.cloud.storage._helpers", base64=BASE64, md5=MD5 - ) - with patch: - SIGNED_CONTENT = self._call_fut(BUFFER) - - self.assertEqual(BUFFER._block_sizes, [8192, 8192]) - self.assertIs(SIGNED_CONTENT, DIGEST_VAL) - self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL]) - self.assertEqual(MD5._called, [None]) - self.assertEqual(MD5.hash_obj.num_digest_calls, 1) - self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN]) - - -class _Connection(object): - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response - - -class _MD5Hash(object): - def __init__(self, digest_val): - self.digest_val = digest_val - self.num_digest_calls = 0 - self._blocks = [] - - def update(self, block): - self._blocks.append(block) - - def digest(self): - self.num_digest_calls += 1 - return self.digest_val - - -class _MD5(object): - def __init__(self, digest_val): - 
self.hash_obj = _MD5Hash(digest_val) - self._called = [] - - def __call__(self, data=None): - self._called.append(data) - return self.hash_obj - - -class _Base64(object): - def __init__(self): - self._called_b64encode = [] - - def b64encode(self, value): - self._called_b64encode.append(value) - return value - - -class _Client(object): - def __init__(self, connection): - self._connection = connection diff --git a/storage/tests/unit/test__http.py b/storage/tests/unit/test__http.py deleted file mode 100644 index 21009188e12a..000000000000 --- a/storage/tests/unit/test__http.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import mock - - -class TestConnection(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage._http import Connection - - return Connection - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_extra_headers(self): - import requests - from google.cloud import _http as base_http - - http = mock.create_autospec(requests.Session, instance=True) - response = requests.Response() - response.status_code = 200 - data = b"brent-spiner" - response._content = data - http.request.return_value = response - client = mock.Mock(_http=http, spec=["_http"]) - - conn = self._make_one(client) - req_data = "hey-yoooouuuuu-guuuuuyyssss" - result = conn.api_request("GET", "/rainbow", data=req_data, expect_json=False) - self.assertEqual(result, data) - - expected_headers = { - "Accept-Encoding": "gzip", - base_http.CLIENT_INFO_HEADER: conn.user_agent, - "User-Agent": conn.user_agent, - } - expected_uri = conn.build_api_url("/rainbow") - http.request.assert_called_once_with( - data=req_data, - headers=expected_headers, - method="GET", - url=expected_uri, - timeout=base_http._DEFAULT_TIMEOUT, - ) - - def test_build_api_url_no_extra_query_params(self): - conn = self._make_one(object()) - URI = "/".join([conn.DEFAULT_API_ENDPOINT, "storage", conn.API_VERSION, "foo"]) - self.assertEqual(conn.build_api_url("/foo"), URI) - - def test_build_api_url_w_custom_endpoint(self): - custom_endpoint = "https://foo-googleapis.com" - conn = self._make_one(object(), api_endpoint=custom_endpoint) - URI = "/".join([custom_endpoint, "storage", conn.API_VERSION, "foo"]) - self.assertEqual(conn.build_api_url("/foo"), URI) - - def test_build_api_url_w_extra_query_params(self): - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - - conn = self._make_one(object()) - uri = conn.build_api_url("/foo", {"bar": "baz"}) - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL) - self.assertEqual(path, "/".join(["", "storage", conn.API_VERSION, "foo"])) - parms = dict(parse_qsl(qs)) - self.assertEqual(parms["bar"], "baz") diff --git a/storage/tests/unit/test__signing.py b/storage/tests/unit/test__signing.py deleted file mode 100644 index ebd7f9c177bf..000000000000 --- 
a/storage/tests/unit/test__signing.py +++ /dev/null @@ -1,775 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -import binascii -import calendar -import datetime -import io -import json -import os -import time -import unittest - -import mock -import pytest -import six -from six.moves import urllib_parse - - -def _read_local_json(json_file): - here = os.path.dirname(__file__) - json_path = os.path.abspath(os.path.join(here, json_file)) - with io.open(json_path, "r", encoding="utf-8-sig") as fileobj: - return json.load(fileobj) - - -_SERVICE_ACCOUNT_JSON = _read_local_json("url_signer_v4_test_account.json") -_CONFORMANCE_TESTS = _read_local_json("url_signer_v4_test_data.json") -_CLIENT_TESTS = [test for test in _CONFORMANCE_TESTS if "bucket" not in test] -_BUCKET_TESTS = [ - test for test in _CONFORMANCE_TESTS if "bucket" in test and not test.get("object") -] -_BLOB_TESTS = [ - test for test in _CONFORMANCE_TESTS if "bucket" in test and test.get("object") -] - - -def _utc_seconds(when): - return int(calendar.timegm(when.timetuple())) - - -class Test_get_expiration_seconds_v2(unittest.TestCase): - @staticmethod - def _call_fut(expiration): - from google.cloud.storage._signing import get_expiration_seconds_v2 - - return get_expiration_seconds_v2(expiration) - - def test_w_invalid_expiration_type(self): - with self.assertRaises(TypeError): - self._call_fut(object(), None) - - def test_w_expiration_none(self): - with self.assertRaises(TypeError): - self._call_fut(None) - - def test_w_expiration_int(self): - self.assertEqual(self._call_fut(123), 123) - - def test_w_expiration_long(self): - if not six.PY2: - raise unittest.SkipTest("No long on Python 3+") - - self.assertEqual(self._call_fut(long(123)), 123) # noqa: F821 - - def test_w_expiration_naive_datetime(self): - expiration_no_tz = datetime.datetime(2004, 8, 19, 0, 0, 0, 0) - utc_seconds = _utc_seconds(expiration_no_tz) - self.assertEqual(self._call_fut(expiration_no_tz), utc_seconds) - - def test_w_expiration_utc_datetime(self): - from google.cloud._helpers import UTC - - expiration_utc = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC) - utc_seconds = _utc_seconds(expiration_utc) - self.assertEqual(self._call_fut(expiration_utc), utc_seconds) - - def test_w_expiration_other_zone_datetime(self): - from google.cloud._helpers import _UTC - - class CET(_UTC): - _tzname = "CET" - _utcoffset = datetime.timedelta(hours=1) - - zone = CET() - expiration_other = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, zone) - utc_seconds = _utc_seconds(expiration_other) - cet_seconds = utc_seconds - (60 * 60) # CET one hour earlier than UTC - self.assertEqual(self._call_fut(expiration_other), cet_seconds) - - def test_w_expiration_timedelta_seconds(self): - dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0) - utc_seconds = _utc_seconds(dummy_utcnow) - expiration_as_delta = datetime.timedelta(seconds=10) - - patch = mock.patch( - "google.cloud.storage._signing.NOW", return_value=dummy_utcnow - ) - with patch 
as utcnow: - result = self._call_fut(expiration_as_delta) - - self.assertEqual(result, utc_seconds + 10) - utcnow.assert_called_once_with() - - def test_w_expiration_timedelta_days(self): - dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0) - utc_seconds = _utc_seconds(dummy_utcnow) - expiration_as_delta = datetime.timedelta(days=1) - - patch = mock.patch( - "google.cloud.storage._signing.NOW", return_value=dummy_utcnow - ) - with patch as utcnow: - result = self._call_fut(expiration_as_delta) - - self.assertEqual(result, utc_seconds + 86400) - utcnow.assert_called_once_with() - - -class Test_get_expiration_seconds_v4(unittest.TestCase): - @staticmethod - def _call_fut(expiration): - from google.cloud.storage._signing import get_expiration_seconds_v4 - - return get_expiration_seconds_v4(expiration) - - def test_w_invalid_expiration_type(self): - with self.assertRaises(TypeError): - self._call_fut(object(), None) - - def test_w_expiration_none(self): - with self.assertRaises(TypeError): - self._call_fut(None) - - def test_w_expiration_int_gt_seven_days(self): - dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0) - delta = datetime.timedelta(days=10) - expiration_utc = dummy_utcnow + delta - expiration_seconds = _utc_seconds(expiration_utc) - - patch = mock.patch( - "google.cloud.storage._signing.NOW", return_value=dummy_utcnow - ) - - with patch as utcnow: - with self.assertRaises(ValueError): - self._call_fut(expiration_seconds) - utcnow.assert_called_once_with() - - def test_w_expiration_int(self): - dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0) - expiration_seconds = 10 - - patch = mock.patch( - "google.cloud.storage._signing.NOW", return_value=dummy_utcnow - ) - - with patch as utcnow: - result = self._call_fut(expiration_seconds) - - self.assertEqual(result, expiration_seconds) - utcnow.assert_called_once_with() - - def test_w_expiration_naive_datetime(self): - dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0) - delta = datetime.timedelta(seconds=10) - expiration_no_tz = dummy_utcnow + delta - - patch = mock.patch( - "google.cloud.storage._signing.NOW", return_value=dummy_utcnow - ) - with patch as utcnow: - result = self._call_fut(expiration_no_tz) - - self.assertEqual(result, delta.seconds) - utcnow.assert_called_once_with() - - def test_w_expiration_utc_datetime(self): - from google.cloud._helpers import UTC - - dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC) - delta = datetime.timedelta(seconds=10) - expiration_utc = dummy_utcnow + delta - - patch = mock.patch( - "google.cloud.storage._signing.NOW", return_value=dummy_utcnow - ) - with patch as utcnow: - result = self._call_fut(expiration_utc) - - self.assertEqual(result, delta.seconds) - utcnow.assert_called_once_with() - - def test_w_expiration_other_zone_datetime(self): - from google.cloud._helpers import UTC - from google.cloud._helpers import _UTC - - class CET(_UTC): - _tzname = "CET" - _utcoffset = datetime.timedelta(hours=1) - - zone = CET() - dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC) - dummy_cetnow = dummy_utcnow.astimezone(zone) - delta = datetime.timedelta(seconds=10) - expiration_other = dummy_cetnow + delta - - patch = mock.patch( - "google.cloud.storage._signing.NOW", return_value=dummy_utcnow - ) - with patch as utcnow: - result = self._call_fut(expiration_other) - - self.assertEqual(result, delta.seconds) - utcnow.assert_called_once_with() - - def test_w_expiration_timedelta(self): - dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0) - 
expiration_as_delta = datetime.timedelta(seconds=10) - - patch = mock.patch( - "google.cloud.storage._signing.NOW", return_value=dummy_utcnow - ) - with patch as utcnow: - result = self._call_fut(expiration_as_delta) - - self.assertEqual(result, expiration_as_delta.total_seconds()) - utcnow.assert_called_once_with() - - -class Test_get_signed_query_params_v2(unittest.TestCase): - @staticmethod - def _call_fut(credentials, expiration, string_to_sign): - from google.cloud.storage._signing import get_signed_query_params_v2 - - return get_signed_query_params_v2(credentials, expiration, string_to_sign) - - def test_it(self): - sig_bytes = b"DEADBEEF" - account_name = mock.sentinel.service_account_email - credentials = _make_credentials(signer_email=account_name) - credentials.sign_bytes.return_value = sig_bytes - expiration = 100 - string_to_sign = "dummy_signature" - result = self._call_fut(credentials, expiration, string_to_sign) - - expected = { - "GoogleAccessId": account_name, - "Expires": str(expiration), - "Signature": base64.b64encode(sig_bytes), - } - self.assertEqual(result, expected) - credentials.sign_bytes.assert_called_once_with(string_to_sign) - - -class Test_get_canonical_headers(unittest.TestCase): - @staticmethod - def _call_fut(*args, **kwargs): - from google.cloud.storage._signing import get_canonical_headers - - return get_canonical_headers(*args, **kwargs) - - def test_w_none(self): - headers = None - expected_canonical = [] - expected_ordered = [] - canonical, ordered = self._call_fut(headers) - self.assertEqual(canonical, expected_canonical) - self.assertEqual(ordered, expected_ordered) - - def test_w_dict(self): - headers = {"foo": "Foo 1.2.3", "Bar": " baz,bam,qux "} - expected_canonical = ["bar:baz,bam,qux", "foo:Foo 1.2.3"] - expected_ordered = [tuple(item.split(":")) for item in expected_canonical] - canonical, ordered = self._call_fut(headers) - self.assertEqual(canonical, expected_canonical) - self.assertEqual(ordered, expected_ordered) - - def test_w_list_and_multiples(self): - headers = [ - ("foo", "Foo 1.2.3"), - ("Bar", " baz"), - ("Bar", "bam"), - ("Bar", "qux "), - ] - expected_canonical = ["bar:baz,bam,qux", "foo:Foo 1.2.3"] - expected_ordered = [tuple(item.split(":")) for item in expected_canonical] - canonical, ordered = self._call_fut(headers) - self.assertEqual(canonical, expected_canonical) - self.assertEqual(ordered, expected_ordered) - - def test_w_embedded_ws(self): - headers = {"foo": "Foo\n1.2.3", "Bar": " baz bam qux "} - expected_canonical = ["bar:baz bam qux", "foo:Foo 1.2.3"] - expected_ordered = [tuple(item.split(":")) for item in expected_canonical] - canonical, ordered = self._call_fut(headers) - self.assertEqual(canonical, expected_canonical) - self.assertEqual(ordered, expected_ordered) - - -class Test_canonicalize(unittest.TestCase): - @staticmethod - def _call_fut(*args, **kwargs): - from google.cloud.storage._signing import canonicalize - - return canonicalize(*args, **kwargs) - - def test_wo_headers_or_query_parameters(self): - method = "GET" - resource = "/bucket/blob" - canonical = self._call_fut(method, resource, None, None) - self.assertEqual(canonical.method, method) - self.assertEqual(canonical.resource, resource) - self.assertEqual(canonical.query_parameters, []) - self.assertEqual(canonical.headers, []) - - def test_w_headers_and_resumable(self): - method = "RESUMABLE" - resource = "/bucket/blob" - headers = [("x-goog-extension", "foobar")] - canonical = self._call_fut(method, resource, None, headers) - 
self.assertEqual(canonical.method, "POST") - self.assertEqual(canonical.resource, resource) - self.assertEqual(canonical.query_parameters, []) - self.assertEqual( - canonical.headers, ["x-goog-extension:foobar", "x-goog-resumable:start"] - ) - - def test_w_query_paramters(self): - method = "GET" - resource = "/bucket/blob" - query_parameters = {"foo": "bar", "baz": "qux"} - canonical = self._call_fut(method, resource, query_parameters, None) - self.assertEqual(canonical.method, method) - self.assertEqual(canonical.resource, "{}?baz=qux&foo=bar".format(resource)) - self.assertEqual(canonical.query_parameters, [("baz", "qux"), ("foo", "bar")]) - self.assertEqual(canonical.headers, []) - - -class Test_generate_signed_url_v2(unittest.TestCase): - @staticmethod - def _call_fut(*args, **kwargs): - from google.cloud.storage._signing import generate_signed_url_v2 - - return generate_signed_url_v2(*args, **kwargs) - - def _generate_helper( - self, - api_access_endpoint="", - method="GET", - content_md5=None, - content_type=None, - response_type=None, - response_disposition=None, - generation=None, - headers=None, - query_parameters=None, - ): - from six.moves.urllib.parse import urlencode - - resource = "/name/path" - credentials = _make_credentials(signer_email="service@example.com") - credentials.sign_bytes.return_value = b"DEADBEEF" - signed = base64.b64encode(credentials.sign_bytes.return_value) - signed = signed.decode("ascii") - - expiration = 1000 - - url = self._call_fut( - credentials, - resource, - expiration=expiration, - api_access_endpoint=api_access_endpoint, - method=method, - content_md5=content_md5, - content_type=content_type, - response_type=response_type, - response_disposition=response_disposition, - generation=generation, - headers=headers, - query_parameters=query_parameters, - service_account_email=None, - access_token=None, - ) - - # Check the mock was called. - method = method.upper() - - if headers is None: - headers = [] - elif isinstance(headers, dict): - headers = sorted(headers.items()) - - elements = [] - expected_resource = resource - if method == "RESUMABLE": - elements.append("POST") - headers.append(("x-goog-resumable", "start")) - else: - elements.append(method) - - if query_parameters is not None: - normalized_qp = { - key.lower(): value and value.strip() or "" - for key, value in query_parameters.items() - } - expected_qp = urlencode(sorted(normalized_qp.items())) - expected_resource = "{}?{}".format(resource, expected_qp) - - elements.append(content_md5 or "") - elements.append(content_type or "") - elements.append(str(expiration)) - elements.extend(["{}:{}".format(*header) for header in headers]) - elements.append(expected_resource) - - string_to_sign = "\n".join(elements) - - credentials.sign_bytes.assert_called_once_with(string_to_sign) - - scheme, netloc, path, qs, frag = urllib_parse.urlsplit(url) - expected_scheme, expected_netloc, _, _, _ = urllib_parse.urlsplit( - api_access_endpoint - ) - self.assertEqual(scheme, expected_scheme) - self.assertEqual(netloc, expected_netloc) - self.assertEqual(path, resource) - self.assertEqual(frag, "") - - # Check the URL parameters. 
- params = dict(urllib_parse.parse_qsl(qs, keep_blank_values=True)) - - self.assertEqual(params["GoogleAccessId"], credentials.signer_email) - self.assertEqual(params["Expires"], str(expiration)) - self.assertEqual(params["Signature"], signed) - - if response_type is not None: - self.assertEqual(params["response-content-type"], response_type) - - if response_disposition is not None: - self.assertEqual( - params["response-content-disposition"], response_disposition - ) - - if generation is not None: - self.assertEqual(params["generation"], str(generation)) - - if query_parameters is not None: - for key, value in query_parameters.items(): - value = value.strip() if value else "" - self.assertEqual(params[key].lower(), value) - - def test_w_expiration_int(self): - self._generate_helper() - - def test_w_endpoint(self): - api_access_endpoint = "https://api.example.com" - self._generate_helper(api_access_endpoint=api_access_endpoint) - - def test_w_method(self): - method = "POST" - self._generate_helper(method=method) - - def test_w_method_resumable(self): - method = "RESUMABLE" - self._generate_helper(method=method) - - def test_w_response_type(self): - response_type = "text/plain" - self._generate_helper(response_type=response_type) - - def test_w_response_disposition(self): - response_disposition = "attachment; filename=blob.png" - self._generate_helper(response_disposition=response_disposition) - - def test_w_generation(self): - generation = "123" - self._generate_helper(generation=generation) - - def test_w_custom_headers_dict(self): - self._generate_helper(headers={"x-goog-foo": "bar"}) - - def test_w_custom_headers_list(self): - self._generate_helper(headers=[("x-goog-foo", "bar")]) - - def test_w_custom_query_parameters_w_string_value(self): - self._generate_helper(query_parameters={"bar": "/"}) - - def test_w_custom_query_parameters_w_none_value(self): - self._generate_helper(query_parameters={"qux": None}) - - def test_with_google_credentials(self): - resource = "/name/path" - credentials = _make_credentials() - expiration = int(time.time() + 5) - with self.assertRaises(AttributeError): - self._call_fut(credentials, resource=resource, expiration=expiration) - - def test_with_access_token(self): - resource = "/name/path" - credentials = _make_credentials() - expiration = int(time.time() + 5) - email = mock.sentinel.service_account_email - with mock.patch( - "google.cloud.storage._signing._sign_message", return_value=b"DEADBEEF" - ): - self._call_fut( - credentials, - resource=resource, - expiration=expiration, - service_account_email=email, - access_token="token", - ) - - -class Test_generate_signed_url_v4(unittest.TestCase): - DEFAULT_EXPIRATION = 1000 - - @staticmethod - def _call_fut(*args, **kwargs): - from google.cloud.storage._signing import generate_signed_url_v4 - - return generate_signed_url_v4(*args, **kwargs) - - def _generate_helper( - self, - expiration=DEFAULT_EXPIRATION, - api_access_endpoint="", - method="GET", - content_type=None, - content_md5=None, - response_type=None, - response_disposition=None, - generation=None, - headers=None, - query_parameters=None, - ): - now = datetime.datetime(2019, 2, 26, 19, 53, 27) - resource = "/name/path" - signer_email = "service@example.com" - credentials = _make_credentials(signer_email=signer_email) - credentials.sign_bytes.return_value = b"DEADBEEF" - - with mock.patch("google.cloud.storage._signing.NOW", lambda: now): - url = self._call_fut( - credentials, - resource, - expiration=expiration, - 
api_access_endpoint=api_access_endpoint, - method=method, - content_type=content_type, - content_md5=content_md5, - response_type=response_type, - response_disposition=response_disposition, - generation=generation, - headers=headers, - query_parameters=query_parameters, - ) - - # Check the mock was called. - credentials.sign_bytes.assert_called_once() - - scheme, netloc, path, qs, frag = urllib_parse.urlsplit(url) - - expected_scheme, expected_netloc, _, _, _ = urllib_parse.urlsplit( - api_access_endpoint - ) - self.assertEqual(scheme, expected_scheme) - self.assertEqual(netloc, expected_netloc) - self.assertEqual(path, resource) - self.assertEqual(frag, "") - - # Check the URL parameters. - params = dict(urllib_parse.parse_qsl(qs, keep_blank_values=True)) - self.assertEqual(params["X-Goog-Algorithm"], "GOOG4-RSA-SHA256") - - now_date = now.date().strftime("%Y%m%d") - expected_cred = "{}/{}/auto/storage/goog4_request".format( - signer_email, now_date - ) - self.assertEqual(params["X-Goog-Credential"], expected_cred) - - now_stamp = now.strftime("%Y%m%dT%H%M%SZ") - self.assertEqual(params["X-Goog-Date"], now_stamp) - self.assertEqual(params["X-Goog-Expires"], str(self.DEFAULT_EXPIRATION)) - - signed = binascii.hexlify(credentials.sign_bytes.return_value).decode("ascii") - self.assertEqual(params["X-Goog-Signature"], signed) - - if response_type is not None: - self.assertEqual(params["response-content-type"], response_type) - - if response_disposition is not None: - self.assertEqual( - params["response-content-disposition"], response_disposition - ) - - if generation is not None: - self.assertEqual(params["generation"], str(generation)) - - if query_parameters is not None: - for key, value in query_parameters.items(): - value = value.strip() if value else "" - self.assertEqual(params[key].lower(), value) - - def test_w_expiration_too_long(self): - with self.assertRaises(ValueError): - self._generate_helper(expiration=datetime.timedelta(days=8)) - - def test_w_defaults(self): - self._generate_helper() - - def test_w_api_access_endpoint(self): - self._generate_helper(api_access_endpoint="http://api.example.com") - - def test_w_method(self): - self._generate_helper(method="PUT") - - def test_w_method_resumable(self): - self._generate_helper(method="RESUMABLE") - - def test_w_content_type(self): - self._generate_helper(content_type="text/plain") - - def test_w_content_md5(self): - self._generate_helper(content_md5="FACEDACE") - - def test_w_response_type(self): - self._generate_helper(response_type="application/octets") - - def test_w_response_disposition(self): - self._generate_helper(response_disposition="attachment") - - def test_w_generation(self): - self._generate_helper(generation=12345) - - def test_w_custom_host_header(self): - self._generate_helper(headers={"Host": "api.example.com"}) - - def test_w_custom_headers(self): - self._generate_helper(headers={"x-goog-foo": "bar"}) - - def test_w_custom_query_parameters_w_string_value(self): - self._generate_helper(query_parameters={"bar": "/"}) - - def test_w_custom_query_parameters_w_none_value(self): - self._generate_helper(query_parameters={"qux": None}) - - def test_with_access_token(self): - resource = "/name/path" - signer_email = "service@example.com" - credentials = _make_credentials(signer_email=signer_email) - with mock.patch( - "google.cloud.storage._signing._sign_message", return_value=b"DEADBEEF" - ): - self._call_fut( - credentials, - resource=resource, - expiration=datetime.timedelta(days=5), - 
service_account_email=signer_email, - access_token="token", - ) - - -class Test_sign_message(unittest.TestCase): - @staticmethod - def _call_fut(*args, **kwargs): - from google.cloud.storage._signing import _sign_message - - return _sign_message(*args, **kwargs) - - def test_sign_bytes(self): - signature = "DEADBEEF" - data = {"signature": signature} - request = make_request(200, data) - with mock.patch("google.auth.transport.requests.Request", return_value=request): - returned_signature = self._call_fut( - "123", service_account_email="service@example.com", access_token="token" - ) - assert returned_signature == signature - - def test_sign_bytes_failure(self): - from google.auth import exceptions - - request = make_request(401) - with mock.patch("google.auth.transport.requests.Request", return_value=request): - with pytest.raises(exceptions.TransportError): - self._call_fut( - "123", - service_account_email="service@example.com", - access_token="token", - ) - - -_DUMMY_SERVICE_ACCOUNT = None - - -def dummy_service_account(): - global _DUMMY_SERVICE_ACCOUNT - - from google.oauth2.service_account import Credentials - - if _DUMMY_SERVICE_ACCOUNT is None: - _DUMMY_SERVICE_ACCOUNT = Credentials.from_service_account_info( - _SERVICE_ACCOUNT_JSON - ) - - return _DUMMY_SERVICE_ACCOUNT - - -def _run_conformance_test(resource, test_data): - credentials = dummy_service_account() - - url = Test_generate_signed_url_v4._call_fut( - credentials, - resource, - expiration=test_data["expiration"], - method=test_data["method"], - _request_timestamp=test_data["timestamp"], - headers=test_data.get("headers"), - ) - - assert url == test_data["expectedUrl"] - - -@pytest.mark.parametrize("test_data", _CLIENT_TESTS) -@pytest.mark.skip(reason="Bucketless URLs not yet supported") -def test_conformance_client(test_data): - pass # pragma: NO COVER - - -@pytest.mark.parametrize("test_data", _BUCKET_TESTS) -def test_conformance_bucket(test_data): - resource = "/{}".format(test_data["bucket"]) - _run_conformance_test(resource, test_data) - - -@pytest.mark.parametrize("test_data", _BLOB_TESTS) -def test_conformance_blob(test_data): - resource = "/{}/{}".format(test_data["bucket"], test_data["object"]) - _run_conformance_test(resource, test_data) - - -def _make_credentials(signer_email=None): - import google.auth.credentials - - if signer_email: - credentials = mock.Mock(spec=google.auth.credentials.Signing) - credentials.signer_email = signer_email - return credentials - else: - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def make_request(status, data=None): - from google.auth import transport - - response = mock.create_autospec(transport.Response, instance=True) - response.status = status - if data is not None: - response.data = json.dumps(data).encode("utf-8") - - request = mock.create_autospec(transport.Request) - request.return_value = response - return request diff --git a/storage/tests/unit/test_acl.py b/storage/tests/unit/test_acl.py deleted file mode 100644 index d66a9439c1cc..000000000000 --- a/storage/tests/unit/test_acl.py +++ /dev/null @@ -1,886 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - - -class Test_ACLEntity(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.acl import _ACLEntity - - return _ACLEntity - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor_default_identifier(self): - TYPE = "type" - entity = self._make_one(TYPE) - self.assertEqual(entity.type, TYPE) - self.assertIsNone(entity.identifier) - self.assertEqual(entity.get_roles(), set()) - - def test_ctor_w_identifier(self): - TYPE = "type" - ID = "id" - entity = self._make_one(TYPE, ID) - self.assertEqual(entity.type, TYPE) - self.assertEqual(entity.identifier, ID) - self.assertEqual(entity.get_roles(), set()) - - def test___str__no_identifier(self): - TYPE = "type" - entity = self._make_one(TYPE) - self.assertEqual(str(entity), TYPE) - - def test___str__w_identifier(self): - TYPE = "type" - ID = "id" - entity = self._make_one(TYPE, ID) - self.assertEqual(str(entity), "%s-%s" % (TYPE, ID)) - - def test_grant_simple(self): - TYPE = "type" - ROLE = "role" - entity = self._make_one(TYPE) - entity.grant(ROLE) - self.assertEqual(entity.get_roles(), set([ROLE])) - - def test_grant_duplicate(self): - TYPE = "type" - ROLE1 = "role1" - ROLE2 = "role2" - entity = self._make_one(TYPE) - entity.grant(ROLE1) - entity.grant(ROLE2) - entity.grant(ROLE1) - self.assertEqual(entity.get_roles(), set([ROLE1, ROLE2])) - - def test_revoke_miss(self): - TYPE = "type" - ROLE = "nonesuch" - entity = self._make_one(TYPE) - entity.revoke(ROLE) - self.assertEqual(entity.get_roles(), set()) - - def test_revoke_hit(self): - TYPE = "type" - ROLE1 = "role1" - ROLE2 = "role2" - entity = self._make_one(TYPE) - entity.grant(ROLE1) - entity.grant(ROLE2) - entity.revoke(ROLE1) - self.assertEqual(entity.get_roles(), set([ROLE2])) - - def test_grant_read(self): - TYPE = "type" - entity = self._make_one(TYPE) - entity.grant_read() - self.assertEqual(entity.get_roles(), set([entity.READER_ROLE])) - - def test_grant_write(self): - TYPE = "type" - entity = self._make_one(TYPE) - entity.grant_write() - self.assertEqual(entity.get_roles(), set([entity.WRITER_ROLE])) - - def test_grant_owner(self): - TYPE = "type" - entity = self._make_one(TYPE) - entity.grant_owner() - self.assertEqual(entity.get_roles(), set([entity.OWNER_ROLE])) - - def test_revoke_read(self): - TYPE = "type" - entity = self._make_one(TYPE) - entity.grant(entity.READER_ROLE) - entity.revoke_read() - self.assertEqual(entity.get_roles(), set()) - - def test_revoke_write(self): - TYPE = "type" - entity = self._make_one(TYPE) - entity.grant(entity.WRITER_ROLE) - entity.revoke_write() - self.assertEqual(entity.get_roles(), set()) - - def test_revoke_owner(self): - TYPE = "type" - entity = self._make_one(TYPE) - entity.grant(entity.OWNER_ROLE) - entity.revoke_owner() - self.assertEqual(entity.get_roles(), set()) - - -class Test_ACL(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.acl import ACL - - return ACL - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_validate_predefined(self): - 
ACL = self._get_target_class() - self.assertIsNone(ACL.validate_predefined(None)) - self.assertEqual(ACL.validate_predefined("public-read"), "publicRead") - self.assertEqual(ACL.validate_predefined("publicRead"), "publicRead") - with self.assertRaises(ValueError): - ACL.validate_predefined("publicread") - - def test_ctor(self): - acl = self._make_one() - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - - def test__ensure_loaded(self): - acl = self._make_one() - - def _reload(): - acl._really_loaded = True - - acl.reload = _reload - acl._ensure_loaded() - self.assertTrue(acl._really_loaded) - - def test_client_is_abstract(self): - acl = self._make_one() - self.assertRaises(NotImplementedError, lambda: acl.client) - - def test_reset(self): - TYPE = "type" - ID = "id" - acl = self._make_one() - acl.loaded = True - acl.entity(TYPE, ID) - acl.reset() - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - - def test___iter___empty_eager(self): - acl = self._make_one() - acl.loaded = True - self.assertEqual(list(acl), []) - - def test___iter___empty_lazy(self): - acl = self._make_one() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - self.assertEqual(list(acl), []) - self.assertTrue(acl.loaded) - - def test___iter___non_empty_no_roles(self): - TYPE = "type" - ID = "id" - acl = self._make_one() - acl.loaded = True - acl.entity(TYPE, ID) - self.assertEqual(list(acl), []) - - def test___iter___non_empty_w_roles(self): - TYPE = "type" - ID = "id" - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.entity(TYPE, ID) - entity.grant(ROLE) - self.assertEqual(list(acl), [{"entity": "%s-%s" % (TYPE, ID), "role": ROLE}]) - - def test___iter___non_empty_w_empty_role(self): - TYPE = "type" - ID = "id" - acl = self._make_one() - acl.loaded = True - entity = acl.entity(TYPE, ID) - entity.grant("") - self.assertEqual(list(acl), []) - - def test_entity_from_dict_allUsers_eager(self): - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.entity_from_dict({"entity": "allUsers", "role": ROLE}) - self.assertEqual(entity.type, "allUsers") - self.assertIsNone(entity.identifier) - self.assertEqual(entity.get_roles(), set([ROLE])) - self.assertEqual(list(acl), [{"entity": "allUsers", "role": ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_from_dict_allAuthenticatedUsers(self): - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.entity_from_dict({"entity": "allAuthenticatedUsers", "role": ROLE}) - self.assertEqual(entity.type, "allAuthenticatedUsers") - self.assertIsNone(entity.identifier) - self.assertEqual(entity.get_roles(), set([ROLE])) - self.assertEqual(list(acl), [{"entity": "allAuthenticatedUsers", "role": ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_from_dict_string_w_hyphen(self): - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.entity_from_dict({"entity": "type-id", "role": ROLE}) - self.assertEqual(entity.type, "type") - self.assertEqual(entity.identifier, "id") - self.assertEqual(entity.get_roles(), set([ROLE])) - self.assertEqual(list(acl), [{"entity": "type-id", "role": ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_from_dict_string_wo_hyphen(self): - ROLE = "role" - acl = self._make_one() - acl.loaded = True - self.assertRaises( - ValueError, acl.entity_from_dict, {"entity": "bogus", "role": ROLE} - ) - self.assertEqual(list(acl.get_entities()), []) - - 
def test_has_entity_miss_str_eager(self): - acl = self._make_one() - acl.loaded = True - self.assertFalse(acl.has_entity("nonesuch")) - - def test_has_entity_miss_str_lazy(self): - acl = self._make_one() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - self.assertFalse(acl.has_entity("nonesuch")) - self.assertTrue(acl.loaded) - - def test_has_entity_miss_entity(self): - from google.cloud.storage.acl import _ACLEntity - - TYPE = "type" - ID = "id" - entity = _ACLEntity(TYPE, ID) - acl = self._make_one() - acl.loaded = True - self.assertFalse(acl.has_entity(entity)) - - def test_has_entity_hit_str(self): - TYPE = "type" - ID = "id" - acl = self._make_one() - acl.loaded = True - acl.entity(TYPE, ID) - self.assertTrue(acl.has_entity("%s-%s" % (TYPE, ID))) - - def test_has_entity_hit_entity(self): - TYPE = "type" - ID = "id" - acl = self._make_one() - acl.loaded = True - entity = acl.entity(TYPE, ID) - self.assertTrue(acl.has_entity(entity)) - - def test_get_entity_miss_str_no_default_eager(self): - acl = self._make_one() - acl.loaded = True - self.assertIsNone(acl.get_entity("nonesuch")) - - def test_get_entity_miss_str_no_default_lazy(self): - acl = self._make_one() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - self.assertIsNone(acl.get_entity("nonesuch")) - self.assertTrue(acl.loaded) - - def test_get_entity_miss_entity_no_default(self): - from google.cloud.storage.acl import _ACLEntity - - TYPE = "type" - ID = "id" - entity = _ACLEntity(TYPE, ID) - acl = self._make_one() - acl.loaded = True - self.assertIsNone(acl.get_entity(entity)) - - def test_get_entity_miss_str_w_default(self): - DEFAULT = object() - acl = self._make_one() - acl.loaded = True - self.assertIs(acl.get_entity("nonesuch", DEFAULT), DEFAULT) - - def test_get_entity_miss_entity_w_default(self): - from google.cloud.storage.acl import _ACLEntity - - DEFAULT = object() - TYPE = "type" - ID = "id" - entity = _ACLEntity(TYPE, ID) - acl = self._make_one() - acl.loaded = True - self.assertIs(acl.get_entity(entity, DEFAULT), DEFAULT) - - def test_get_entity_hit_str(self): - TYPE = "type" - ID = "id" - acl = self._make_one() - acl.loaded = True - acl.entity(TYPE, ID) - self.assertTrue(acl.has_entity("%s-%s" % (TYPE, ID))) - - def test_get_entity_hit_entity(self): - TYPE = "type" - ID = "id" - acl = self._make_one() - acl.loaded = True - entity = acl.entity(TYPE, ID) - self.assertTrue(acl.has_entity(entity)) - - def test_add_entity_miss_eager(self): - from google.cloud.storage.acl import _ACLEntity - - TYPE = "type" - ID = "id" - ROLE = "role" - entity = _ACLEntity(TYPE, ID) - entity.grant(ROLE) - acl = self._make_one() - acl.loaded = True - acl.add_entity(entity) - self.assertTrue(acl.loaded) - self.assertEqual(list(acl), [{"entity": "type-id", "role": ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_add_entity_miss_lazy(self): - from google.cloud.storage.acl import _ACLEntity - - TYPE = "type" - ID = "id" - ROLE = "role" - entity = _ACLEntity(TYPE, ID) - entity.grant(ROLE) - acl = self._make_one() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - acl.add_entity(entity) - self.assertTrue(acl.loaded) - self.assertEqual(list(acl), [{"entity": "type-id", "role": ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - self.assertTrue(acl.loaded) - - def test_add_entity_hit(self): - from google.cloud.storage.acl import _ACLEntity - - TYPE = "type" - ID = "id" - ENTITY_VAL = "%s-%s" % (TYPE, ID) - ROLE = "role" - entity = _ACLEntity(TYPE, ID) - 
entity.grant(ROLE) - acl = self._make_one() - acl.loaded = True - before = acl.entity(TYPE, ID) - acl.add_entity(entity) - self.assertTrue(acl.loaded) - self.assertIsNot(acl.get_entity(ENTITY_VAL), before) - self.assertIs(acl.get_entity(ENTITY_VAL), entity) - self.assertEqual(list(acl), [{"entity": "type-id", "role": ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_miss(self): - TYPE = "type" - ID = "id" - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.entity(TYPE, ID) - self.assertTrue(acl.loaded) - entity.grant(ROLE) - self.assertEqual(list(acl), [{"entity": "type-id", "role": ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_hit(self): - TYPE = "type" - ID = "id" - ROLE = "role" - acl = self._make_one() - acl.loaded = True - before = acl.entity(TYPE, ID) - before.grant(ROLE) - entity = acl.entity(TYPE, ID) - self.assertIs(entity, before) - self.assertEqual(list(acl), [{"entity": "type-id", "role": ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_user(self): - ID = "id" - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.user(ID) - entity.grant(ROLE) - self.assertEqual(entity.type, "user") - self.assertEqual(entity.identifier, ID) - self.assertEqual(list(acl), [{"entity": "user-%s" % ID, "role": ROLE}]) - - def test_group(self): - ID = "id" - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.group(ID) - entity.grant(ROLE) - self.assertEqual(entity.type, "group") - self.assertEqual(entity.identifier, ID) - self.assertEqual(list(acl), [{"entity": "group-%s" % ID, "role": ROLE}]) - - def test_domain(self): - ID = "id" - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.domain(ID) - entity.grant(ROLE) - self.assertEqual(entity.type, "domain") - self.assertEqual(entity.identifier, ID) - self.assertEqual(list(acl), [{"entity": "domain-%s" % ID, "role": ROLE}]) - - def test_all(self): - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.all() - entity.grant(ROLE) - self.assertEqual(entity.type, "allUsers") - self.assertIsNone(entity.identifier) - self.assertEqual(list(acl), [{"entity": "allUsers", "role": ROLE}]) - - def test_all_authenticated(self): - ROLE = "role" - acl = self._make_one() - acl.loaded = True - entity = acl.all_authenticated() - entity.grant(ROLE) - self.assertEqual(entity.type, "allAuthenticatedUsers") - self.assertIsNone(entity.identifier) - self.assertEqual(list(acl), [{"entity": "allAuthenticatedUsers", "role": ROLE}]) - - def test_get_entities_empty_eager(self): - acl = self._make_one() - acl.loaded = True - self.assertEqual(acl.get_entities(), []) - - def test_get_entities_empty_lazy(self): - acl = self._make_one() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - self.assertEqual(acl.get_entities(), []) - self.assertTrue(acl.loaded) - - def test_get_entities_nonempty(self): - TYPE = "type" - ID = "id" - acl = self._make_one() - acl.loaded = True - entity = acl.entity(TYPE, ID) - self.assertEqual(acl.get_entities(), [entity]) - - def test_reload_missing(self): - # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/652 - ROLE = "role" - connection = _Connection({}) - client = _Client(connection) - acl = self._make_one() - acl.reload_path = "/testing/acl" - acl.loaded = True - acl.entity("allUsers", ROLE) - acl.reload(client=client) - self.assertEqual(list(acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - 
self.assertEqual( - kw[0], {"method": "GET", "path": "/testing/acl", "query_params": {}} - ) - - def test_reload_empty_result_clears_local(self): - ROLE = "role" - connection = _Connection({"items": []}) - client = _Client(connection) - acl = self._make_one() - acl.reload_path = "/testing/acl" - acl.loaded = True - acl.entity("allUsers", ROLE) - - acl.reload(client=client) - - self.assertTrue(acl.loaded) - self.assertEqual(list(acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], {"method": "GET", "path": "/testing/acl", "query_params": {}} - ) - - def test_reload_nonempty_result_w_user_project(self): - ROLE = "role" - USER_PROJECT = "user-project-123" - connection = _Connection({"items": [{"entity": "allUsers", "role": ROLE}]}) - client = _Client(connection) - acl = self._make_one() - acl.reload_path = "/testing/acl" - acl.loaded = True - acl.user_project = USER_PROJECT - - acl.reload(client=client) - - self.assertTrue(acl.loaded) - self.assertEqual(list(acl), [{"entity": "allUsers", "role": ROLE}]) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "GET", - "path": "/testing/acl", - "query_params": {"userProject": USER_PROJECT}, - }, - ) - - def test_save_none_set_none_passed(self): - connection = _Connection() - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.save(client=client) - kw = connection._requested - self.assertEqual(len(kw), 0) - - def test_save_existing_missing_none_passed(self): - connection = _Connection({}) - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.loaded = True - acl.save(client=client) - self.assertEqual(list(acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/testing") - self.assertEqual(kw[0]["data"], {"acl": []}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - - def test_save_no_acl(self): - ROLE = "role" - AFTER = [{"entity": "allUsers", "role": ROLE}] - connection = _Connection({"acl": AFTER}) - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.loaded = True - acl.entity("allUsers").grant(ROLE) - acl.save(client=client) - self.assertEqual(list(acl), AFTER) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/testing") - self.assertEqual( - kw[0], - { - "method": "PATCH", - "path": "/testing", - "query_params": {"projection": "full"}, - "data": {"acl": AFTER}, - }, - ) - - def test_save_w_acl_w_user_project(self): - ROLE1 = "role1" - ROLE2 = "role2" - STICKY = {"entity": "allUsers", "role": ROLE2} - USER_PROJECT = "user-project-123" - new_acl = [{"entity": "allUsers", "role": ROLE1}] - connection = _Connection({"acl": [STICKY] + new_acl}) - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.loaded = True - acl.user_project = USER_PROJECT - - acl.save(new_acl, client=client) - - entries = list(acl) - self.assertEqual(len(entries), 2) - self.assertTrue(STICKY in entries) - self.assertTrue(new_acl[0] in entries) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "PATCH", - "path": "/testing", - "query_params": {"projection": "full", "userProject": USER_PROJECT}, - "data": {"acl": new_acl}, - }, - ) - - def test_save_prefefined_invalid(self): - 
connection = _Connection() - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.loaded = True - with self.assertRaises(ValueError): - acl.save_predefined("bogus", client=client) - - def test_save_predefined_valid(self): - PREDEFINED = "private" - connection = _Connection({"acl": []}) - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.loaded = True - acl.save_predefined(PREDEFINED, client=client) - entries = list(acl) - self.assertEqual(len(entries), 0) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "PATCH", - "path": "/testing", - "query_params": {"projection": "full", "predefinedAcl": PREDEFINED}, - "data": {"acl": []}, - }, - ) - - def test_save_predefined_w_XML_alias(self): - PREDEFINED_XML = "project-private" - PREDEFINED_JSON = "projectPrivate" - connection = _Connection({"acl": []}) - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.loaded = True - acl.save_predefined(PREDEFINED_XML, client=client) - entries = list(acl) - self.assertEqual(len(entries), 0) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "PATCH", - "path": "/testing", - "query_params": { - "projection": "full", - "predefinedAcl": PREDEFINED_JSON, - }, - "data": {"acl": []}, - }, - ) - - def test_save_predefined_valid_w_alternate_query_param(self): - # Cover case where subclass overrides _PREDEFINED_QUERY_PARAM - PREDEFINED = "publicRead" - connection = _Connection({"acl": []}) - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.loaded = True - acl._PREDEFINED_QUERY_PARAM = "alternate" - acl.save_predefined(PREDEFINED, client=client) - entries = list(acl) - self.assertEqual(len(entries), 0) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "PATCH", - "path": "/testing", - "query_params": {"projection": "full", "alternate": PREDEFINED}, - "data": {"acl": []}, - }, - ) - - def test_clear(self): - ROLE1 = "role1" - ROLE2 = "role2" - STICKY = {"entity": "allUsers", "role": ROLE2} - connection = _Connection({"acl": [STICKY]}) - client = _Client(connection) - acl = self._make_one() - acl.save_path = "/testing" - acl.loaded = True - acl.entity("allUsers", ROLE1) - acl.clear(client=client) - self.assertEqual(list(acl), [STICKY]) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "PATCH", - "path": "/testing", - "query_params": {"projection": "full"}, - "data": {"acl": []}, - }, - ) - - -class Test_BucketACL(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.acl import BucketACL - - return BucketACL - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor(self): - NAME = "name" - bucket = _Bucket(NAME) - acl = self._make_one(bucket) - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - self.assertIs(acl.bucket, bucket) - self.assertEqual(acl.reload_path, "/b/%s/acl" % NAME) - self.assertEqual(acl.save_path, "/b/%s" % NAME) - - def test_user_project(self): - NAME = "name" - USER_PROJECT = "user-project-123" - bucket = _Bucket(NAME) - acl = self._make_one(bucket) - self.assertIsNone(acl.user_project) - bucket.user_project = USER_PROJECT - self.assertEqual(acl.user_project, USER_PROJECT) - - -class Test_DefaultObjectACL(unittest.TestCase): - @staticmethod - 
def _get_target_class(): - from google.cloud.storage.acl import DefaultObjectACL - - return DefaultObjectACL - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor(self): - NAME = "name" - bucket = _Bucket(NAME) - acl = self._make_one(bucket) - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - self.assertIs(acl.bucket, bucket) - self.assertEqual(acl.reload_path, "/b/%s/defaultObjectAcl" % NAME) - self.assertEqual(acl.save_path, "/b/%s" % NAME) - - -class Test_ObjectACL(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.acl import ObjectACL - - return ObjectACL - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor(self): - NAME = "name" - BLOB_NAME = "blob-name" - bucket = _Bucket(NAME) - blob = _Blob(bucket, BLOB_NAME) - acl = self._make_one(blob) - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - self.assertIs(acl.blob, blob) - self.assertEqual(acl.reload_path, "/b/%s/o/%s/acl" % (NAME, BLOB_NAME)) - self.assertEqual(acl.save_path, "/b/%s/o/%s" % (NAME, BLOB_NAME)) - - def test_user_project(self): - NAME = "name" - BLOB_NAME = "blob-name" - USER_PROJECT = "user-project-123" - bucket = _Bucket(NAME) - blob = _Blob(bucket, BLOB_NAME) - acl = self._make_one(blob) - self.assertIsNone(acl.user_project) - blob.user_project = USER_PROJECT - self.assertEqual(acl.user_project, USER_PROJECT) - - -class _Blob(object): - - user_project = None - - def __init__(self, bucket, blob): - self.bucket = bucket - self.blob = blob - - @property - def path(self): - return "%s/o/%s" % (self.bucket.path, self.blob) - - -class _Bucket(object): - - user_project = None - - def __init__(self, name): - self.name = name - - @property - def path(self): - return "/b/%s" % self.name - - -class _Connection(object): - _delete_ok = False - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - self._deleted = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response - - -class _Client(object): - def __init__(self, connection): - self._connection = connection diff --git a/storage/tests/unit/test_batch.py b/storage/tests/unit/test_batch.py deleted file mode 100644 index e18b1b9fada0..000000000000 --- a/storage/tests/unit/test_batch.py +++ /dev/null @@ -1,643 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import mock -import requests -from six.moves import http_client - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_response(status=http_client.OK, content=b"", headers={}): - response = requests.Response() - response.status_code = status - response._content = content - response.headers = headers - response.request = requests.Request() - return response - - -def _make_requests_session(responses): - session = mock.create_autospec(requests.Session, instance=True) - session.request.side_effect = responses - return session - - -class TestMIMEApplicationHTTP(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.batch import MIMEApplicationHTTP - - return MIMEApplicationHTTP - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor_body_None(self): - METHOD = "DELETE" - PATH = "/path/to/api" - LINES = ["DELETE /path/to/api HTTP/1.1", ""] - mah = self._make_one(METHOD, PATH, {}, None) - self.assertEqual(mah.get_content_type(), "application/http") - self.assertEqual(mah.get_payload().splitlines(), LINES) - - def test_ctor_body_str(self): - METHOD = "GET" - PATH = "/path/to/api" - BODY = "ABC" - HEADERS = {"Content-Length": len(BODY), "Content-Type": "text/plain"} - LINES = [ - "GET /path/to/api HTTP/1.1", - "Content-Length: 3", - "Content-Type: text/plain", - "", - "ABC", - ] - mah = self._make_one(METHOD, PATH, HEADERS, BODY) - self.assertEqual(mah.get_payload().splitlines(), LINES) - - def test_ctor_body_dict(self): - METHOD = "GET" - PATH = "/path/to/api" - BODY = {"foo": "bar"} - HEADERS = {} - LINES = [ - "GET /path/to/api HTTP/1.1", - "Content-Length: 14", - "Content-Type: application/json", - "", - '{"foo": "bar"}', - ] - mah = self._make_one(METHOD, PATH, HEADERS, BODY) - self.assertEqual(mah.get_payload().splitlines(), LINES) - - -class TestBatch(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.batch import Batch - - return Batch - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor(self): - http = _make_requests_session([]) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._make_one(client) - self.assertIs(batch._client, client) - self.assertEqual(len(batch._requests), 0) - self.assertEqual(len(batch._target_objects), 0) - - def test_current(self): - from google.cloud.storage.client import Client - - project = "PROJECT" - credentials = _make_credentials() - client = Client(project=project, credentials=credentials) - batch1 = self._make_one(client) - self.assertIsNone(batch1.current()) - - client._push_batch(batch1) - self.assertIs(batch1.current(), batch1) - - batch2 = self._make_one(client) - client._push_batch(batch2) - self.assertIs(batch1.current(), batch2) - - def test__make_request_GET_normal(self): - from google.cloud.storage.batch import _FutureDict - - url = "http://example.com/api" - http = _make_requests_session([]) - connection = _Connection(http=http) - batch = self._make_one(connection) - target = _MockObject() - - response = batch._make_request("GET", url, target_object=target) - - # Check the respone - self.assertEqual(response.status_code, 204) - self.assertIsInstance(response.json(), _FutureDict) - self.assertIsInstance(response.content, _FutureDict) - self.assertIs(target._properties, response.content) - - # The real http request should not have been called 
yet. - http.request.assert_not_called() - - # Check the queued request - self.assertEqual(len(batch._requests), 1) - request = batch._requests[0] - request_method, request_url, _, request_data, _ = request - self.assertEqual(request_method, "GET") - self.assertEqual(request_url, url) - self.assertIsNone(request_data) - - def test__make_request_POST_normal(self): - from google.cloud.storage.batch import _FutureDict - - url = "http://example.com/api" - http = _make_requests_session([]) - connection = _Connection(http=http) - batch = self._make_one(connection) - data = {"foo": 1} - target = _MockObject() - - response = batch._make_request( - "POST", url, data={"foo": 1}, target_object=target - ) - - self.assertEqual(response.status_code, 204) - self.assertIsInstance(response.content, _FutureDict) - self.assertIs(target._properties, response.content) - - # The real http request should not have been called yet. - http.request.assert_not_called() - - request = batch._requests[0] - request_method, request_url, _, request_data, _ = request - self.assertEqual(request_method, "POST") - self.assertEqual(request_url, url) - self.assertEqual(request_data, data) - - def test__make_request_PATCH_normal(self): - from google.cloud.storage.batch import _FutureDict - - url = "http://example.com/api" - http = _make_requests_session([]) - connection = _Connection(http=http) - batch = self._make_one(connection) - data = {"foo": 1} - target = _MockObject() - - response = batch._make_request( - "PATCH", url, data={"foo": 1}, target_object=target - ) - - self.assertEqual(response.status_code, 204) - self.assertIsInstance(response.content, _FutureDict) - self.assertIs(target._properties, response.content) - - # The real http request should not have been called yet. - http.request.assert_not_called() - - request = batch._requests[0] - request_method, request_url, _, request_data, _ = request - self.assertEqual(request_method, "PATCH") - self.assertEqual(request_url, url) - self.assertEqual(request_data, data) - - def test__make_request_DELETE_normal(self): - from google.cloud.storage.batch import _FutureDict - - url = "http://example.com/api" - http = _make_requests_session([]) - connection = _Connection(http=http) - batch = self._make_one(connection) - target = _MockObject() - - response = batch._make_request("DELETE", url, target_object=target) - - # Check the respone - self.assertEqual(response.status_code, 204) - self.assertIsInstance(response.content, _FutureDict) - self.assertIs(target._properties, response.content) - - # The real http request should not have been called yet. 
- http.request.assert_not_called() - - # Check the queued request - self.assertEqual(len(batch._requests), 1) - request = batch._requests[0] - request_method, request_url, _, request_data, _ = request - self.assertEqual(request_method, "DELETE") - self.assertEqual(request_url, url) - self.assertIsNone(request_data) - - def test__make_request_POST_too_many_requests(self): - url = "http://example.com/api" - http = _make_requests_session([]) - connection = _Connection(http=http) - batch = self._make_one(connection) - - batch._MAX_BATCH_SIZE = 1 - batch._requests.append(("POST", url, {}, {"bar": 2})) - - with self.assertRaises(ValueError): - batch._make_request("POST", url, data={"foo": 1}) - - def test_finish_empty(self): - http = _make_requests_session([]) - connection = _Connection(http=http) - batch = self._make_one(connection) - - with self.assertRaises(ValueError): - batch.finish() - - def _get_payload_chunks(self, boundary, payload): - divider = "--" + boundary[len('boundary="') : -1] - chunks = payload.split(divider)[1:-1] # discard prolog / epilog - return chunks - - def _check_subrequest_no_payload(self, chunk, method, url): - lines = chunk.splitlines() - # blank + 2 headers + blank + request + blank + blank - self.assertEqual(len(lines), 7) - self.assertEqual(lines[0], "") - self.assertEqual(lines[1], "Content-Type: application/http") - self.assertEqual(lines[2], "MIME-Version: 1.0") - self.assertEqual(lines[3], "") - self.assertEqual(lines[4], "%s %s HTTP/1.1" % (method, url)) - self.assertEqual(lines[5], "") - self.assertEqual(lines[6], "") - - def _check_subrequest_payload(self, chunk, method, url, payload): - import json - - lines = chunk.splitlines() - # blank + 2 headers + blank + request + 2 headers + blank + body - payload_str = json.dumps(payload) - self.assertEqual(lines[0], "") - self.assertEqual(lines[1], "Content-Type: application/http") - self.assertEqual(lines[2], "MIME-Version: 1.0") - self.assertEqual(lines[3], "") - self.assertEqual(lines[4], "%s %s HTTP/1.1" % (method, url)) - if method == "GET": - self.assertEqual(len(lines), 7) - self.assertEqual(lines[5], "") - self.assertEqual(lines[6], "") - else: - self.assertEqual(len(lines), 9) - self.assertEqual(lines[5], "Content-Length: %d" % len(payload_str)) - self.assertEqual(lines[6], "Content-Type: application/json") - self.assertEqual(lines[7], "") - self.assertEqual(json.loads(lines[8]), payload) - - def _get_mutlipart_request(self, http): - request_call = http.request.mock_calls[0][2] - request_headers = request_call["headers"] - request_body = request_call["data"] - content_type, boundary = [ - value.strip() for value in request_headers["Content-Type"].split(";") - ] - - return request_headers, request_body, content_type, boundary - - def test_finish_nonempty(self): - url = "http://api.example.com/other_api" - expected_response = _make_response( - content=_THREE_PART_MIME_RESPONSE, - headers={"content-type": 'multipart/mixed; boundary="DEADBEEF="'}, - ) - http = _make_requests_session([expected_response]) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._make_one(client) - batch.API_BASE_URL = "http://api.example.com" - - batch._do_request("POST", url, {}, {"foo": 1, "bar": 2}, None) - batch._do_request("PATCH", url, {}, {"bar": 3}, None) - batch._do_request("DELETE", url, {}, None, None) - result = batch.finish() - - self.assertEqual(len(result), len(batch._requests)) - - response1, response2, response3 = result - - self.assertEqual( - response1.headers, - {"Content-Length": 
"20", "Content-Type": "application/json; charset=UTF-8"}, - ) - self.assertEqual(response1.json(), {"foo": 1, "bar": 2}) - - self.assertEqual( - response2.headers, - {"Content-Length": "20", "Content-Type": "application/json; charset=UTF-8"}, - ) - self.assertEqual(response2.json(), {"foo": 1, "bar": 3}) - - self.assertEqual(response3.headers, {"Content-Length": "0"}) - self.assertEqual(response3.status_code, http_client.NO_CONTENT) - - expected_url = "{}/batch/storage/v1".format(batch.API_BASE_URL) - http.request.assert_called_once_with( - method="POST", - url=expected_url, - headers=mock.ANY, - data=mock.ANY, - timeout=mock.ANY, - ) - - request_info = self._get_mutlipart_request(http) - request_headers, request_body, content_type, boundary = request_info - - self.assertEqual(content_type, "multipart/mixed") - self.assertTrue(boundary.startswith('boundary="==')) - self.assertTrue(boundary.endswith('=="')) - self.assertEqual(request_headers["MIME-Version"], "1.0") - - chunks = self._get_payload_chunks(boundary, request_body) - self.assertEqual(len(chunks), 3) - self._check_subrequest_payload(chunks[0], "POST", url, {"foo": 1, "bar": 2}) - self._check_subrequest_payload(chunks[1], "PATCH", url, {"bar": 3}) - self._check_subrequest_no_payload(chunks[2], "DELETE", url) - - def test_finish_responses_mismatch(self): - url = "http://api.example.com/other_api" - expected_response = _make_response( - content=_TWO_PART_MIME_RESPONSE_WITH_FAIL, - headers={"content-type": 'multipart/mixed; boundary="DEADBEEF="'}, - ) - http = _make_requests_session([expected_response]) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._make_one(client) - batch.API_BASE_URL = "http://api.example.com" - - batch._requests.append(("GET", url, {}, None)) - with self.assertRaises(ValueError): - batch.finish() - - def test_finish_nonempty_with_status_failure(self): - from google.cloud.exceptions import NotFound - - url = "http://api.example.com/other_api" - expected_response = _make_response( - content=_TWO_PART_MIME_RESPONSE_WITH_FAIL, - headers={"content-type": 'multipart/mixed; boundary="DEADBEEF="'}, - ) - http = _make_requests_session([expected_response]) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._make_one(client) - batch.API_BASE_URL = "http://api.example.com" - target1 = _MockObject() - target2 = _MockObject() - - batch._do_request("GET", url, {}, None, target1) - batch._do_request("GET", url, {}, None, target2) - - # Make sure futures are not populated. 
- self.assertEqual( - [future for future in batch._target_objects], [target1, target2] - ) - target2_future_before = target2._properties - - with self.assertRaises(NotFound): - batch.finish() - - self.assertEqual(target1._properties, {"foo": 1, "bar": 2}) - self.assertIs(target2._properties, target2_future_before) - - expected_url = "{}/batch/storage/v1".format(batch.API_BASE_URL) - http.request.assert_called_once_with( - method="POST", - url=expected_url, - headers=mock.ANY, - data=mock.ANY, - timeout=mock.ANY, - ) - - _, request_body, _, boundary = self._get_mutlipart_request(http) - - chunks = self._get_payload_chunks(boundary, request_body) - self.assertEqual(len(chunks), 2) - self._check_subrequest_payload(chunks[0], "GET", url, {}) - self._check_subrequest_payload(chunks[1], "GET", url, {}) - - def test_finish_nonempty_non_multipart_response(self): - url = "http://api.example.com/other_api" - http = _make_requests_session([_make_response()]) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._make_one(client) - batch._requests.append(("POST", url, {}, {"foo": 1, "bar": 2})) - - with self.assertRaises(ValueError): - batch.finish() - - def test_as_context_mgr_wo_error(self): - from google.cloud.storage.client import Client - - url = "http://example.com/api" - expected_response = _make_response( - content=_THREE_PART_MIME_RESPONSE, - headers={"content-type": 'multipart/mixed; boundary="DEADBEEF="'}, - ) - http = _make_requests_session([expected_response]) - project = "PROJECT" - credentials = _make_credentials() - client = Client(project=project, credentials=credentials) - client._http_internal = http - - self.assertEqual(list(client._batch_stack), []) - - target1 = _MockObject() - target2 = _MockObject() - target3 = _MockObject() - - with self._make_one(client) as batch: - self.assertEqual(list(client._batch_stack), [batch]) - batch._make_request( - "POST", url, {"foo": 1, "bar": 2}, target_object=target1 - ) - batch._make_request("PATCH", url, {"bar": 3}, target_object=target2) - batch._make_request("DELETE", url, target_object=target3) - - self.assertEqual(list(client._batch_stack), []) - self.assertEqual(len(batch._requests), 3) - self.assertEqual(batch._requests[0][0], "POST") - self.assertEqual(batch._requests[1][0], "PATCH") - self.assertEqual(batch._requests[2][0], "DELETE") - self.assertEqual(batch._target_objects, [target1, target2, target3]) - self.assertEqual(target1._properties, {"foo": 1, "bar": 2}) - self.assertEqual(target2._properties, {"foo": 1, "bar": 3}) - self.assertEqual(target3._properties, b"") - - def test_as_context_mgr_w_error(self): - from google.cloud.storage.batch import _FutureDict - from google.cloud.storage.client import Client - - URL = "http://example.com/api" - http = _make_requests_session([]) - connection = _Connection(http=http) - project = "PROJECT" - credentials = _make_credentials() - client = Client(project=project, credentials=credentials) - client._base_connection = connection - - self.assertEqual(list(client._batch_stack), []) - - target1 = _MockObject() - target2 = _MockObject() - target3 = _MockObject() - try: - with self._make_one(client) as batch: - self.assertEqual(list(client._batch_stack), [batch]) - batch._make_request( - "POST", URL, {"foo": 1, "bar": 2}, target_object=target1 - ) - batch._make_request("PATCH", URL, {"bar": 3}, target_object=target2) - batch._make_request("DELETE", URL, target_object=target3) - raise ValueError() - except ValueError: - pass - - http.request.assert_not_called() - 
self.assertEqual(list(client._batch_stack), []) - self.assertEqual(len(batch._requests), 3) - self.assertEqual(batch._target_objects, [target1, target2, target3]) - # Since the context manager fails, finish will not get called and - # the _properties will still be futures. - self.assertIsInstance(target1._properties, _FutureDict) - self.assertIsInstance(target2._properties, _FutureDict) - self.assertIsInstance(target3._properties, _FutureDict) - - -class Test__unpack_batch_response(unittest.TestCase): - def _call_fut(self, headers, content): - from google.cloud.storage.batch import _unpack_batch_response - - response = _make_response(content=content, headers=headers) - - return _unpack_batch_response(response) - - def _unpack_helper(self, response, content): - result = list(self._call_fut(response, content)) - self.assertEqual(len(result), 3) - - self.assertEqual(result[0].status_code, http_client.OK) - self.assertEqual(result[0].json(), {u"bar": 2, u"foo": 1}) - self.assertEqual(result[1].status_code, http_client.OK) - self.assertEqual(result[1].json(), {u"foo": 1, u"bar": 3}) - self.assertEqual(result[2].status_code, http_client.NO_CONTENT) - - def test_bytes_headers(self): - RESPONSE = {"content-type": b'multipart/mixed; boundary="DEADBEEF="'} - CONTENT = _THREE_PART_MIME_RESPONSE - self._unpack_helper(RESPONSE, CONTENT) - - def test_unicode_headers(self): - RESPONSE = {"content-type": u'multipart/mixed; boundary="DEADBEEF="'} - CONTENT = _THREE_PART_MIME_RESPONSE - self._unpack_helper(RESPONSE, CONTENT) - - -_TWO_PART_MIME_RESPONSE_WITH_FAIL = b"""\ ---DEADBEEF= -Content-Type: application/json -Content-ID: - -HTTP/1.1 200 OK -Content-Type: application/json; charset=UTF-8 -Content-Length: 20 - -{"foo": 1, "bar": 2} - ---DEADBEEF= -Content-Type: application/json -Content-ID: - -HTTP/1.1 404 Not Found -Content-Type: application/json; charset=UTF-8 -Content-Length: 35 - -{"error": {"message": "Not Found"}} - ---DEADBEEF=-- -""" - -_THREE_PART_MIME_RESPONSE = b"""\ ---DEADBEEF= -Content-Type: application/json -Content-ID: - -HTTP/1.1 200 OK -Content-Type: application/json; charset=UTF-8 -Content-Length: 20 - -{"foo": 1, "bar": 2} - ---DEADBEEF= -Content-Type: application/json -Content-ID: - -HTTP/1.1 200 OK -Content-Type: application/json; charset=UTF-8 -Content-Length: 20 - -{"foo": 1, "bar": 3} - ---DEADBEEF= -Content-Type: text/plain -Content-ID: - -HTTP/1.1 204 No Content -Content-Length: 0 - ---DEADBEEF=-- -""" - - -class Test__FutureDict(unittest.TestCase): - def _make_one(self, *args, **kw): - from google.cloud.storage.batch import _FutureDict - - return _FutureDict(*args, **kw) - - def test_get(self): - future = self._make_one() - self.assertRaises(KeyError, future.get, None) - - def test___getitem__(self): - future = self._make_one() - value = orig_value = object() - with self.assertRaises(KeyError): - value = future[None] - self.assertIs(value, orig_value) - - def test___setitem__(self): - future = self._make_one() - with self.assertRaises(KeyError): - future[None] = None - - -class _Connection(object): - - project = "TESTING" - - def __init__(self, **kw): - self.__dict__.update(kw) - - def _make_request(self, method, url, data=None, headers=None, timeout=None): - return self.http.request( - url=url, method=method, headers=headers, data=data, timeout=timeout - ) - - -class _MockObject(object): - pass - - -class _Client(object): - def __init__(self, connection): - self._base_connection = connection diff --git a/storage/tests/unit/test_blob.py b/storage/tests/unit/test_blob.py 
deleted file mode 100644 index 746e659c5785..000000000000 --- a/storage/tests/unit/test_blob.py +++ /dev/null @@ -1,3363 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import base64 -import datetime -import hashlib -import io -import json -import os -import tempfile -import unittest - -import mock -import pytest -import six -from six.moves import http_client - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -class Test_Blob(unittest.TestCase): - @staticmethod - def _make_one(*args, **kw): - from google.cloud.storage.blob import Blob - - properties = kw.pop("properties", {}) - blob = Blob(*args, **kw) - blob._properties.update(properties) - return blob - - def test_ctor_wo_encryption_key(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - properties = {"key": "value"} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertIs(blob.bucket, bucket) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(blob._properties, properties) - self.assertFalse(blob._acl.loaded) - self.assertIs(blob._acl.blob, blob) - self.assertEqual(blob._encryption_key, None) - self.assertEqual(blob.kms_key_name, None) - - def test_ctor_with_encoded_unicode(self): - blob_name = b"wet \xe2\x9b\xb5" - blob = self._make_one(blob_name, bucket=None) - unicode_name = u"wet \N{sailboat}" - self.assertNotIsInstance(blob.name, bytes) - self.assertIsInstance(blob.name, six.text_type) - self.assertEqual(blob.name, unicode_name) - - def test_ctor_w_encryption_key(self): - KEY = b"01234567890123456789012345678901" # 32 bytes - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY) - self.assertEqual(blob._encryption_key, KEY) - self.assertEqual(blob.kms_key_name, None) - - def test_ctor_w_kms_key_name_and_encryption_key(self): - KEY = b"01234567890123456789012345678901" # 32 bytes - KMS_RESOURCE = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - BLOB_NAME = "blob-name" - bucket = _Bucket() - - with self.assertRaises(ValueError): - self._make_one( - BLOB_NAME, bucket=bucket, encryption_key=KEY, kms_key_name=KMS_RESOURCE - ) - - def test_ctor_w_kms_key_name(self): - KMS_RESOURCE = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket, kms_key_name=KMS_RESOURCE) - self.assertEqual(blob._encryption_key, None) - self.assertEqual(blob.kms_key_name, KMS_RESOURCE) - - def test_ctor_with_generation(self): - BLOB_NAME = "blob-name" - GENERATION = 12345 - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION) - self.assertEqual(blob.generation, GENERATION) - - def _set_properties_helper(self, kms_key_name=None): - import datetime - from google.cloud._helpers import UTC - from 
google.cloud._helpers import _RFC3339_MICROS - - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - NOW = now.strftime(_RFC3339_MICROS) - BLOB_NAME = "blob-name" - GENERATION = 12345 - BLOB_ID = "name/{}/{}".format(BLOB_NAME, GENERATION) - SELF_LINK = "http://example.com/self/" - METAGENERATION = 23456 - SIZE = 12345 - MD5_HASH = "DEADBEEF" - MEDIA_LINK = "http://example.com/media/" - ENTITY = "project-owner-12345" - ENTITY_ID = "23456" - CRC32C = "FACE0DAC" - COMPONENT_COUNT = 2 - ETAG = "ETAG" - resource = { - "id": BLOB_ID, - "selfLink": SELF_LINK, - "generation": GENERATION, - "metageneration": METAGENERATION, - "contentType": "text/plain", - "timeCreated": NOW, - "updated": NOW, - "timeDeleted": NOW, - "storageClass": "NEARLINE", - "timeStorageClassUpdated": NOW, - "size": SIZE, - "md5Hash": MD5_HASH, - "mediaLink": MEDIA_LINK, - "contentEncoding": "gzip", - "contentDisposition": "inline", - "contentLanguage": "en-US", - "cacheControl": "private", - "metadata": {"foo": "Foo"}, - "owner": {"entity": ENTITY, "entityId": ENTITY_ID}, - "crc32c": CRC32C, - "componentCount": COMPONENT_COUNT, - "etag": ETAG, - } - - if kms_key_name is not None: - resource["kmsKeyName"] = kms_key_name - - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - - blob._set_properties(resource) - - self.assertEqual(blob.id, BLOB_ID) - self.assertEqual(blob.self_link, SELF_LINK) - self.assertEqual(blob.generation, GENERATION) - self.assertEqual(blob.metageneration, METAGENERATION) - self.assertEqual(blob.content_type, "text/plain") - self.assertEqual(blob.time_created, now) - self.assertEqual(blob.updated, now) - self.assertEqual(blob.time_deleted, now) - self.assertEqual(blob.storage_class, "NEARLINE") - self.assertEqual(blob.size, SIZE) - self.assertEqual(blob.md5_hash, MD5_HASH) - self.assertEqual(blob.media_link, MEDIA_LINK) - self.assertEqual(blob.content_encoding, "gzip") - self.assertEqual(blob.content_disposition, "inline") - self.assertEqual(blob.content_language, "en-US") - self.assertEqual(blob.cache_control, "private") - self.assertEqual(blob.metadata, {"foo": "Foo"}) - self.assertEqual(blob.owner, {"entity": ENTITY, "entityId": ENTITY_ID}) - self.assertEqual(blob.crc32c, CRC32C) - self.assertEqual(blob.component_count, COMPONENT_COUNT) - self.assertEqual(blob.etag, ETAG) - - if kms_key_name is not None: - self.assertEqual(blob.kms_key_name, kms_key_name) - else: - self.assertIsNone(blob.kms_key_name) - - def test__set_properties_wo_kms_key_name(self): - self._set_properties_helper() - - def test__set_properties_w_kms_key_name(self): - kms_resource = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - self._set_properties_helper(kms_key_name=kms_resource) - - def test_chunk_size_ctor(self): - from google.cloud.storage.blob import Blob - - BLOB_NAME = "blob-name" - BUCKET = object() - chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE - blob = self._make_one(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size) - self.assertEqual(blob._chunk_size, chunk_size) - - def test_chunk_size_getter(self): - BLOB_NAME = "blob-name" - BUCKET = object() - blob = self._make_one(BLOB_NAME, bucket=BUCKET) - self.assertIsNone(blob.chunk_size) - VALUE = object() - blob._chunk_size = VALUE - self.assertIs(blob.chunk_size, VALUE) - - def test_chunk_size_setter(self): - BLOB_NAME = "blob-name" - BUCKET = object() - blob = self._make_one(BLOB_NAME, bucket=BUCKET) - self.assertIsNone(blob._chunk_size) - blob._CHUNK_SIZE_MULTIPLE = 10 - blob.chunk_size = 20 - 
self.assertEqual(blob._chunk_size, 20) - - def test_chunk_size_setter_bad_value(self): - BLOB_NAME = "blob-name" - BUCKET = object() - blob = self._make_one(BLOB_NAME, bucket=BUCKET) - self.assertIsNone(blob._chunk_size) - blob._CHUNK_SIZE_MULTIPLE = 10 - with self.assertRaises(ValueError): - blob.chunk_size = 11 - - def test_acl_property(self): - from google.cloud.storage.acl import ObjectACL - - fake_bucket = _Bucket() - blob = self._make_one(u"name", bucket=fake_bucket) - acl = blob.acl - self.assertIsInstance(acl, ObjectACL) - self.assertIs(acl, blob._acl) - - def test_path_bad_bucket(self): - fake_bucket = object() - name = u"blob-name" - blob = self._make_one(name, bucket=fake_bucket) - self.assertRaises(AttributeError, getattr, blob, "path") - - def test_path_no_name(self): - bucket = _Bucket() - blob = self._make_one(u"", bucket=bucket) - self.assertRaises(ValueError, getattr, blob, "path") - - def test_path_normal(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.path, "/b/name/o/%s" % BLOB_NAME) - - def test_path_w_slash_in_name(self): - BLOB_NAME = "parent/child" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.path, "/b/name/o/parent%2Fchild") - - def test_path_with_non_ascii(self): - blob_name = u"Caf\xe9" - bucket = _Bucket() - blob = self._make_one(blob_name, bucket=bucket) - self.assertEqual(blob.path, "/b/name/o/Caf%C3%A9") - - def test_bucket_readonly_property(self): - blob_name = "BLOB" - bucket = _Bucket() - other = _Bucket() - blob = self._make_one(blob_name, bucket=bucket) - with self.assertRaises(AttributeError): - blob.bucket = other - - def test_client(self): - blob_name = "BLOB" - bucket = _Bucket() - blob = self._make_one(blob_name, bucket=bucket) - self.assertIs(blob.client, bucket.client) - - def test_user_project(self): - user_project = "user-project-123" - blob_name = "BLOB" - bucket = _Bucket(user_project=user_project) - blob = self._make_one(blob_name, bucket=bucket) - self.assertEqual(blob.user_project, user_project) - - def test__encryption_headers_wo_encryption_key(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - expected = {} - self.assertEqual(blob._encryption_headers(), expected) - - def test__encryption_headers_w_encryption_key(self): - key = b"aa426195405adee2c8081bb9e7e74b19" - header_key_value = "YWE0MjYxOTU0MDVhZGVlMmM4MDgxYmI5ZTdlNzRiMTk=" - header_key_hash_value = "V3Kwe46nKc3xLv96+iJ707YfZfFvlObta8TQcx2gpm0=" - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=key) - expected = { - "X-Goog-Encryption-Algorithm": "AES256", - "X-Goog-Encryption-Key": header_key_value, - "X-Goog-Encryption-Key-Sha256": header_key_hash_value, - } - self.assertEqual(blob._encryption_headers(), expected) - - def test__query_params_default(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertEqual(blob._query_params, {}) - - def test__query_params_w_user_project(self): - user_project = "user-project-123" - BLOB_NAME = "BLOB" - bucket = _Bucket(user_project=user_project) - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertEqual(blob._query_params, {"userProject": user_project}) - - def test__query_params_w_generation(self): - generation = 123456 - BLOB_NAME = "BLOB" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket, generation=generation) 
- self.assertEqual(blob._query_params, {"generation": generation}) - - def test_public_url(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertEqual( - blob.public_url, "https://storage.googleapis.com/name/%s" % BLOB_NAME - ) - - def test_public_url_w_slash_in_name(self): - BLOB_NAME = "parent/child" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertEqual( - blob.public_url, "https://storage.googleapis.com/name/parent/child" - ) - - def test_public_url_w_tilde_in_name(self): - BLOB_NAME = "foo~bar" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.public_url, "https://storage.googleapis.com/name/foo~bar") - - def test_public_url_with_non_ascii(self): - blob_name = u"winter \N{snowman}" - bucket = _Bucket() - blob = self._make_one(blob_name, bucket=bucket) - expected_url = "https://storage.googleapis.com/name/winter%20%E2%98%83" - self.assertEqual(blob.public_url, expected_url) - - def test_generate_signed_url_w_invalid_version(self): - BLOB_NAME = "blob-name" - EXPIRATION = "2014-10-16T20:34:37.000Z" - connection = _Connection() - client = _Client(connection) - bucket = _Bucket(client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - with self.assertRaises(ValueError): - blob.generate_signed_url(EXPIRATION, version="nonesuch") - - def _generate_signed_url_helper( - self, - version=None, - blob_name="blob-name", - api_access_endpoint=None, - method="GET", - content_md5=None, - content_type=None, - response_type=None, - response_disposition=None, - generation=None, - headers=None, - query_parameters=None, - credentials=None, - expiration=None, - encryption_key=None, - access_token=None, - service_account_email=None, - ): - from six.moves.urllib import parse - from google.cloud._helpers import UTC - from google.cloud.storage.blob import _API_ACCESS_ENDPOINT - from google.cloud.storage.blob import _get_encryption_headers - - api_access_endpoint = api_access_endpoint or _API_ACCESS_ENDPOINT - - delta = datetime.timedelta(hours=1) - - if expiration is None: - expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) + delta - - connection = _Connection() - client = _Client(connection) - bucket = _Bucket(client) - blob = self._make_one(blob_name, bucket=bucket, encryption_key=encryption_key) - - if version is None: - effective_version = "v2" - else: - effective_version = version - - to_patch = "google.cloud.storage.blob.generate_signed_url_{}".format( - effective_version - ) - - with mock.patch(to_patch) as signer: - signed_uri = blob.generate_signed_url( - expiration=expiration, - api_access_endpoint=api_access_endpoint, - method=method, - credentials=credentials, - content_md5=content_md5, - content_type=content_type, - response_type=response_type, - response_disposition=response_disposition, - generation=generation, - headers=headers, - query_parameters=query_parameters, - version=version, - access_token=access_token, - service_account_email=service_account_email, - ) - - self.assertEqual(signed_uri, signer.return_value) - - if credentials is None: - expected_creds = _Connection.credentials - else: - expected_creds = credentials - - encoded_name = blob_name.encode("utf-8") - expected_resource = "/name/{}".format(parse.quote(encoded_name, safe=b"/~")) - if encryption_key is not None: - expected_headers = headers or {} - if effective_version == "v2": - expected_headers["X-Goog-Encryption-Algorithm"] = "AES256" - else: - 
expected_headers.update(_get_encryption_headers(encryption_key)) - else: - expected_headers = headers - - expected_kwargs = { - "resource": expected_resource, - "expiration": expiration, - "api_access_endpoint": api_access_endpoint, - "method": method.upper(), - "content_md5": content_md5, - "content_type": content_type, - "response_type": response_type, - "response_disposition": response_disposition, - "generation": generation, - "headers": expected_headers, - "query_parameters": query_parameters, - "access_token": access_token, - "service_account_email": service_account_email, - } - signer.assert_called_once_with(expected_creds, **expected_kwargs) - - def test_generate_signed_url_no_version_passed_warning(self): - self._generate_signed_url_helper() - - def _generate_signed_url_v2_helper(self, **kw): - version = "v2" - self._generate_signed_url_helper(version, **kw) - - def test_generate_signed_url_v2_w_defaults(self): - self._generate_signed_url_v2_helper() - - def test_generate_signed_url_v2_w_expiration(self): - from google.cloud._helpers import UTC - - expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) - self._generate_signed_url_v2_helper(expiration=expiration) - - def test_generate_signed_url_v2_w_non_ascii_name(self): - BLOB_NAME = u"\u0410\u043a\u043a\u043e\u0440\u0434\u044b.txt" - self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) - - def test_generate_signed_url_v2_w_slash_in_name(self): - BLOB_NAME = "parent/child" - self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) - - def test_generate_signed_url_v2_w_tilde_in_name(self): - BLOB_NAME = "foo~bar" - self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) - - def test_generate_signed_url_v2_w_endpoint(self): - self._generate_signed_url_v2_helper( - api_access_endpoint="https://api.example.com/v1" - ) - - def test_generate_signed_url_v2_w_method(self): - self._generate_signed_url_v2_helper(method="POST") - - def test_generate_signed_url_v2_w_lowercase_method(self): - self._generate_signed_url_v2_helper(method="get") - - def test_generate_signed_url_v2_w_content_md5(self): - self._generate_signed_url_v2_helper(content_md5="FACEDACE") - - def test_generate_signed_url_v2_w_content_type(self): - self._generate_signed_url_v2_helper(content_type="text.html") - - def test_generate_signed_url_v2_w_response_type(self): - self._generate_signed_url_v2_helper(response_type="text.html") - - def test_generate_signed_url_v2_w_response_disposition(self): - self._generate_signed_url_v2_helper(response_disposition="inline") - - def test_generate_signed_url_v2_w_generation(self): - self._generate_signed_url_v2_helper(generation=12345) - - def test_generate_signed_url_v2_w_headers(self): - self._generate_signed_url_v2_helper(headers={"x-goog-foo": "bar"}) - - def test_generate_signed_url_v2_w_csek(self): - self._generate_signed_url_v2_helper(encryption_key=os.urandom(32)) - - def test_generate_signed_url_v2_w_csek_and_headers(self): - self._generate_signed_url_v2_helper( - encryption_key=os.urandom(32), headers={"x-goog-foo": "bar"} - ) - - def test_generate_signed_url_v2_w_credentials(self): - credentials = object() - self._generate_signed_url_v2_helper(credentials=credentials) - - def _generate_signed_url_v4_helper(self, **kw): - version = "v4" - self._generate_signed_url_helper(version, **kw) - - def test_generate_signed_url_v4_w_defaults(self): - self._generate_signed_url_v4_helper() - - def test_generate_signed_url_v4_w_non_ascii_name(self): - BLOB_NAME = u"\u0410\u043a\u043a\u043e\u0440\u0434\u044b.txt" - 
self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) - - def test_generate_signed_url_v4_w_slash_in_name(self): - BLOB_NAME = "parent/child" - self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) - - def test_generate_signed_url_v4_w_tilde_in_name(self): - BLOB_NAME = "foo~bar" - self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) - - def test_generate_signed_url_v4_w_endpoint(self): - self._generate_signed_url_v4_helper( - api_access_endpoint="https://api.example.com/v1" - ) - - def test_generate_signed_url_v4_w_method(self): - self._generate_signed_url_v4_helper(method="POST") - - def test_generate_signed_url_v4_w_lowercase_method(self): - self._generate_signed_url_v4_helper(method="get") - - def test_generate_signed_url_v4_w_content_md5(self): - self._generate_signed_url_v4_helper(content_md5="FACEDACE") - - def test_generate_signed_url_v4_w_content_type(self): - self._generate_signed_url_v4_helper(content_type="text.html") - - def test_generate_signed_url_v4_w_response_type(self): - self._generate_signed_url_v4_helper(response_type="text.html") - - def test_generate_signed_url_v4_w_response_disposition(self): - self._generate_signed_url_v4_helper(response_disposition="inline") - - def test_generate_signed_url_v4_w_generation(self): - self._generate_signed_url_v4_helper(generation=12345) - - def test_generate_signed_url_v4_w_headers(self): - self._generate_signed_url_v4_helper(headers={"x-goog-foo": "bar"}) - - def test_generate_signed_url_v4_w_csek(self): - self._generate_signed_url_v4_helper(encryption_key=os.urandom(32)) - - def test_generate_signed_url_v4_w_csek_and_headers(self): - self._generate_signed_url_v4_helper( - encryption_key=os.urandom(32), headers={"x-goog-foo": "bar"} - ) - - def test_generate_signed_url_v4_w_credentials(self): - credentials = object() - self._generate_signed_url_v4_helper(credentials=credentials) - - def test_exists_miss(self): - NONESUCH = "nonesuch" - not_found_response = ({"status": http_client.NOT_FOUND}, b"") - connection = _Connection(not_found_response) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._make_one(NONESUCH, bucket=bucket) - self.assertFalse(blob.exists()) - self.assertEqual(len(connection._requested), 1) - self.assertEqual( - connection._requested[0], - { - "method": "GET", - "path": "/b/name/o/{}".format(NONESUCH), - "query_params": {"fields": "name"}, - "_target_object": None, - }, - ) - - def test_exists_hit_w_user_project(self): - BLOB_NAME = "blob-name" - USER_PROJECT = "user-project-123" - found_response = ({"status": http_client.OK}, b"") - connection = _Connection(found_response) - client = _Client(connection) - bucket = _Bucket(client, user_project=USER_PROJECT) - blob = self._make_one(BLOB_NAME, bucket=bucket) - bucket._blobs[BLOB_NAME] = 1 - self.assertTrue(blob.exists()) - self.assertEqual(len(connection._requested), 1) - self.assertEqual( - connection._requested[0], - { - "method": "GET", - "path": "/b/name/o/{}".format(BLOB_NAME), - "query_params": {"fields": "name", "userProject": USER_PROJECT}, - "_target_object": None, - }, - ) - - def test_exists_hit_w_generation(self): - BLOB_NAME = "blob-name" - GENERATION = 123456 - found_response = ({"status": http_client.OK}, b"") - connection = _Connection(found_response) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION) - bucket._blobs[BLOB_NAME] = 1 - self.assertTrue(blob.exists()) - self.assertEqual(len(connection._requested), 1) - self.assertEqual( - 
connection._requested[0], - { - "method": "GET", - "path": "/b/name/o/{}".format(BLOB_NAME), - "query_params": {"fields": "name", "generation": GENERATION}, - "_target_object": None, - }, - ) - - def test_delete_wo_generation(self): - BLOB_NAME = "blob-name" - not_found_response = ({"status": http_client.NOT_FOUND}, b"") - connection = _Connection(not_found_response) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - bucket._blobs[BLOB_NAME] = 1 - blob.delete() - self.assertFalse(blob.exists()) - self.assertEqual(bucket._deleted, [(BLOB_NAME, None, None)]) - - def test_delete_w_generation(self): - BLOB_NAME = "blob-name" - GENERATION = 123456 - not_found_response = ({"status": http_client.NOT_FOUND}, b"") - connection = _Connection(not_found_response) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION) - bucket._blobs[BLOB_NAME] = 1 - blob.delete() - self.assertFalse(blob.exists()) - self.assertEqual(bucket._deleted, [(BLOB_NAME, None, GENERATION)]) - - def test__get_transport(self): - client = mock.Mock(spec=[u"_credentials", "_http"]) - client._http = mock.sentinel.transport - blob = self._make_one(u"blob-name", bucket=None) - - transport = blob._get_transport(client) - - self.assertIs(transport, mock.sentinel.transport) - - def test__get_download_url_with_media_link(self): - blob_name = "something.txt" - bucket = _Bucket(name="IRRELEVANT") - blob = self._make_one(blob_name, bucket=bucket) - media_link = "http://test.invalid" - # Set the media link on the blob - blob._properties["mediaLink"] = media_link - - download_url = blob._get_download_url() - self.assertEqual(download_url, media_link) - - def test__get_download_url_with_media_link_w_user_project(self): - blob_name = "something.txt" - user_project = "user-project-123" - bucket = _Bucket(name="IRRELEVANT", user_project=user_project) - blob = self._make_one(blob_name, bucket=bucket) - media_link = "http://test.invalid" - # Set the media link on the blob - blob._properties["mediaLink"] = media_link - - download_url = blob._get_download_url() - self.assertEqual( - download_url, "{}?userProject={}".format(media_link, user_project) - ) - - def test__get_download_url_on_the_fly(self): - blob_name = "bzzz-fly.txt" - bucket = _Bucket(name="buhkit") - blob = self._make_one(blob_name, bucket=bucket) - - self.assertIsNone(blob.media_link) - download_url = blob._get_download_url() - expected_url = ( - "https://storage.googleapis.com/download/storage/v1/b/" - "buhkit/o/bzzz-fly.txt?alt=media" - ) - self.assertEqual(download_url, expected_url) - - def test__get_download_url_on_the_fly_with_generation(self): - blob_name = "pretend.txt" - bucket = _Bucket(name="fictional") - blob = self._make_one(blob_name, bucket=bucket) - generation = 1493058489532987 - # Set the media link on the blob - blob._properties["generation"] = str(generation) - - self.assertIsNone(blob.media_link) - download_url = blob._get_download_url() - expected_url = ( - "https://storage.googleapis.com/download/storage/v1/b/" - "fictional/o/pretend.txt?alt=media&generation=1493058489532987" - ) - self.assertEqual(download_url, expected_url) - - def test__get_download_url_on_the_fly_with_user_project(self): - blob_name = "pretend.txt" - user_project = "user-project-123" - bucket = _Bucket(name="fictional", user_project=user_project) - blob = self._make_one(blob_name, bucket=bucket) - - self.assertIsNone(blob.media_link) - download_url = 
blob._get_download_url() - expected_url = ( - "https://storage.googleapis.com/download/storage/v1/b/" - "fictional/o/pretend.txt?alt=media&userProject={}".format(user_project) - ) - self.assertEqual(download_url, expected_url) - - def test__get_download_url_on_the_fly_with_kms_key_name(self): - kms_resource = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - blob_name = "bzzz-fly.txt" - bucket = _Bucket(name="buhkit") - blob = self._make_one(blob_name, bucket=bucket, kms_key_name=kms_resource) - - self.assertIsNone(blob.media_link) - download_url = blob._get_download_url() - expected_url = ( - "https://storage.googleapis.com/download/storage/v1/b/" - "buhkit/o/bzzz-fly.txt?alt=media" - ) - self.assertEqual(download_url, expected_url) - - @staticmethod - def _mock_requests_response(status_code, headers, content=b""): - import requests - - response = requests.Response() - response.status_code = status_code - response.headers.update(headers) - response.raw = None - response._content = content - - response.request = requests.Request("POST", "http://example.com").prepare() - return response - - def _do_download_helper_wo_chunks(self, w_range, raw_download): - blob_name = "blob-name" - client = mock.Mock() - bucket = _Bucket(client) - blob = self._make_one(blob_name, bucket=bucket) - self.assertIsNone(blob.chunk_size) - - transport = object() - file_obj = io.BytesIO() - download_url = "http://test.invalid" - headers = {} - - if raw_download: - patch = mock.patch("google.cloud.storage.blob.RawDownload") - else: - patch = mock.patch("google.cloud.storage.blob.Download") - - with patch as patched: - if w_range: - blob._do_download( - transport, - file_obj, - download_url, - headers, - start=1, - end=3, - raw_download=raw_download, - ) - else: - blob._do_download( - transport, - file_obj, - download_url, - headers, - raw_download=raw_download, - ) - - if w_range: - patched.assert_called_once_with( - download_url, stream=file_obj, headers=headers, start=1, end=3 - ) - else: - patched.assert_called_once_with( - download_url, stream=file_obj, headers=headers, start=None, end=None - ) - patched.return_value.consume.assert_called_once_with(transport) - - def test__do_download_wo_chunks_wo_range_wo_raw(self): - self._do_download_helper_wo_chunks(w_range=False, raw_download=False) - - def test__do_download_wo_chunks_w_range_wo_raw(self): - self._do_download_helper_wo_chunks(w_range=True, raw_download=False) - - def test__do_download_wo_chunks_wo_range_w_raw(self): - self._do_download_helper_wo_chunks(w_range=False, raw_download=True) - - def test__do_download_wo_chunks_w_range_w_raw(self): - self._do_download_helper_wo_chunks(w_range=True, raw_download=True) - - def _do_download_helper_w_chunks(self, w_range, raw_download): - blob_name = "blob-name" - client = mock.Mock(_credentials=_make_credentials(), spec=["_credentials"]) - bucket = _Bucket(client) - blob = self._make_one(blob_name, bucket=bucket) - blob._CHUNK_SIZE_MULTIPLE = 1 - chunk_size = blob.chunk_size = 3 - - transport = object() - file_obj = io.BytesIO() - download_url = "http://test.invalid" - headers = {} - - download = mock.Mock(finished=False, spec=["finished", "consume_next_chunk"]) - - def side_effect(_): - download.finished = True - - download.consume_next_chunk.side_effect = side_effect - - if raw_download: - patch = mock.patch("google.cloud.storage.blob.RawChunkedDownload") - else: - patch = mock.patch("google.cloud.storage.blob.ChunkedDownload") - - with patch as patched: - 
patched.return_value = download - if w_range: - blob._do_download( - transport, - file_obj, - download_url, - headers, - start=1, - end=3, - raw_download=raw_download, - ) - else: - blob._do_download( - transport, - file_obj, - download_url, - headers, - raw_download=raw_download, - ) - - if w_range: - patched.assert_called_once_with( - download_url, chunk_size, file_obj, headers=headers, start=1, end=3 - ) - else: - patched.assert_called_once_with( - download_url, chunk_size, file_obj, headers=headers, start=0, end=None - ) - download.consume_next_chunk.assert_called_once_with(transport) - - def test__do_download_w_chunks_wo_range_wo_raw(self): - self._do_download_helper_w_chunks(w_range=False, raw_download=False) - - def test__do_download_w_chunks_w_range_wo_raw(self): - self._do_download_helper_w_chunks(w_range=True, raw_download=False) - - def test__do_download_w_chunks_wo_range_w_raw(self): - self._do_download_helper_w_chunks(w_range=False, raw_download=True) - - def test__do_download_w_chunks_w_range_w_raw(self): - self._do_download_helper_w_chunks(w_range=True, raw_download=True) - - def test_download_to_file_with_failure(self): - import requests - from google.resumable_media import InvalidResponse - from google.cloud import exceptions - - raw_response = requests.Response() - raw_response.status_code = http_client.NOT_FOUND - raw_request = requests.Request("GET", "http://example.com") - raw_response.request = raw_request.prepare() - grmp_response = InvalidResponse(raw_response) - - blob_name = "blob-name" - media_link = "http://test.invalid" - client = mock.Mock(spec=[u"_http"]) - bucket = _Bucket(client) - blob = self._make_one(blob_name, bucket=bucket) - blob._properties["mediaLink"] = media_link - blob._do_download = mock.Mock() - blob._do_download.side_effect = grmp_response - - file_obj = io.BytesIO() - with self.assertRaises(exceptions.NotFound): - blob.download_to_file(file_obj) - - self.assertEqual(file_obj.tell(), 0) - - headers = {"accept-encoding": "gzip"} - blob._do_download.assert_called_once_with( - client._http, file_obj, media_link, headers, None, None, False - ) - - def test_download_to_file_wo_media_link(self): - blob_name = "blob-name" - client = mock.Mock(spec=[u"_http"]) - bucket = _Bucket(client) - blob = self._make_one(blob_name, bucket=bucket) - blob._do_download = mock.Mock() - file_obj = io.BytesIO() - - blob.download_to_file(file_obj) - - # Make sure the media link is still unknown. 
- self.assertIsNone(blob.media_link) - - expected_url = ( - "https://storage.googleapis.com/download/storage/v1/b/" - "name/o/blob-name?alt=media" - ) - headers = {"accept-encoding": "gzip"} - blob._do_download.assert_called_once_with( - client._http, file_obj, expected_url, headers, None, None, False - ) - - def _download_to_file_helper(self, use_chunks, raw_download): - blob_name = "blob-name" - client = mock.Mock(spec=[u"_http"]) - bucket = _Bucket(client) - media_link = "http://example.com/media/" - properties = {"mediaLink": media_link} - blob = self._make_one(blob_name, bucket=bucket, properties=properties) - if use_chunks: - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = 3 - blob._do_download = mock.Mock() - - file_obj = io.BytesIO() - if raw_download: - blob.download_to_file(file_obj, raw_download=True) - else: - blob.download_to_file(file_obj) - - headers = {"accept-encoding": "gzip"} - blob._do_download.assert_called_once_with( - client._http, file_obj, media_link, headers, None, None, raw_download - ) - - def test_download_to_file_wo_chunks_wo_raw(self): - self._download_to_file_helper(use_chunks=False, raw_download=False) - - def test_download_to_file_w_chunks_wo_raw(self): - self._download_to_file_helper(use_chunks=True, raw_download=False) - - def test_download_to_file_wo_chunks_w_raw(self): - self._download_to_file_helper(use_chunks=False, raw_download=True) - - def test_download_to_file_w_chunks_w_raw(self): - self._download_to_file_helper(use_chunks=True, raw_download=True) - - def _download_to_filename_helper(self, updated, raw_download): - import os - import time - from google.cloud._testing import _NamedTemporaryFile - - blob_name = "blob-name" - client = mock.Mock(spec=["_http"]) - bucket = _Bucket(client) - media_link = "http://example.com/media/" - properties = {"mediaLink": media_link} - if updated is not None: - properties["updated"] = updated - - blob = self._make_one(blob_name, bucket=bucket, properties=properties) - blob._do_download = mock.Mock() - - with _NamedTemporaryFile() as temp: - blob.download_to_filename(temp.name, raw_download=raw_download) - if updated is None: - self.assertIsNone(blob.updated) - else: - mtime = os.path.getmtime(temp.name) - updated_time = time.mktime(blob.updated.timetuple()) - self.assertEqual(mtime, updated_time) - - headers = {"accept-encoding": "gzip"} - blob._do_download.assert_called_once_with( - client._http, mock.ANY, media_link, headers, None, None, raw_download - ) - stream = blob._do_download.mock_calls[0].args[1] - self.assertEqual(stream.name, temp.name) - - def test_download_to_filename_w_updated_wo_raw(self): - updated = "2014-12-06T13:13:50.690Z" - self._download_to_filename_helper(updated=updated, raw_download=False) - - def test_download_to_filename_wo_updated_wo_raw(self): - self._download_to_filename_helper(updated=None, raw_download=False) - - def test_download_to_filename_w_updated_w_raw(self): - updated = "2014-12-06T13:13:50.690Z" - self._download_to_filename_helper(updated=updated, raw_download=True) - - def test_download_to_filename_wo_updated_w_raw(self): - self._download_to_filename_helper(updated=None, raw_download=True) - - def test_download_to_filename_corrupted(self): - from google.resumable_media import DataCorruption - - blob_name = "blob-name" - client = mock.Mock(spec=["_http"]) - bucket = _Bucket(client) - media_link = "http://example.com/media/" - properties = {"mediaLink": media_link} - - blob = self._make_one(blob_name, bucket=bucket, properties=properties) - blob._do_download = mock.Mock() 
- blob._do_download.side_effect = DataCorruption("testing") - - # Try to download into a temporary file (don't use - # `_NamedTemporaryFile` it will try to remove after the file is - # already removed) - filehandle, filename = tempfile.mkstemp() - os.close(filehandle) - self.assertTrue(os.path.exists(filename)) - - with self.assertRaises(DataCorruption): - blob.download_to_filename(filename) - - # Make sure the file was cleaned up. - self.assertFalse(os.path.exists(filename)) - - headers = {"accept-encoding": "gzip"} - blob._do_download.assert_called_once_with( - client._http, mock.ANY, media_link, headers, None, None, False - ) - stream = blob._do_download.mock_calls[0].args[1] - self.assertEqual(stream.name, filename) - - def test_download_to_filename_w_key(self): - from google.cloud._testing import _NamedTemporaryFile - from google.cloud.storage.blob import _get_encryption_headers - - blob_name = "blob-name" - # Create a fake client/bucket and use them in the Blob() constructor. - client = mock.Mock(spec=["_http"]) - bucket = _Bucket(client) - media_link = "http://example.com/media/" - properties = {"mediaLink": media_link} - key = b"aa426195405adee2c8081bb9e7e74b19" - blob = self._make_one( - blob_name, bucket=bucket, properties=properties, encryption_key=key - ) - blob._do_download = mock.Mock() - - with _NamedTemporaryFile() as temp: - blob.download_to_filename(temp.name) - - headers = {"accept-encoding": "gzip"} - headers.update(_get_encryption_headers(key)) - blob._do_download.assert_called_once_with( - client._http, mock.ANY, media_link, headers, None, None, False - ) - stream = blob._do_download.mock_calls[0].args[1] - self.assertEqual(stream.name, temp.name) - - def _download_as_string_helper(self, raw_download): - blob_name = "blob-name" - client = mock.Mock(spec=["_http"]) - bucket = _Bucket(client) - media_link = "http://example.com/media/" - properties = {"mediaLink": media_link} - blob = self._make_one(blob_name, bucket=bucket, properties=properties) - blob._do_download = mock.Mock() - - fetched = blob.download_as_string(raw_download=raw_download) - self.assertEqual(fetched, b"") - - headers = {"accept-encoding": "gzip"} - blob._do_download.assert_called_once_with( - client._http, mock.ANY, media_link, headers, None, None, raw_download - ) - stream = blob._do_download.mock_calls[0].args[1] - self.assertIsInstance(stream, io.BytesIO) - - def test_download_as_string_wo_raw(self): - self._download_as_string_helper(raw_download=False) - - def test_download_as_string_w_raw(self): - self._download_as_string_helper(raw_download=True) - - def test__get_content_type_explicit(self): - blob = self._make_one(u"blob-name", bucket=None) - - content_type = u"text/plain" - return_value = blob._get_content_type(content_type) - self.assertEqual(return_value, content_type) - - def test__get_content_type_from_blob(self): - blob = self._make_one(u"blob-name", bucket=None) - blob.content_type = u"video/mp4" - - return_value = blob._get_content_type(None) - self.assertEqual(return_value, blob.content_type) - - def test__get_content_type_from_filename(self): - blob = self._make_one(u"blob-name", bucket=None) - - return_value = blob._get_content_type(None, filename="archive.tar") - self.assertEqual(return_value, "application/x-tar") - - def test__get_content_type_default(self): - blob = self._make_one(u"blob-name", bucket=None) - - return_value = blob._get_content_type(None) - self.assertEqual(return_value, u"application/octet-stream") - - def test__get_writable_metadata_no_changes(self): - name = 
u"blob-name" - blob = self._make_one(name, bucket=None) - - object_metadata = blob._get_writable_metadata() - expected = {"name": name} - self.assertEqual(object_metadata, expected) - - def test__get_writable_metadata_with_changes(self): - name = u"blob-name" - blob = self._make_one(name, bucket=None) - blob.storage_class = "NEARLINE" - blob.cache_control = "max-age=3600" - blob.metadata = {"color": "red"} - - object_metadata = blob._get_writable_metadata() - expected = { - "cacheControl": blob.cache_control, - "metadata": blob.metadata, - "name": name, - "storageClass": blob.storage_class, - } - self.assertEqual(object_metadata, expected) - - def test__get_writable_metadata_unwritable_field(self): - name = u"blob-name" - properties = {"updated": "2016-10-16T18:18:18.181Z"} - blob = self._make_one(name, bucket=None, properties=properties) - # Fake that `updated` is in changes. - blob._changes.add("updated") - - object_metadata = blob._get_writable_metadata() - expected = {"name": name} - self.assertEqual(object_metadata, expected) - - def test__get_upload_arguments(self): - name = u"blob-name" - key = b"[pXw@,p@@AfBfrR3x-2b2SCHR,.?YwRO" - blob = self._make_one(name, bucket=None, encryption_key=key) - blob.content_disposition = "inline" - - content_type = u"image/jpeg" - info = blob._get_upload_arguments(content_type) - - headers, object_metadata, new_content_type = info - header_key_value = "W3BYd0AscEBAQWZCZnJSM3gtMmIyU0NIUiwuP1l3Uk8=" - header_key_hash_value = "G0++dxF4q5rG4o9kE8gvEKn15RH6wLm0wXV1MgAlXOg=" - expected_headers = { - "X-Goog-Encryption-Algorithm": "AES256", - "X-Goog-Encryption-Key": header_key_value, - "X-Goog-Encryption-Key-Sha256": header_key_hash_value, - } - self.assertEqual(headers, expected_headers) - expected_metadata = { - "contentDisposition": blob.content_disposition, - "name": name, - } - self.assertEqual(object_metadata, expected_metadata) - self.assertEqual(new_content_type, content_type) - - def _mock_transport(self, status_code, headers, content=b""): - fake_transport = mock.Mock(spec=["request"]) - fake_response = self._mock_requests_response( - status_code, headers, content=content - ) - fake_transport.request.return_value = fake_response - return fake_transport - - def _do_multipart_success( - self, - mock_get_boundary, - size=None, - num_retries=None, - user_project=None, - predefined_acl=None, - kms_key_name=None, - ): - from six.moves.urllib.parse import urlencode - - bucket = _Bucket(name="w00t", user_project=user_project) - blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name) - self.assertIsNone(blob.chunk_size) - - # Create mocks to be checked for doing transport. - transport = self._mock_transport(http_client.OK, {}) - - # Create some mock arguments. - client = mock.Mock(_http=transport, spec=["_http"]) - data = b"data here hear hier" - stream = io.BytesIO(data) - content_type = u"application/xml" - response = blob._do_multipart_upload( - client, stream, content_type, size, num_retries, predefined_acl - ) - - # Check the mocks and the returned value. 
- self.assertIs(response, transport.request.return_value) - if size is None: - data_read = data - self.assertEqual(stream.tell(), len(data)) - else: - data_read = data[:size] - self.assertEqual(stream.tell(), size) - - mock_get_boundary.assert_called_once_with() - - upload_url = ( - "https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o" - ) - - qs_params = [("uploadType", "multipart")] - - if user_project is not None: - qs_params.append(("userProject", user_project)) - - if predefined_acl is not None: - qs_params.append(("predefinedAcl", predefined_acl)) - - if kms_key_name is not None: - qs_params.append(("kmsKeyName", kms_key_name)) - - upload_url += "?" + urlencode(qs_params) - - payload = ( - b"--==0==\r\n" - + b"content-type: application/json; charset=UTF-8\r\n\r\n" - + b'{"name": "blob-name"}\r\n' - + b"--==0==\r\n" - + b"content-type: application/xml\r\n\r\n" - + data_read - + b"\r\n--==0==--" - ) - headers = {"content-type": b'multipart/related; boundary="==0=="'} - transport.request.assert_called_once_with( - "POST", upload_url, data=payload, headers=headers, timeout=mock.ANY - ) - - @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") - def test__do_multipart_upload_no_size(self, mock_get_boundary): - self._do_multipart_success(mock_get_boundary, predefined_acl="private") - - @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") - def test__do_multipart_upload_with_size(self, mock_get_boundary): - self._do_multipart_success(mock_get_boundary, size=10) - - @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") - def test__do_multipart_upload_with_user_project(self, mock_get_boundary): - user_project = "user-project-123" - self._do_multipart_success(mock_get_boundary, user_project=user_project) - - @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") - def test__do_multipart_upload_with_kms(self, mock_get_boundary): - kms_resource = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - self._do_multipart_success(mock_get_boundary, kms_key_name=kms_resource) - - @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") - def test__do_multipart_upload_with_retry(self, mock_get_boundary): - self._do_multipart_success(mock_get_boundary, num_retries=8) - - def test__do_multipart_upload_bad_size(self): - blob = self._make_one(u"blob-name", bucket=None) - - data = b"data here hear hier" - stream = io.BytesIO(data) - size = 50 - self.assertGreater(size, len(data)) - - with self.assertRaises(ValueError) as exc_info: - blob._do_multipart_upload(None, stream, None, size, None, None) - - exc_contents = str(exc_info.exception) - self.assertIn("was specified but the file-like object only had", exc_contents) - self.assertEqual(stream.tell(), len(data)) - - def _initiate_resumable_helper( - self, - size=None, - extra_headers=None, - chunk_size=None, - num_retries=None, - user_project=None, - predefined_acl=None, - blob_chunk_size=786432, - kms_key_name=None, - ): - from six.moves.urllib.parse import urlencode - from google.resumable_media.requests import ResumableUpload - from google.cloud.storage.blob import _DEFAULT_CHUNKSIZE - - bucket = _Bucket(name="whammy", user_project=user_project) - blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name) - blob.metadata = {"rook": "takes knight"} - blob.chunk_size = blob_chunk_size - if blob_chunk_size is not None: - 
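Editor's note: _do_multipart_success above reconstructs both the upload URL (the base upload endpoint plus uploadType=multipart and any optional userProject, predefinedAcl, or kmsKeyName query parameters) and the multipart/related body (a JSON metadata part followed by the data part, separated by the mocked boundary). A hedged sketch of the same construction, with hypothetical bucket/blob names and an invented helper name:

import json
from urllib.parse import urlencode

def build_multipart_request(bucket_path, blob_name, data, content_type,
                            boundary=b"==0==", user_project=None,
                            predefined_acl=None, kms_key_name=None):
    """Assemble the URL, payload, and headers the multipart tests above expect."""
    url = "https://storage.googleapis.com/upload/storage/v1" + bucket_path + "/o"
    params = [("uploadType", "multipart")]
    if user_project is not None:
        params.append(("userProject", user_project))
    if predefined_acl is not None:
        params.append(("predefinedAcl", predefined_acl))
    if kms_key_name is not None:
        params.append(("kmsKeyName", kms_key_name))
    url += "?" + urlencode(params)

    metadata = json.dumps({"name": blob_name}).encode("utf-8")
    payload = (
        b"--" + boundary + b"\r\n"
        + b"content-type: application/json; charset=UTF-8\r\n\r\n"
        + metadata + b"\r\n"
        + b"--" + boundary + b"\r\n"
        + b"content-type: " + content_type.encode("ascii") + b"\r\n\r\n"
        + data
        + b"\r\n--" + boundary + b"--"
    )
    headers = {"content-type": b'multipart/related; boundary="' + boundary + b'"'}
    return url, payload, headers

# e.g. build_multipart_request("/b/w00t", "blob-name", b"data here hear hier",
#                              "application/xml")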
self.assertIsNotNone(blob.chunk_size) - else: - self.assertIsNone(blob.chunk_size) - - # Need to make sure **same** dict is used because ``json.dumps()`` - # will depend on the hash order. - object_metadata = blob._get_writable_metadata() - blob._get_writable_metadata = mock.Mock(return_value=object_metadata, spec=[]) - - # Create mocks to be checked for doing transport. - resumable_url = "http://test.invalid?upload_id=hey-you" - response_headers = {"location": resumable_url} - transport = self._mock_transport(http_client.OK, response_headers) - - # Create some mock arguments and call the method under test. - client = mock.Mock(_http=transport, spec=[u"_http"]) - data = b"hello hallo halo hi-low" - stream = io.BytesIO(data) - content_type = u"text/plain" - upload, transport = blob._initiate_resumable_upload( - client, - stream, - content_type, - size, - num_retries, - extra_headers=extra_headers, - chunk_size=chunk_size, - predefined_acl=predefined_acl, - ) - - # Check the returned values. - self.assertIsInstance(upload, ResumableUpload) - - upload_url = ( - "https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o" - ) - qs_params = [("uploadType", "resumable")] - - if user_project is not None: - qs_params.append(("userProject", user_project)) - - if predefined_acl is not None: - qs_params.append(("predefinedAcl", predefined_acl)) - - if kms_key_name is not None: - qs_params.append(("kmsKeyName", kms_key_name)) - - upload_url += "?" + urlencode(qs_params) - - self.assertEqual(upload.upload_url, upload_url) - if extra_headers is None: - self.assertEqual(upload._headers, {}) - else: - self.assertEqual(upload._headers, extra_headers) - self.assertIsNot(upload._headers, extra_headers) - self.assertFalse(upload.finished) - if chunk_size is None: - if blob_chunk_size is None: - self.assertEqual(upload._chunk_size, _DEFAULT_CHUNKSIZE) - else: - self.assertEqual(upload._chunk_size, blob.chunk_size) - else: - self.assertNotEqual(blob.chunk_size, chunk_size) - self.assertEqual(upload._chunk_size, chunk_size) - self.assertIs(upload._stream, stream) - if size is None: - self.assertIsNone(upload._total_bytes) - else: - self.assertEqual(upload._total_bytes, size) - self.assertEqual(upload._content_type, content_type) - self.assertEqual(upload.resumable_url, resumable_url) - retry_strategy = upload._retry_strategy - self.assertEqual(retry_strategy.max_sleep, 64.0) - if num_retries is None: - self.assertEqual(retry_strategy.max_cumulative_retry, 600.0) - self.assertIsNone(retry_strategy.max_retries) - else: - self.assertIsNone(retry_strategy.max_cumulative_retry) - self.assertEqual(retry_strategy.max_retries, num_retries) - self.assertIs(transport, transport) - # Make sure we never read from the stream. - self.assertEqual(stream.tell(), 0) - - # Check the mocks. 
- blob._get_writable_metadata.assert_called_once_with() - payload = json.dumps(object_metadata).encode("utf-8") - expected_headers = { - "content-type": "application/json; charset=UTF-8", - "x-upload-content-type": content_type, - } - if size is not None: - expected_headers["x-upload-content-length"] = str(size) - if extra_headers is not None: - expected_headers.update(extra_headers) - transport.request.assert_called_once_with( - "POST", upload_url, data=payload, headers=expected_headers, timeout=mock.ANY - ) - - def test__initiate_resumable_upload_no_size(self): - self._initiate_resumable_helper() - - def test__initiate_resumable_upload_with_size(self): - self._initiate_resumable_helper(size=10000) - - def test__initiate_resumable_upload_with_user_project(self): - user_project = "user-project-123" - self._initiate_resumable_helper(user_project=user_project) - - def test__initiate_resumable_upload_with_kms(self): - kms_resource = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - self._initiate_resumable_helper(kms_key_name=kms_resource) - - def test__initiate_resumable_upload_without_chunk_size(self): - self._initiate_resumable_helper(blob_chunk_size=None) - - def test__initiate_resumable_upload_with_chunk_size(self): - one_mb = 1048576 - self._initiate_resumable_helper(chunk_size=one_mb) - - def test__initiate_resumable_upload_with_extra_headers(self): - extra_headers = {"origin": "http://not-in-kansas-anymore.invalid"} - self._initiate_resumable_helper(extra_headers=extra_headers) - - def test__initiate_resumable_upload_with_retry(self): - self._initiate_resumable_helper(num_retries=11) - - def test__initiate_resumable_upload_with_predefined_acl(self): - self._initiate_resumable_helper(predefined_acl="private") - - def _make_resumable_transport(self, headers1, headers2, headers3, total_bytes): - from google import resumable_media - - fake_transport = mock.Mock(spec=["request"]) - - fake_response1 = self._mock_requests_response(http_client.OK, headers1) - fake_response2 = self._mock_requests_response( - resumable_media.PERMANENT_REDIRECT, headers2 - ) - json_body = '{{"size": "{:d}"}}'.format(total_bytes) - fake_response3 = self._mock_requests_response( - http_client.OK, headers3, content=json_body.encode("utf-8") - ) - - responses = [fake_response1, fake_response2, fake_response3] - fake_transport.request.side_effect = responses - return fake_transport, responses - - @staticmethod - def _do_resumable_upload_call0(blob, content_type, size=None, predefined_acl=None): - # First mock transport.request() does initiates upload. - upload_url = ( - "https://storage.googleapis.com/upload/storage/v1" - + blob.bucket.path - + "/o?uploadType=resumable" - ) - if predefined_acl is not None: - upload_url += "&predefinedAcl={}".format(predefined_acl) - expected_headers = { - "content-type": "application/json; charset=UTF-8", - "x-upload-content-type": content_type, - } - if size is not None: - expected_headers["x-upload-content-length"] = str(size) - payload = json.dumps({"name": blob.name}).encode("utf-8") - return mock.call( - "POST", upload_url, data=payload, headers=expected_headers, timeout=mock.ANY - ) - - @staticmethod - def _do_resumable_upload_call1( - blob, content_type, data, resumable_url, size=None, predefined_acl=None - ): - # Second mock transport.request() does sends first chunk. 
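Editor's note: _initiate_resumable_helper above verifies that starting a resumable upload is a single POST of the JSON object metadata, with the eventual data's content type and (when known) its size advertised via x-upload-content-type and x-upload-content-length, and the session URL returned in the Location header. A minimal sketch of that exchange with the requests library (an invented helper; an authorized session and real bucket path are assumed):

import json
import requests

def initiate_resumable_session(session, bucket_path, object_metadata,
                               content_type, size=None, origin=None):
    """POST the metadata and return the resumable session URL from Location."""
    url = ("https://storage.googleapis.com/upload/storage/v1"
           + bucket_path + "/o?uploadType=resumable")
    headers = {
        "content-type": "application/json; charset=UTF-8",
        "x-upload-content-type": content_type,
    }
    if size is not None:
        headers["x-upload-content-length"] = str(size)
    if origin is not None:
        headers["Origin"] = origin
    response = session.post(
        url, data=json.dumps(object_metadata).encode("utf-8"), headers=headers
    )
    response.raise_for_status()
    return response.headers["location"]

# e.g. initiate_resumable_session(requests.Session(), "/b/whammy",
#                                 {"name": "blob-name"}, "text/plain", size=10000)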
- if size is None: - content_range = "bytes 0-{:d}/*".format(blob.chunk_size - 1) - else: - content_range = "bytes 0-{:d}/{:d}".format(blob.chunk_size - 1, size) - - expected_headers = { - "content-type": content_type, - "content-range": content_range, - } - payload = data[: blob.chunk_size] - return mock.call( - "PUT", - resumable_url, - data=payload, - headers=expected_headers, - timeout=mock.ANY, - ) - - @staticmethod - def _do_resumable_upload_call2( - blob, content_type, data, resumable_url, total_bytes, predefined_acl=None - ): - # Third mock transport.request() does sends last chunk. - content_range = "bytes {:d}-{:d}/{:d}".format( - blob.chunk_size, total_bytes - 1, total_bytes - ) - expected_headers = { - "content-type": content_type, - "content-range": content_range, - } - payload = data[blob.chunk_size :] - return mock.call( - "PUT", - resumable_url, - data=payload, - headers=expected_headers, - timeout=mock.ANY, - ) - - def _do_resumable_helper( - self, use_size=False, num_retries=None, predefined_acl=None - ): - bucket = _Bucket(name="yesterday") - blob = self._make_one(u"blob-name", bucket=bucket) - blob.chunk_size = blob._CHUNK_SIZE_MULTIPLE - self.assertIsNotNone(blob.chunk_size) - - # Data to be uploaded. - data = b"" + (b"A" * blob.chunk_size) + b"" - total_bytes = len(data) - if use_size: - size = total_bytes - else: - size = None - - # Create mocks to be checked for doing transport. - resumable_url = "http://test.invalid?upload_id=and-then-there-was-1" - headers1 = {"location": resumable_url} - headers2 = {"range": "bytes=0-{:d}".format(blob.chunk_size - 1)} - transport, responses = self._make_resumable_transport( - headers1, headers2, {}, total_bytes - ) - - # Create some mock arguments and call the method under test. - client = mock.Mock(_http=transport, spec=["_http"]) - stream = io.BytesIO(data) - content_type = u"text/html" - response = blob._do_resumable_upload( - client, stream, content_type, size, num_retries, predefined_acl - ) - - # Check the returned values. - self.assertIs(response, responses[2]) - self.assertEqual(stream.tell(), total_bytes) - - # Check the mocks. - call0 = self._do_resumable_upload_call0( - blob, content_type, size=size, predefined_acl=predefined_acl - ) - call1 = self._do_resumable_upload_call1( - blob, - content_type, - data, - resumable_url, - size=size, - predefined_acl=predefined_acl, - ) - call2 = self._do_resumable_upload_call2( - blob, - content_type, - data, - resumable_url, - total_bytes, - predefined_acl=predefined_acl, - ) - self.assertEqual(transport.request.mock_calls, [call0, call1, call2]) - - def test__do_resumable_upload_no_size(self): - self._do_resumable_helper() - - def test__do_resumable_upload_with_size(self): - self._do_resumable_helper(use_size=True) - - def test__do_resumable_upload_with_retry(self): - self._do_resumable_helper(num_retries=6) - - def test__do_resumable_upload_with_predefined_acl(self): - self._do_resumable_helper(predefined_acl="private") - - def _do_upload_helper( - self, chunk_size=None, num_retries=None, predefined_acl=None, size=None - ): - from google.cloud.storage.blob import _MAX_MULTIPART_SIZE - - blob = self._make_one(u"blob-name", bucket=None) - - # Create a fake response. - response = mock.Mock(spec=[u"json"]) - response.json.return_value = mock.sentinel.json - # Mock **both** helpers. 
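Editor's note: the _do_resumable_upload_call* helpers above pin down the Content-Range headers for the chunked PUTs: the first chunk reports "bytes 0-(chunk_size-1)/*" when the total size is unknown (or "/total" when it is known), and the final chunk reports the closing byte range over the full size. A small sketch of that header computation (invented function name, same format strings as the tests):

def content_range_header(offset, chunk_len, total_bytes=None):
    """Content-Range value for a resumable-upload chunk starting at `offset`."""
    end = offset + chunk_len - 1
    total = "*" if total_bytes is None else str(total_bytes)
    return "bytes {:d}-{:d}/{}".format(offset, end, total)

# First chunk of an upload with unknown size and a 256 KiB chunk:
#   content_range_header(0, 262144)              -> "bytes 0-262143/*"
# Final chunk of an 84-byte object starting at byte 42:
#   content_range_header(42, 42, total_bytes=84) -> "bytes 42-83/84"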
- blob._do_multipart_upload = mock.Mock(return_value=response, spec=[]) - blob._do_resumable_upload = mock.Mock(return_value=response, spec=[]) - - if chunk_size is None: - self.assertIsNone(blob.chunk_size) - else: - blob.chunk_size = chunk_size - self.assertIsNotNone(blob.chunk_size) - - client = mock.sentinel.client - stream = mock.sentinel.stream - content_type = u"video/mp4" - if size is None: - size = 12345654321 - # Make the request and check the mocks. - created_json = blob._do_upload( - client, stream, content_type, size, num_retries, predefined_acl - ) - self.assertIs(created_json, mock.sentinel.json) - response.json.assert_called_once_with() - if size is not None and size <= _MAX_MULTIPART_SIZE: - blob._do_multipart_upload.assert_called_once_with( - client, stream, content_type, size, num_retries, predefined_acl - ) - blob._do_resumable_upload.assert_not_called() - else: - blob._do_multipart_upload.assert_not_called() - blob._do_resumable_upload.assert_called_once_with( - client, stream, content_type, size, num_retries, predefined_acl - ) - - def test__do_upload_uses_multipart(self): - from google.cloud.storage.blob import _MAX_MULTIPART_SIZE - - self._do_upload_helper(size=_MAX_MULTIPART_SIZE) - - def test__do_upload_uses_resumable(self): - from google.cloud.storage.blob import _MAX_MULTIPART_SIZE - - chunk_size = 256 * 1024 # 256KB - self._do_upload_helper(chunk_size=chunk_size, size=_MAX_MULTIPART_SIZE + 1) - - def test__do_upload_with_retry(self): - self._do_upload_helper(num_retries=20) - - def _upload_from_file_helper(self, side_effect=None, **kwargs): - from google.cloud._helpers import UTC - - blob = self._make_one("blob-name", bucket=None) - # Mock low-level upload helper on blob (it is tested elsewhere). - created_json = {"updated": "2017-01-01T09:09:09.081Z"} - blob._do_upload = mock.Mock(return_value=created_json, spec=[]) - if side_effect is not None: - blob._do_upload.side_effect = side_effect - # Make sure `updated` is empty before the request. - self.assertIsNone(blob.updated) - - data = b"data is here" - stream = io.BytesIO(data) - stream.seek(2) # Not at zero. - content_type = u"font/woff" - client = mock.sentinel.client - predefined_acl = kwargs.get("predefined_acl", None) - ret_val = blob.upload_from_file( - stream, size=len(data), content_type=content_type, client=client, **kwargs - ) - - # Check the response and side-effects. - self.assertIsNone(ret_val) - new_updated = datetime.datetime(2017, 1, 1, 9, 9, 9, 81000, tzinfo=UTC) - self.assertEqual(blob.updated, new_updated) - - # Check the mock. - num_retries = kwargs.get("num_retries") - blob._do_upload.assert_called_once_with( - client, stream, content_type, len(data), num_retries, predefined_acl - ) - return stream - - def test_upload_from_file_success(self): - stream = self._upload_from_file_helper(predefined_acl="private") - assert stream.tell() == 2 - - @mock.patch("warnings.warn") - def test_upload_from_file_with_retries(self, mock_warn): - from google.cloud.storage import blob as blob_module - - self._upload_from_file_helper(num_retries=20) - mock_warn.assert_called_once_with( - blob_module._NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2 - ) - - def test_upload_from_file_with_rewind(self): - stream = self._upload_from_file_helper(rewind=True) - assert stream.tell() == 0 - - def test_upload_from_file_failure(self): - import requests - - from google.resumable_media import InvalidResponse - from google.cloud import exceptions - - message = "Someone is already in this spot." 
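Editor's note: _do_upload_helper above encodes the dispatch rule the tests rely on: when the size is known and no larger than _MAX_MULTIPART_SIZE, the multipart path is used; otherwise the resumable path is. A one-function sketch of that decision (the threshold value here is the conventional 8 MiB and is illustrative, not quoted from the library):

_MAX_MULTIPART_SIZE = 8388608  # 8 MiB; illustrative value for the threshold

def choose_upload_strategy(size):
    """Mirror the multipart/resumable dispatch checked in _do_upload_helper."""
    if size is not None and size <= _MAX_MULTIPART_SIZE:
        return "multipart"
    return "resumable"

# choose_upload_strategy(_MAX_MULTIPART_SIZE)      -> "multipart"
# choose_upload_strategy(_MAX_MULTIPART_SIZE + 1)  -> "resumable"
# choose_upload_strategy(None)                     -> "resumable"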
- response = requests.Response() - response.status_code = http_client.CONFLICT - response.request = requests.Request("POST", "http://example.com").prepare() - side_effect = InvalidResponse(response, message) - - with self.assertRaises(exceptions.Conflict) as exc_info: - self._upload_from_file_helper(side_effect=side_effect) - - self.assertIn(message, exc_info.exception.message) - self.assertEqual(exc_info.exception.errors, []) - - def _do_upload_mock_call_helper(self, blob, client, content_type, size): - self.assertEqual(blob._do_upload.call_count, 1) - mock_call = blob._do_upload.mock_calls[0] - call_name, pos_args, kwargs = mock_call - self.assertEqual(call_name, "") - self.assertEqual(len(pos_args), 6) - self.assertEqual(pos_args[0], client) - self.assertEqual(pos_args[2], content_type) - self.assertEqual(pos_args[3], size) - self.assertIsNone(pos_args[4]) # num_retries - self.assertIsNone(pos_args[5]) # predefined_acl - self.assertEqual(kwargs, {}) - - return pos_args[1] - - def test_upload_from_filename(self): - from google.cloud._testing import _NamedTemporaryFile - - blob = self._make_one("blob-name", bucket=None) - # Mock low-level upload helper on blob (it is tested elsewhere). - created_json = {"metadata": {"mint": "ice-cream"}} - blob._do_upload = mock.Mock(return_value=created_json, spec=[]) - # Make sure `metadata` is empty before the request. - self.assertIsNone(blob.metadata) - - data = b"soooo much data" - content_type = u"image/svg+xml" - client = mock.sentinel.client - with _NamedTemporaryFile() as temp: - with open(temp.name, "wb") as file_obj: - file_obj.write(data) - - ret_val = blob.upload_from_filename( - temp.name, content_type=content_type, client=client - ) - - # Check the response and side-effects. - self.assertIsNone(ret_val) - self.assertEqual(blob.metadata, created_json["metadata"]) - - # Check the mock. - stream = self._do_upload_mock_call_helper(blob, client, content_type, len(data)) - self.assertTrue(stream.closed) - self.assertEqual(stream.mode, "rb") - self.assertEqual(stream.name, temp.name) - - def _upload_from_string_helper(self, data, **kwargs): - from google.cloud._helpers import _to_bytes - - blob = self._make_one("blob-name", bucket=None) - - # Mock low-level upload helper on blob (it is tested elsewhere). - created_json = {"componentCount": "5"} - blob._do_upload = mock.Mock(return_value=created_json, spec=[]) - # Make sure `metadata` is empty before the request. - self.assertIsNone(blob.component_count) - - client = mock.sentinel.client - ret_val = blob.upload_from_string(data, client=client, **kwargs) - - # Check the response and side-effects. - self.assertIsNone(ret_val) - self.assertEqual(blob.component_count, 5) - - # Check the mock. - payload = _to_bytes(data, encoding="utf-8") - stream = self._do_upload_mock_call_helper( - blob, client, "text/plain", len(payload) - ) - self.assertIsInstance(stream, io.BytesIO) - self.assertEqual(stream.getvalue(), payload) - - def test_upload_from_string_w_bytes(self): - data = b"XB]jb\xb8tad\xe0" - self._upload_from_string_helper(data) - - def test_upload_from_string_w_text(self): - data = u"\N{snowman} \N{sailboat}" - self._upload_from_string_helper(data) - - def _create_resumable_upload_session_helper(self, origin=None, side_effect=None): - bucket = _Bucket(name="alex-trebek") - blob = self._make_one("blob-name", bucket=bucket) - chunk_size = 99 * blob._CHUNK_SIZE_MULTIPLE - blob.chunk_size = chunk_size - - # Create mocks to be checked for doing transport. 
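Editor's note: the upload_from_file, upload_from_filename, and upload_from_string tests above only verify that each convenience method funnels into _do_upload with the right stream, size, and content type. For readers who want the public API, a short hedged usage example (hypothetical bucket and object names; application default credentials are assumed):

from google.cloud import storage

client = storage.Client()
bucket = client.bucket("my-example-bucket")   # hypothetical bucket name

# Text is encoded as UTF-8 and defaults to text/plain, as the tests assert.
blob = bucket.blob("notes/snowman.txt")
blob.upload_from_string(u"\N{snowman} \N{sailboat}")

# Uploading from a file; the content type can be given explicitly.
image = bucket.blob("images/diagram.svg")
image.upload_from_filename("diagram.svg", content_type="image/svg+xml")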
- resumable_url = "http://test.invalid?upload_id=clean-up-everybody" - response_headers = {"location": resumable_url} - transport = self._mock_transport(http_client.OK, response_headers) - if side_effect is not None: - transport.request.side_effect = side_effect - - # Create some mock arguments and call the method under test. - content_type = u"text/plain" - size = 10000 - client = mock.Mock(_http=transport, spec=[u"_http"]) - new_url = blob.create_resumable_upload_session( - content_type=content_type, size=size, origin=origin, client=client - ) - - # Check the returned value and (lack of) side-effect. - self.assertEqual(new_url, resumable_url) - self.assertEqual(blob.chunk_size, chunk_size) - - # Check the mocks. - upload_url = ( - "https://storage.googleapis.com/upload/storage/v1" - + bucket.path - + "/o?uploadType=resumable" - ) - payload = b'{"name": "blob-name"}' - expected_headers = { - "content-type": "application/json; charset=UTF-8", - "x-upload-content-length": str(size), - "x-upload-content-type": content_type, - } - if origin is not None: - expected_headers["Origin"] = origin - transport.request.assert_called_once_with( - "POST", upload_url, data=payload, headers=expected_headers, timeout=mock.ANY - ) - - def test_create_resumable_upload_session(self): - self._create_resumable_upload_session_helper() - - def test_create_resumable_upload_session_with_origin(self): - self._create_resumable_upload_session_helper(origin="http://google.com") - - def test_create_resumable_upload_session_with_failure(self): - from google.resumable_media import InvalidResponse - from google.cloud import exceptions - - message = "5-oh-3 woe is me." - response = self._mock_requests_response( - status_code=http_client.SERVICE_UNAVAILABLE, headers={} - ) - side_effect = InvalidResponse(response, message) - - with self.assertRaises(exceptions.ServiceUnavailable) as exc_info: - self._create_resumable_upload_session_helper(side_effect=side_effect) - - self.assertIn(message, exc_info.exception.message) - self.assertEqual(exc_info.exception.errors, []) - - def test_get_iam_policy(self): - from google.cloud.storage.iam import STORAGE_OWNER_ROLE - from google.cloud.storage.iam import STORAGE_EDITOR_ROLE - from google.cloud.storage.iam import STORAGE_VIEWER_ROLE - from google.api_core.iam import Policy - - BLOB_NAME = "blob-name" - PATH = "/b/name/o/%s" % (BLOB_NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - OWNER1 = "user:phred@example.com" - OWNER2 = "group:cloud-logs@google.com" - EDITOR1 = "domain:google.com" - EDITOR2 = "user:phred@example.com" - VIEWER1 = "serviceAccount:1234-abcdef@service.example.com" - VIEWER2 = "user:phred@example.com" - RETURNED = { - "resourceId": PATH, - "etag": ETAG, - "version": VERSION, - "bindings": [ - {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}, - {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]}, - {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]}, - ], - } - after = ({"status": http_client.OK}, RETURNED) - EXPECTED = { - binding["role"]: set(binding["members"]) for binding in RETURNED["bindings"] - } - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - policy = blob.get_iam_policy() - - self.assertIsInstance(policy, Policy) - self.assertEqual(policy.etag, RETURNED["etag"]) - self.assertEqual(policy.version, RETURNED["version"]) - self.assertEqual(dict(policy), EXPECTED) - - kw = connection._requested - self.assertEqual(len(kw), 1) - 
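Editor's note: _create_resumable_upload_session_helper above shows that create_resumable_upload_session is essentially the initiation POST from earlier with an optional Origin header, returning a session URL that another process (for example a browser doing a CORS upload) can PUT the object bytes to. A hedged usage sketch with hypothetical names:

from google.cloud import storage

client = storage.Client()
blob = client.bucket("my-example-bucket").blob("uploads/video.mp4")

# Returns the resumable session URL; the caller can hand it to another
# process or a browser, which then PUTs the object bytes to that URL.
session_url = blob.create_resumable_upload_session(
    content_type="video/mp4",
    size=10000000,
    origin="https://app.example.com",
)
print(session_url)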
self.assertEqual( - kw[0], - { - "method": "GET", - "path": "%s/iam" % (PATH,), - "query_params": {}, - "_target_object": None, - }, - ) - - def test_get_iam_policy_w_requested_policy_version(self): - from google.cloud.storage.iam import STORAGE_OWNER_ROLE - - BLOB_NAME = "blob-name" - PATH = "/b/name/o/%s" % (BLOB_NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - OWNER1 = "user:phred@example.com" - OWNER2 = "group:cloud-logs@google.com" - RETURNED = { - "resourceId": PATH, - "etag": ETAG, - "version": VERSION, - "bindings": [{"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}], - } - after = ({"status": http_client.OK}, RETURNED) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - blob.get_iam_policy(requested_policy_version=3) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "GET", - "path": "%s/iam" % (PATH,), - "query_params": {"optionsRequestedPolicyVersion": 3}, - "_target_object": None, - }, - ) - - def test_get_iam_policy_w_user_project(self): - from google.api_core.iam import Policy - - BLOB_NAME = "blob-name" - USER_PROJECT = "user-project-123" - PATH = "/b/name/o/%s" % (BLOB_NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - RETURNED = { - "resourceId": PATH, - "etag": ETAG, - "version": VERSION, - "bindings": [], - } - after = ({"status": http_client.OK}, RETURNED) - EXPECTED = {} - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client, user_project=USER_PROJECT) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - policy = blob.get_iam_policy() - - self.assertIsInstance(policy, Policy) - self.assertEqual(policy.etag, RETURNED["etag"]) - self.assertEqual(policy.version, RETURNED["version"]) - self.assertEqual(dict(policy), EXPECTED) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "GET", - "path": "%s/iam" % (PATH,), - "query_params": {"userProject": USER_PROJECT}, - "_target_object": None, - }, - ) - - def test_set_iam_policy(self): - import operator - from google.cloud.storage.iam import STORAGE_OWNER_ROLE - from google.cloud.storage.iam import STORAGE_EDITOR_ROLE - from google.cloud.storage.iam import STORAGE_VIEWER_ROLE - from google.api_core.iam import Policy - - BLOB_NAME = "blob-name" - PATH = "/b/name/o/%s" % (BLOB_NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - OWNER1 = "user:phred@example.com" - OWNER2 = "group:cloud-logs@google.com" - EDITOR1 = "domain:google.com" - EDITOR2 = "user:phred@example.com" - VIEWER1 = "serviceAccount:1234-abcdef@service.example.com" - VIEWER2 = "user:phred@example.com" - BINDINGS = [ - {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}, - {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]}, - {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]}, - ] - RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS} - after = ({"status": http_client.OK}, RETURNED) - policy = Policy() - for binding in BINDINGS: - policy[binding["role"]] = binding["members"] - - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - returned = blob.set_iam_policy(policy) - - self.assertEqual(returned.etag, ETAG) - self.assertEqual(returned.version, VERSION) - self.assertEqual(dict(returned), dict(policy)) - - kw = connection._requested - self.assertEqual(len(kw), 1) - 
self.assertEqual(kw[0]["method"], "PUT") - self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {}) - sent = kw[0]["data"] - self.assertEqual(sent["resourceId"], PATH) - self.assertEqual(len(sent["bindings"]), len(BINDINGS)) - key = operator.itemgetter("role") - for found, expected in zip( - sorted(sent["bindings"], key=key), sorted(BINDINGS, key=key) - ): - self.assertEqual(found["role"], expected["role"]) - self.assertEqual(sorted(found["members"]), sorted(expected["members"])) - - def test_set_iam_policy_w_user_project(self): - from google.api_core.iam import Policy - - BLOB_NAME = "blob-name" - USER_PROJECT = "user-project-123" - PATH = "/b/name/o/%s" % (BLOB_NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - BINDINGS = [] - RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS} - after = ({"status": http_client.OK}, RETURNED) - policy = Policy() - - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client, user_project=USER_PROJECT) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - returned = blob.set_iam_policy(policy) - - self.assertEqual(returned.etag, ETAG) - self.assertEqual(returned.version, VERSION) - self.assertEqual(dict(returned), dict(policy)) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PUT") - self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) - self.assertEqual(kw[0]["data"], {"resourceId": PATH}) - - def test_test_iam_permissions(self): - from google.cloud.storage.iam import STORAGE_OBJECTS_LIST - from google.cloud.storage.iam import STORAGE_BUCKETS_GET - from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE - - BLOB_NAME = "blob-name" - PATH = "/b/name/o/%s" % (BLOB_NAME,) - PERMISSIONS = [ - STORAGE_OBJECTS_LIST, - STORAGE_BUCKETS_GET, - STORAGE_BUCKETS_UPDATE, - ] - ALLOWED = PERMISSIONS[1:] - RETURNED = {"permissions": ALLOWED} - after = ({"status": http_client.OK}, RETURNED) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - allowed = blob.test_iam_permissions(PERMISSIONS) - - self.assertEqual(allowed, ALLOWED) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "GET") - self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {"permissions": PERMISSIONS}) - - def test_test_iam_permissions_w_user_project(self): - from google.cloud.storage.iam import STORAGE_OBJECTS_LIST - from google.cloud.storage.iam import STORAGE_BUCKETS_GET - from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE - - BLOB_NAME = "blob-name" - USER_PROJECT = "user-project-123" - PATH = "/b/name/o/%s" % (BLOB_NAME,) - PERMISSIONS = [ - STORAGE_OBJECTS_LIST, - STORAGE_BUCKETS_GET, - STORAGE_BUCKETS_UPDATE, - ] - ALLOWED = PERMISSIONS[1:] - RETURNED = {"permissions": ALLOWED} - after = ({"status": http_client.OK}, RETURNED) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client, user_project=USER_PROJECT) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - allowed = blob.test_iam_permissions(PERMISSIONS) - - self.assertEqual(allowed, ALLOWED) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "GET") - self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,)) - self.assertEqual( 
- kw[0]["query_params"], - {"permissions": PERMISSIONS, "userProject": USER_PROJECT}, - ) - - def test_make_public(self): - from google.cloud.storage.acl import _ACLEntity - - BLOB_NAME = "blob-name" - permissive = [{"entity": "allUsers", "role": _ACLEntity.READER_ROLE}] - after = ({"status": http_client.OK}, {"acl": permissive}) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - blob.acl.loaded = True - blob.make_public() - self.assertEqual(list(blob.acl), permissive) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/b/name/o/%s" % BLOB_NAME) - self.assertEqual(kw[0]["data"], {"acl": permissive}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - - def test_make_private(self): - BLOB_NAME = "blob-name" - no_permissions = [] - after = ({"status": http_client.OK}, {"acl": no_permissions}) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - blob.acl.loaded = True - blob.make_private() - self.assertEqual(list(blob.acl), no_permissions) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/b/name/o/%s" % BLOB_NAME) - self.assertEqual(kw[0]["data"], {"acl": no_permissions}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - - def test_compose_wo_content_type_set(self): - SOURCE_1 = "source-1" - SOURCE_2 = "source-2" - DESTINATION = "destinaton" - RESOURCE = {} - after = ({"status": http_client.OK}, RESOURCE) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - source_1 = self._make_one(SOURCE_1, bucket=bucket) - source_2 = self._make_one(SOURCE_2, bucket=bucket) - destination = self._make_one(DESTINATION, bucket=bucket) - # no destination.content_type set - - destination.compose(sources=[source_1, source_2]) - - self.assertIsNone(destination.content_type) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "POST", - "path": "/b/name/o/%s/compose" % DESTINATION, - "query_params": {}, - "data": { - "sourceObjects": [{"name": source_1.name}, {"name": source_2.name}], - "destination": {}, - }, - "_target_object": destination, - }, - ) - - def test_compose_minimal_w_user_project(self): - SOURCE_1 = "source-1" - SOURCE_2 = "source-2" - DESTINATION = "destinaton" - RESOURCE = {"etag": "DEADBEEF"} - USER_PROJECT = "user-project-123" - after = ({"status": http_client.OK}, RESOURCE) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client, user_project=USER_PROJECT) - source_1 = self._make_one(SOURCE_1, bucket=bucket) - source_2 = self._make_one(SOURCE_2, bucket=bucket) - destination = self._make_one(DESTINATION, bucket=bucket) - destination.content_type = "text/plain" - - destination.compose(sources=[source_1, source_2]) - - self.assertEqual(destination.etag, "DEADBEEF") - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "POST", - "path": "/b/name/o/%s/compose" % DESTINATION, - "query_params": {"userProject": USER_PROJECT}, - "data": { - "sourceObjects": [{"name": source_1.name}, {"name": source_2.name}], - "destination": {"contentType": "text/plain"}, - }, - "_target_object": destination, - }, - ) - - 
def test_compose_w_additional_property_changes(self): - SOURCE_1 = "source-1" - SOURCE_2 = "source-2" - DESTINATION = "destinaton" - RESOURCE = {"etag": "DEADBEEF"} - after = ({"status": http_client.OK}, RESOURCE) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - source_1 = self._make_one(SOURCE_1, bucket=bucket) - source_2 = self._make_one(SOURCE_2, bucket=bucket) - destination = self._make_one(DESTINATION, bucket=bucket) - destination.content_type = "text/plain" - destination.content_language = "en-US" - destination.metadata = {"my-key": "my-value"} - - destination.compose(sources=[source_1, source_2]) - - self.assertEqual(destination.etag, "DEADBEEF") - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual( - kw[0], - { - "method": "POST", - "path": "/b/name/o/%s/compose" % DESTINATION, - "query_params": {}, - "data": { - "sourceObjects": [{"name": source_1.name}, {"name": source_2.name}], - "destination": { - "contentType": "text/plain", - "contentLanguage": "en-US", - "metadata": {"my-key": "my-value"}, - }, - }, - "_target_object": destination, - }, - ) - - def test_rewrite_response_without_resource(self): - SOURCE_BLOB = "source" - DEST_BLOB = "dest" - DEST_BUCKET = "other-bucket" - TOKEN = "TOKEN" - RESPONSE = { - "totalBytesRewritten": 33, - "objectSize": 42, - "done": False, - "rewriteToken": TOKEN, - } - response = ({"status": http_client.OK}, RESPONSE) - connection = _Connection(response) - client = _Client(connection) - source_bucket = _Bucket(client=client) - source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket) - dest_bucket = _Bucket(client=client, name=DEST_BUCKET) - dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket) - - token, rewritten, size = dest_blob.rewrite(source_blob) - - self.assertEqual(token, TOKEN) - self.assertEqual(rewritten, 33) - self.assertEqual(size, 42) - - def test_rewrite_w_generations(self): - SOURCE_BLOB = "source" - SOURCE_GENERATION = 42 - DEST_BLOB = "dest" - DEST_BUCKET = "other-bucket" - DEST_GENERATION = 43 - TOKEN = "TOKEN" - RESPONSE = { - "totalBytesRewritten": 33, - "objectSize": 42, - "done": False, - "rewriteToken": TOKEN, - } - response = ({"status": http_client.OK}, RESPONSE) - connection = _Connection(response) - client = _Client(connection) - source_bucket = _Bucket(client=client) - source_blob = self._make_one( - SOURCE_BLOB, bucket=source_bucket, generation=SOURCE_GENERATION - ) - dest_bucket = _Bucket(client=client, name=DEST_BUCKET) - dest_blob = self._make_one( - DEST_BLOB, bucket=dest_bucket, generation=DEST_GENERATION - ) - - token, rewritten, size = dest_blob.rewrite(source_blob) - - self.assertEqual(token, TOKEN) - self.assertEqual(rewritten, 33) - self.assertEqual(size, 42) - - kw, = connection._requested - self.assertEqual(kw["method"], "POST") - self.assertEqual( - kw["path"], - "/b/%s/o/%s/rewriteTo/b/%s/o/%s" - % ( - (source_bucket.name, source_blob.name, dest_bucket.name, dest_blob.name) - ), - ) - self.assertEqual(kw["query_params"], {"sourceGeneration": SOURCE_GENERATION}) - - def test_rewrite_other_bucket_other_name_no_encryption_partial(self): - SOURCE_BLOB = "source" - DEST_BLOB = "dest" - DEST_BUCKET = "other-bucket" - TOKEN = "TOKEN" - RESPONSE = { - "totalBytesRewritten": 33, - "objectSize": 42, - "done": False, - "rewriteToken": TOKEN, - "resource": {"etag": "DEADBEEF"}, - } - response = ({"status": http_client.OK}, RESPONSE) - connection = _Connection(response) - client = _Client(connection) - source_bucket = 
_Bucket(client=client) - source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket) - dest_bucket = _Bucket(client=client, name=DEST_BUCKET) - dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket) - - token, rewritten, size = dest_blob.rewrite(source_blob) - - self.assertEqual(token, TOKEN) - self.assertEqual(rewritten, 33) - self.assertEqual(size, 42) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "POST") - PATH = "/b/name/o/%s/rewriteTo/b/%s/o/%s" % ( - SOURCE_BLOB, - DEST_BUCKET, - DEST_BLOB, - ) - self.assertEqual(kw[0]["path"], PATH) - self.assertEqual(kw[0]["query_params"], {}) - SENT = {} - self.assertEqual(kw[0]["data"], SENT) - - headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} - self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers) - self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers) - self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers) - self.assertNotIn("X-Goog-Encryption-Algorithm", headers) - self.assertNotIn("X-Goog-Encryption-Key", headers) - self.assertNotIn("X-Goog-Encryption-Key-Sha256", headers) - - def test_rewrite_same_name_no_old_key_new_key_done_w_user_project(self): - KEY = b"01234567890123456789012345678901" # 32 bytes - KEY_B64 = base64.b64encode(KEY).rstrip().decode("ascii") - KEY_HASH = hashlib.sha256(KEY).digest() - KEY_HASH_B64 = base64.b64encode(KEY_HASH).rstrip().decode("ascii") - BLOB_NAME = "blob" - USER_PROJECT = "user-project-123" - RESPONSE = { - "totalBytesRewritten": 42, - "objectSize": 42, - "done": True, - "resource": {"etag": "DEADBEEF"}, - } - response = ({"status": http_client.OK}, RESPONSE) - connection = _Connection(response) - client = _Client(connection) - bucket = _Bucket(client=client, user_project=USER_PROJECT) - plain = self._make_one(BLOB_NAME, bucket=bucket) - encrypted = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY) - - token, rewritten, size = encrypted.rewrite(plain) - - self.assertIsNone(token) - self.assertEqual(rewritten, 42) - self.assertEqual(size, 42) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "POST") - PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) - self.assertEqual(kw[0]["path"], PATH) - self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) - SENT = {} - self.assertEqual(kw[0]["data"], SENT) - - headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} - self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers) - self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers) - self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers) - self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256") - self.assertEqual(headers["X-Goog-Encryption-Key"], KEY_B64) - self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], KEY_HASH_B64) - - def test_rewrite_same_name_no_key_new_key_w_token(self): - SOURCE_KEY = b"01234567890123456789012345678901" # 32 bytes - SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode("ascii") - SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest() - SOURCE_KEY_HASH_B64 = base64.b64encode(SOURCE_KEY_HASH).rstrip().decode("ascii") - DEST_KEY = b"90123456789012345678901234567890" # 32 bytes - DEST_KEY_B64 = base64.b64encode(DEST_KEY).rstrip().decode("ascii") - DEST_KEY_HASH = hashlib.sha256(DEST_KEY).digest() - DEST_KEY_HASH_B64 = base64.b64encode(DEST_KEY_HASH).rstrip().decode("ascii") - BLOB_NAME = "blob" - 
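Editor's note: the rewrite tests above check the tuple rewrite returns: a continuation token (None once done), the bytes rewritten so far, and the total object size. For large objects the service may answer with done: False, so callers loop, passing the token back, as in this hedged sketch (hypothetical bucket and blob names):

from google.cloud import storage

client = storage.Client()
source = client.bucket("my-example-bucket").blob("archive/big-object.bin")
dest = client.bucket("my-other-bucket").blob("copies/big-object.bin")

# rewrite() returns (token, bytes_rewritten, total_bytes); keep calling with
# the token until the service reports completion (token is None).
token, rewritten, total = dest.rewrite(source)
while token is not None:
    token, rewritten, total = dest.rewrite(source, token=token)
print("rewrote {} of {} bytes".format(rewritten, total))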
TOKEN = "TOKEN" - RESPONSE = { - "totalBytesRewritten": 42, - "objectSize": 42, - "done": True, - "resource": {"etag": "DEADBEEF"}, - } - response = ({"status": http_client.OK}, RESPONSE) - connection = _Connection(response) - client = _Client(connection) - bucket = _Bucket(client=client) - source = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY) - dest = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=DEST_KEY) - - token, rewritten, size = dest.rewrite(source, token=TOKEN) - - self.assertIsNone(token) - self.assertEqual(rewritten, 42) - self.assertEqual(size, 42) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "POST") - PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) - self.assertEqual(kw[0]["path"], PATH) - self.assertEqual(kw[0]["query_params"], {"rewriteToken": TOKEN}) - SENT = {} - self.assertEqual(kw[0]["data"], SENT) - - headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} - self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256") - self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], SOURCE_KEY_B64) - self.assertEqual( - headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], SOURCE_KEY_HASH_B64 - ) - self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256") - self.assertEqual(headers["X-Goog-Encryption-Key"], DEST_KEY_B64) - self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], DEST_KEY_HASH_B64) - - def test_rewrite_same_name_w_old_key_new_kms_key(self): - SOURCE_KEY = b"01234567890123456789012345678901" # 32 bytes - SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode("ascii") - SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest() - SOURCE_KEY_HASH_B64 = base64.b64encode(SOURCE_KEY_HASH).rstrip().decode("ascii") - DEST_KMS_RESOURCE = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - BLOB_NAME = "blob" - RESPONSE = { - "totalBytesRewritten": 42, - "objectSize": 42, - "done": True, - "resource": {"etag": "DEADBEEF"}, - } - response = ({"status": http_client.OK}, RESPONSE) - connection = _Connection(response) - client = _Client(connection) - bucket = _Bucket(client=client) - source = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY) - dest = self._make_one(BLOB_NAME, bucket=bucket, kms_key_name=DEST_KMS_RESOURCE) - - token, rewritten, size = dest.rewrite(source) - - self.assertIsNone(token) - self.assertEqual(rewritten, 42) - self.assertEqual(size, 42) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "POST") - PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) - self.assertEqual(kw[0]["path"], PATH) - self.assertEqual( - kw[0]["query_params"], {"destinationKmsKeyName": DEST_KMS_RESOURCE} - ) - SENT = {"kmsKeyName": DEST_KMS_RESOURCE} - self.assertEqual(kw[0]["data"], SENT) - - headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} - self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256") - self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], SOURCE_KEY_B64) - self.assertEqual( - headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], SOURCE_KEY_HASH_B64 - ) - - def test_update_storage_class_invalid(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - with self.assertRaises(ValueError): - blob.update_storage_class(u"BOGUS") - - def 
test_update_storage_class_large_file(self): - BLOB_NAME = "blob-name" - STORAGE_CLASS = u"NEARLINE" - TOKEN = "TOKEN" - INCOMPLETE_RESPONSE = { - "totalBytesRewritten": 42, - "objectSize": 84, - "done": False, - "rewriteToken": TOKEN, - "resource": {"storageClass": STORAGE_CLASS}, - } - COMPLETE_RESPONSE = { - "totalBytesRewritten": 84, - "objectSize": 84, - "done": True, - "resource": {"storageClass": STORAGE_CLASS}, - } - response_1 = ({"status": http_client.OK}, INCOMPLETE_RESPONSE) - response_2 = ({"status": http_client.OK}, COMPLETE_RESPONSE) - connection = _Connection(response_1, response_2) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - blob.update_storage_class("NEARLINE") - - self.assertEqual(blob.storage_class, "NEARLINE") - - def test_update_storage_class_wo_encryption_key(self): - BLOB_NAME = "blob-name" - STORAGE_CLASS = u"NEARLINE" - RESPONSE = { - "totalBytesRewritten": 42, - "objectSize": 42, - "done": True, - "resource": {"storageClass": STORAGE_CLASS}, - } - response = ({"status": http_client.OK}, RESPONSE) - connection = _Connection(response) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._make_one(BLOB_NAME, bucket=bucket) - - blob.update_storage_class("NEARLINE") - - self.assertEqual(blob.storage_class, "NEARLINE") - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "POST") - PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) - self.assertEqual(kw[0]["path"], PATH) - self.assertEqual(kw[0]["query_params"], {}) - SENT = {"storageClass": STORAGE_CLASS} - self.assertEqual(kw[0]["data"], SENT) - - headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} - # Blob has no key, and therefore the relevant headers are not sent. 
- self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers) - self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers) - self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers) - self.assertNotIn("X-Goog-Encryption-Algorithm", headers) - self.assertNotIn("X-Goog-Encryption-Key", headers) - self.assertNotIn("X-Goog-Encryption-Key-Sha256", headers) - - def test_update_storage_class_w_encryption_key_w_user_project(self): - BLOB_NAME = "blob-name" - BLOB_KEY = b"01234567890123456789012345678901" # 32 bytes - BLOB_KEY_B64 = base64.b64encode(BLOB_KEY).rstrip().decode("ascii") - BLOB_KEY_HASH = hashlib.sha256(BLOB_KEY).digest() - BLOB_KEY_HASH_B64 = base64.b64encode(BLOB_KEY_HASH).rstrip().decode("ascii") - STORAGE_CLASS = u"NEARLINE" - USER_PROJECT = "user-project-123" - RESPONSE = { - "totalBytesRewritten": 42, - "objectSize": 42, - "done": True, - "resource": {"storageClass": STORAGE_CLASS}, - } - response = ({"status": http_client.OK}, RESPONSE) - connection = _Connection(response) - client = _Client(connection) - bucket = _Bucket(client=client, user_project=USER_PROJECT) - blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=BLOB_KEY) - - blob.update_storage_class("NEARLINE") - - self.assertEqual(blob.storage_class, "NEARLINE") - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "POST") - PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) - self.assertEqual(kw[0]["path"], PATH) - self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) - SENT = {"storageClass": STORAGE_CLASS} - self.assertEqual(kw[0]["data"], SENT) - - headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} - # Blob has key, and therefore the relevant headers are sent. 
- self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256") - self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], BLOB_KEY_B64) - self.assertEqual( - headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], BLOB_KEY_HASH_B64 - ) - self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256") - self.assertEqual(headers["X-Goog-Encryption-Key"], BLOB_KEY_B64) - self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], BLOB_KEY_HASH_B64) - - def test_cache_control_getter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - CACHE_CONTROL = "no-cache" - properties = {"cacheControl": CACHE_CONTROL} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.cache_control, CACHE_CONTROL) - - def test_cache_control_setter(self): - BLOB_NAME = "blob-name" - CACHE_CONTROL = "no-cache" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.cache_control) - blob.cache_control = CACHE_CONTROL - self.assertEqual(blob.cache_control, CACHE_CONTROL) - - def test_component_count(self): - BUCKET = object() - COMPONENT_COUNT = 42 - blob = self._make_one( - "blob-name", bucket=BUCKET, properties={"componentCount": COMPONENT_COUNT} - ) - self.assertEqual(blob.component_count, COMPONENT_COUNT) - - def test_component_count_unset(self): - BUCKET = object() - blob = self._make_one("blob-name", bucket=BUCKET) - self.assertIsNone(blob.component_count) - - def test_component_count_string_val(self): - BUCKET = object() - COMPONENT_COUNT = 42 - blob = self._make_one( - "blob-name", - bucket=BUCKET, - properties={"componentCount": str(COMPONENT_COUNT)}, - ) - self.assertEqual(blob.component_count, COMPONENT_COUNT) - - def test_content_disposition_getter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - CONTENT_DISPOSITION = "Attachment; filename=example.jpg" - properties = {"contentDisposition": CONTENT_DISPOSITION} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION) - - def test_content_disposition_setter(self): - BLOB_NAME = "blob-name" - CONTENT_DISPOSITION = "Attachment; filename=example.jpg" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.content_disposition) - blob.content_disposition = CONTENT_DISPOSITION - self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION) - - def test_content_encoding_getter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - CONTENT_ENCODING = "gzip" - properties = {"contentEncoding": CONTENT_ENCODING} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.content_encoding, CONTENT_ENCODING) - - def test_content_encoding_setter(self): - BLOB_NAME = "blob-name" - CONTENT_ENCODING = "gzip" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.content_encoding) - blob.content_encoding = CONTENT_ENCODING - self.assertEqual(blob.content_encoding, CONTENT_ENCODING) - - def test_content_language_getter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - CONTENT_LANGUAGE = "pt-BR" - properties = {"contentLanguage": CONTENT_LANGUAGE} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.content_language, CONTENT_LANGUAGE) - - def test_content_language_setter(self): - BLOB_NAME = "blob-name" - CONTENT_LANGUAGE = "pt-BR" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - 
self.assertIsNone(blob.content_language) - blob.content_language = CONTENT_LANGUAGE - self.assertEqual(blob.content_language, CONTENT_LANGUAGE) - - def test_content_type_getter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - CONTENT_TYPE = "image/jpeg" - properties = {"contentType": CONTENT_TYPE} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.content_type, CONTENT_TYPE) - - def test_content_type_setter(self): - BLOB_NAME = "blob-name" - CONTENT_TYPE = "image/jpeg" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.content_type) - blob.content_type = CONTENT_TYPE - self.assertEqual(blob.content_type, CONTENT_TYPE) - - def test_crc32c_getter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - CRC32C = "DEADBEEF" - properties = {"crc32c": CRC32C} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.crc32c, CRC32C) - - def test_crc32c_setter(self): - BLOB_NAME = "blob-name" - CRC32C = "DEADBEEF" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.crc32c) - blob.crc32c = CRC32C - self.assertEqual(blob.crc32c, CRC32C) - - def test_etag(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - ETAG = "ETAG" - properties = {"etag": ETAG} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.etag, ETAG) - - def test_event_based_hold_getter_missing(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - properties = {} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertIsNone(blob.event_based_hold) - - def test_event_based_hold_getter_false(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - properties = {"eventBasedHold": False} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertFalse(blob.event_based_hold) - - def test_event_based_hold_getter_true(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - properties = {"eventBasedHold": True} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertTrue(blob.event_based_hold) - - def test_event_based_hold_setter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.event_based_hold) - blob.event_based_hold = True - self.assertEqual(blob.event_based_hold, True) - - def test_generation(self): - BUCKET = object() - GENERATION = 42 - blob = self._make_one( - "blob-name", bucket=BUCKET, properties={"generation": GENERATION} - ) - self.assertEqual(blob.generation, GENERATION) - - def test_generation_unset(self): - BUCKET = object() - blob = self._make_one("blob-name", bucket=BUCKET) - self.assertIsNone(blob.generation) - - def test_generation_string_val(self): - BUCKET = object() - GENERATION = 42 - blob = self._make_one( - "blob-name", bucket=BUCKET, properties={"generation": str(GENERATION)} - ) - self.assertEqual(blob.generation, GENERATION) - - def test_id(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - ID = "ID" - properties = {"id": ID} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.id, ID) - - def test_md5_hash_getter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - MD5_HASH = "DEADBEEF" - properties = {"md5Hash": MD5_HASH} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.md5_hash, MD5_HASH) - - def test_md5_hash_setter(self): - 
BLOB_NAME = "blob-name" - MD5_HASH = "DEADBEEF" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.md5_hash) - blob.md5_hash = MD5_HASH - self.assertEqual(blob.md5_hash, MD5_HASH) - - def test_media_link(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - MEDIA_LINK = "http://example.com/media/" - properties = {"mediaLink": MEDIA_LINK} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.media_link, MEDIA_LINK) - - def test_metadata_getter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - METADATA = {"foo": "Foo"} - properties = {"metadata": METADATA} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.metadata, METADATA) - - def test_metadata_setter(self): - BLOB_NAME = "blob-name" - METADATA = {"foo": "Foo"} - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.metadata) - blob.metadata = METADATA - self.assertEqual(blob.metadata, METADATA) - - def test_metadata_setter_w_nan(self): - BLOB_NAME = "blob-name" - METADATA = {"foo": float("nan")} - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.metadata) - blob.metadata = METADATA - value = blob.metadata["foo"] - self.assertIsInstance(value, str) - - def test_metageneration(self): - BUCKET = object() - METAGENERATION = 42 - blob = self._make_one( - "blob-name", bucket=BUCKET, properties={"metageneration": METAGENERATION} - ) - self.assertEqual(blob.metageneration, METAGENERATION) - - def test_metageneration_unset(self): - BUCKET = object() - blob = self._make_one("blob-name", bucket=BUCKET) - self.assertIsNone(blob.metageneration) - - def test_metageneration_string_val(self): - BUCKET = object() - METAGENERATION = 42 - blob = self._make_one( - "blob-name", - bucket=BUCKET, - properties={"metageneration": str(METAGENERATION)}, - ) - self.assertEqual(blob.metageneration, METAGENERATION) - - def test_owner(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - OWNER = {"entity": "project-owner-12345", "entityId": "23456"} - properties = {"owner": OWNER} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - owner = blob.owner - self.assertEqual(owner["entity"], "project-owner-12345") - self.assertEqual(owner["entityId"], "23456") - - def test_retention_expiration_time(self): - from google.cloud._helpers import _RFC3339_MICROS - from google.cloud._helpers import UTC - - BLOB_NAME = "blob-name" - bucket = _Bucket() - TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) - TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) - properties = {"retentionExpirationTime": TIME_CREATED} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.retention_expiration_time, TIMESTAMP) - - def test_retention_expiration_time_unset(self): - BUCKET = object() - blob = self._make_one("blob-name", bucket=BUCKET) - self.assertIsNone(blob.retention_expiration_time) - - def test_self_link(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - SELF_LINK = "http://example.com/self/" - properties = {"selfLink": SELF_LINK} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.self_link, SELF_LINK) - - def test_size(self): - BUCKET = object() - SIZE = 42 - blob = self._make_one("blob-name", bucket=BUCKET, properties={"size": SIZE}) - self.assertEqual(blob.size, SIZE) - - def test_size_unset(self): - BUCKET = object() - blob = 
self._make_one("blob-name", bucket=BUCKET) - self.assertIsNone(blob.size) - - def test_size_string_val(self): - BUCKET = object() - SIZE = 42 - blob = self._make_one( - "blob-name", bucket=BUCKET, properties={"size": str(SIZE)} - ) - self.assertEqual(blob.size, SIZE) - - def test_storage_class_getter(self): - blob_name = "blob-name" - bucket = _Bucket() - storage_class = "COLDLINE" - properties = {"storageClass": storage_class} - blob = self._make_one(blob_name, bucket=bucket, properties=properties) - self.assertEqual(blob.storage_class, storage_class) - - def test_storage_class_setter(self): - blob_name = "blob-name" - bucket = _Bucket() - storage_class = "COLDLINE" - blob = self._make_one(blob_name, bucket=bucket) - self.assertIsNone(blob.storage_class) - blob.storage_class = storage_class - self.assertEqual(blob.storage_class, storage_class) - self.assertEqual(blob._properties, {"storageClass": storage_class}) - - def test_temporary_hold_getter_missing(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - properties = {} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertIsNone(blob.temporary_hold) - - def test_temporary_hold_getter_false(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - properties = {"temporaryHold": False} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertFalse(blob.temporary_hold) - - def test_temporary_hold_getter_true(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - properties = {"temporaryHold": True} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertTrue(blob.temporary_hold) - - def test_temporary_hold_setter(self): - BLOB_NAME = "blob-name" - bucket = _Bucket() - blob = self._make_one(BLOB_NAME, bucket=bucket) - self.assertIsNone(blob.temporary_hold) - blob.temporary_hold = True - self.assertEqual(blob.temporary_hold, True) - - def test_time_deleted(self): - from google.cloud._helpers import _RFC3339_MICROS - from google.cloud._helpers import UTC - - BLOB_NAME = "blob-name" - bucket = _Bucket() - TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) - TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS) - properties = {"timeDeleted": TIME_DELETED} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.time_deleted, TIMESTAMP) - - def test_time_deleted_unset(self): - BUCKET = object() - blob = self._make_one("blob-name", bucket=BUCKET) - self.assertIsNone(blob.time_deleted) - - def test_time_created(self): - from google.cloud._helpers import _RFC3339_MICROS - from google.cloud._helpers import UTC - - BLOB_NAME = "blob-name" - bucket = _Bucket() - TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) - TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) - properties = {"timeCreated": TIME_CREATED} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.time_created, TIMESTAMP) - - def test_time_created_unset(self): - BUCKET = object() - blob = self._make_one("blob-name", bucket=BUCKET) - self.assertIsNone(blob.time_created) - - def test_updated(self): - from google.cloud._helpers import _RFC3339_MICROS - from google.cloud._helpers import UTC - - BLOB_NAME = "blob-name" - bucket = _Bucket() - TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) - UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS) - properties = {"updated": UPDATED} - blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) - 
self.assertEqual(blob.updated, TIMESTAMP) - - def test_updated_unset(self): - BUCKET = object() - blob = self._make_one("blob-name", bucket=BUCKET) - self.assertIsNone(blob.updated) - - def test_from_string_w_valid_uri(self): - from google.cloud.storage.blob import Blob - - connection = _Connection() - client = _Client(connection) - uri = "gs://BUCKET_NAME/b" - blob = Blob.from_string(uri, client) - - self.assertIsInstance(blob, Blob) - self.assertIs(blob.client, client) - self.assertEqual(blob.name, "b") - self.assertEqual(blob.bucket.name, "BUCKET_NAME") - - def test_from_string_w_invalid_uri(self): - from google.cloud.storage.blob import Blob - - connection = _Connection() - client = _Client(connection) - - with pytest.raises(ValueError, match="URI scheme must be gs"): - Blob.from_string("http://bucket_name/b", client) - - def test_from_string_w_domain_name_bucket(self): - from google.cloud.storage.blob import Blob - - connection = _Connection() - client = _Client(connection) - uri = "gs://buckets.example.com/b" - blob = Blob.from_string(uri, client) - - self.assertIsInstance(blob, Blob) - self.assertIs(blob.client, client) - self.assertEqual(blob.name, "b") - self.assertEqual(blob.bucket.name, "buckets.example.com") - - -class Test__quote(unittest.TestCase): - @staticmethod - def _call_fut(*args, **kw): - from google.cloud.storage.blob import _quote - - return _quote(*args, **kw) - - def test_bytes(self): - quoted = self._call_fut(b"\xDE\xAD\xBE\xEF") - self.assertEqual(quoted, "%DE%AD%BE%EF") - - def test_unicode(self): - helicopter = u"\U0001f681" - quoted = self._call_fut(helicopter) - self.assertEqual(quoted, "%F0%9F%9A%81") - - def test_bad_type(self): - with self.assertRaises(TypeError): - self._call_fut(None) - - def test_w_slash_default(self): - with_slash = "foo/bar/baz" - quoted = self._call_fut(with_slash) - self.assertEqual(quoted, "foo%2Fbar%2Fbaz") - - def test_w_slash_w_safe(self): - with_slash = "foo/bar/baz" - quoted_safe = self._call_fut(with_slash, safe=b"/") - self.assertEqual(quoted_safe, with_slash) - - def test_w_tilde(self): - with_tilde = "bam~qux" - quoted = self._call_fut(with_tilde, safe=b"~") - self.assertEqual(quoted, with_tilde) - - -class Test__maybe_rewind(unittest.TestCase): - @staticmethod - def _call_fut(*args, **kwargs): - from google.cloud.storage.blob import _maybe_rewind - - return _maybe_rewind(*args, **kwargs) - - def test_default(self): - stream = mock.Mock(spec=[u"seek"]) - ret_val = self._call_fut(stream) - self.assertIsNone(ret_val) - - stream.seek.assert_not_called() - - def test_do_not_rewind(self): - stream = mock.Mock(spec=[u"seek"]) - ret_val = self._call_fut(stream, rewind=False) - self.assertIsNone(ret_val) - - stream.seek.assert_not_called() - - def test_do_rewind(self): - stream = mock.Mock(spec=[u"seek"]) - ret_val = self._call_fut(stream, rewind=True) - self.assertIsNone(ret_val) - - stream.seek.assert_called_once_with(0, os.SEEK_SET) - - -class Test__raise_from_invalid_response(unittest.TestCase): - @staticmethod - def _call_fut(error): - from google.cloud.storage.blob import _raise_from_invalid_response - - return _raise_from_invalid_response(error) - - def _helper(self, message, code=http_client.BAD_REQUEST, args=()): - import requests - - from google.resumable_media import InvalidResponse - from google.api_core import exceptions - - response = requests.Response() - response.request = requests.Request("GET", "http://example.com").prepare() - response.status_code = code - error = InvalidResponse(response, message, *args) - - 
with self.assertRaises(exceptions.GoogleAPICallError) as exc_info: - self._call_fut(error) - - return exc_info - - def test_default(self): - message = "Failure" - exc_info = self._helper(message) - expected = "GET http://example.com/: {}".format(message) - self.assertEqual(exc_info.exception.message, expected) - self.assertEqual(exc_info.exception.errors, []) - - def test_w_206_and_args(self): - message = "Failure" - args = ("one", "two") - exc_info = self._helper(message, code=http_client.PARTIAL_CONTENT, args=args) - expected = "GET http://example.com/: {}".format((message,) + args) - self.assertEqual(exc_info.exception.message, expected) - self.assertEqual(exc_info.exception.errors, []) - - -class Test__add_query_parameters(unittest.TestCase): - @staticmethod - def _call_fut(*args, **kwargs): - from google.cloud.storage.blob import _add_query_parameters - - return _add_query_parameters(*args, **kwargs) - - def test_w_empty_list(self): - BASE_URL = "https://test.example.com/base" - self.assertEqual(self._call_fut(BASE_URL, []), BASE_URL) - - def test_wo_existing_qs(self): - BASE_URL = "https://test.example.com/base" - NV_LIST = [("one", "One"), ("two", "Two")] - expected = "&".join(["{}={}".format(name, value) for name, value in NV_LIST]) - self.assertEqual( - self._call_fut(BASE_URL, NV_LIST), "{}?{}".format(BASE_URL, expected) - ) - - def test_w_existing_qs(self): - BASE_URL = "https://test.example.com/base?one=Three" - NV_LIST = [("one", "One"), ("two", "Two")] - expected = "&".join(["{}={}".format(name, value) for name, value in NV_LIST]) - self.assertEqual( - self._call_fut(BASE_URL, NV_LIST), "{}&{}".format(BASE_URL, expected) - ) - - -class _Connection(object): - - API_BASE_URL = "http://example.com" - USER_AGENT = "testing 1.2.3" - credentials = object() - - def __init__(self, *responses): - self._responses = responses[:] - self._requested = [] - self._signed = [] - - def _respond(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response - - def api_request(self, **kw): - from google.cloud.exceptions import NotFound - - info, content = self._respond(**kw) - if info.get("status") == http_client.NOT_FOUND: - raise NotFound(info) - return content - - -class _Bucket(object): - def __init__(self, client=None, name="name", user_project=None): - if client is None: - connection = _Connection() - client = _Client(connection) - self.client = client - self._blobs = {} - self._copied = [] - self._deleted = [] - self.name = name - self.path = "/b/" + name - self.user_project = user_project - - def delete_blob(self, blob_name, client=None, generation=None): - del self._blobs[blob_name] - self._deleted.append((blob_name, client, generation)) - - -class _Client(object): - def __init__(self, connection): - self._base_connection = connection - - @property - def _connection(self): - return self._base_connection - - @property - def _credentials(self): - return self._base_connection.credentials diff --git a/storage/tests/unit/test_bucket.py b/storage/tests/unit/test_bucket.py deleted file mode 100644 index 68399b3c8962..000000000000 --- a/storage/tests/unit/test_bucket.py +++ /dev/null @@ -1,3069 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import unittest - -import mock -import pytest - - -def _make_connection(*responses): - import google.cloud.storage._http - - mock_connection = mock.create_autospec(google.cloud.storage._http.Connection) - mock_connection.user_agent = "testing 1.2.3" - mock_connection.api_request.side_effect = list(responses) - return mock_connection - - -def _create_signing_credentials(): - import google.auth.credentials - - class _SigningCredentials( - google.auth.credentials.Credentials, google.auth.credentials.Signing - ): - pass - - credentials = mock.Mock(spec=_SigningCredentials) - - return credentials - - -class Test_LifecycleRuleConditions(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.bucket import LifecycleRuleConditions - - return LifecycleRuleConditions - - def _make_one(self, **kw): - return self._get_target_class()(**kw) - - def test_ctor_wo_conditions(self): - with self.assertRaises(ValueError): - self._make_one() - - def test_ctor_w_age_and_matches_storage_class(self): - conditions = self._make_one(age=10, matches_storage_class=["COLDLINE"]) - expected = {"age": 10, "matchesStorageClass": ["COLDLINE"]} - self.assertEqual(dict(conditions), expected) - self.assertEqual(conditions.age, 10) - self.assertIsNone(conditions.created_before) - self.assertIsNone(conditions.is_live) - self.assertEqual(conditions.matches_storage_class, ["COLDLINE"]) - self.assertIsNone(conditions.number_of_newer_versions) - - def test_ctor_w_created_before_and_is_live(self): - import datetime - - before = datetime.date(2018, 8, 1) - conditions = self._make_one(created_before=before, is_live=False) - expected = {"createdBefore": "2018-08-01", "isLive": False} - self.assertEqual(dict(conditions), expected) - self.assertIsNone(conditions.age) - self.assertEqual(conditions.created_before, before) - self.assertEqual(conditions.is_live, False) - self.assertIsNone(conditions.matches_storage_class) - self.assertIsNone(conditions.number_of_newer_versions) - - def test_ctor_w_number_of_newer_versions(self): - conditions = self._make_one(number_of_newer_versions=3) - expected = {"numNewerVersions": 3} - self.assertEqual(dict(conditions), expected) - self.assertIsNone(conditions.age) - self.assertIsNone(conditions.created_before) - self.assertIsNone(conditions.is_live) - self.assertIsNone(conditions.matches_storage_class) - self.assertEqual(conditions.number_of_newer_versions, 3) - - def test_from_api_repr(self): - import datetime - - before = datetime.date(2018, 8, 1) - klass = self._get_target_class() - resource = { - "age": 10, - "createdBefore": "2018-08-01", - "isLive": True, - "matchesStorageClass": ["COLDLINE"], - "numNewerVersions": 3, - } - conditions = klass.from_api_repr(resource) - self.assertEqual(conditions.age, 10) - self.assertEqual(conditions.created_before, before) - self.assertEqual(conditions.is_live, True) - self.assertEqual(conditions.matches_storage_class, ["COLDLINE"]) - self.assertEqual(conditions.number_of_newer_versions, 3) - - -class Test_LifecycleRuleDelete(unittest.TestCase): - @staticmethod - def _get_target_class(): - from 
google.cloud.storage.bucket import LifecycleRuleDelete - - return LifecycleRuleDelete - - def _make_one(self, **kw): - return self._get_target_class()(**kw) - - def test_ctor_wo_conditions(self): - with self.assertRaises(ValueError): - self._make_one() - - def test_ctor_w_condition(self): - rule = self._make_one(age=10, matches_storage_class=["COLDLINE"]) - expected = { - "action": {"type": "Delete"}, - "condition": {"age": 10, "matchesStorageClass": ["COLDLINE"]}, - } - self.assertEqual(dict(rule), expected) - - def test_from_api_repr(self): - klass = self._get_target_class() - conditions = { - "age": 10, - "createdBefore": "2018-08-01", - "isLive": True, - "matchesStorageClass": ["COLDLINE"], - "numNewerVersions": 3, - } - resource = {"action": {"type": "Delete"}, "condition": conditions} - rule = klass.from_api_repr(resource) - self.assertEqual(dict(rule), resource) - - -class Test_LifecycleRuleSetStorageClass(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.bucket import LifecycleRuleSetStorageClass - - return LifecycleRuleSetStorageClass - - def _make_one(self, **kw): - return self._get_target_class()(**kw) - - def test_ctor_wo_conditions(self): - with self.assertRaises(ValueError): - self._make_one(storage_class="COLDLINE") - - def test_ctor_w_condition(self): - rule = self._make_one( - storage_class="COLDLINE", age=10, matches_storage_class=["NEARLINE"] - ) - expected = { - "action": {"type": "SetStorageClass", "storageClass": "COLDLINE"}, - "condition": {"age": 10, "matchesStorageClass": ["NEARLINE"]}, - } - self.assertEqual(dict(rule), expected) - - def test_from_api_repr(self): - klass = self._get_target_class() - conditions = { - "age": 10, - "createdBefore": "2018-08-01", - "isLive": True, - "matchesStorageClass": ["NEARLINE"], - "numNewerVersions": 3, - } - resource = { - "action": {"type": "SetStorageClass", "storageClass": "COLDLINE"}, - "condition": conditions, - } - rule = klass.from_api_repr(resource) - self.assertEqual(dict(rule), resource) - - -class Test_IAMConfiguration(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.bucket import IAMConfiguration - - return IAMConfiguration - - def _make_one(self, bucket, **kw): - return self._get_target_class()(bucket, **kw) - - @staticmethod - def _make_bucket(): - from google.cloud.storage.bucket import Bucket - - return mock.create_autospec(Bucket, instance=True) - - def test_ctor_defaults(self): - bucket = self._make_bucket() - - config = self._make_one(bucket) - - self.assertIs(config.bucket, bucket) - self.assertFalse(config.uniform_bucket_level_access_enabled) - self.assertIsNone(config.uniform_bucket_level_access_locked_time) - self.assertFalse(config.bucket_policy_only_enabled) - self.assertIsNone(config.bucket_policy_only_locked_time) - - def test_ctor_explicit_ubla(self): - import datetime - import pytz - - bucket = self._make_bucket() - now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) - - config = self._make_one( - bucket, - uniform_bucket_level_access_enabled=True, - uniform_bucket_level_access_locked_time=now, - ) - - self.assertIs(config.bucket, bucket) - self.assertTrue(config.uniform_bucket_level_access_enabled) - self.assertEqual(config.uniform_bucket_level_access_locked_time, now) - self.assertTrue(config.bucket_policy_only_enabled) - self.assertEqual(config.bucket_policy_only_locked_time, now) - - def test_ctor_explicit_bpo(self): - import datetime - import pytz - - bucket = self._make_bucket() - now = 
datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) - - config = pytest.deprecated_call( - self._make_one, - bucket, - bucket_policy_only_enabled=True, - bucket_policy_only_locked_time=now, - ) - - self.assertIs(config.bucket, bucket) - self.assertTrue(config.uniform_bucket_level_access_enabled) - self.assertEqual(config.uniform_bucket_level_access_locked_time, now) - self.assertTrue(config.bucket_policy_only_enabled) - self.assertEqual(config.bucket_policy_only_locked_time, now) - - def test_ctor_ubla_and_bpo_enabled(self): - bucket = self._make_bucket() - - with self.assertRaises(ValueError): - self._make_one( - bucket, - uniform_bucket_level_access_enabled=True, - bucket_policy_only_enabled=True, - ) - - def test_ctor_ubla_and_bpo_time(self): - import datetime - import pytz - - bucket = self._make_bucket() - now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) - - with self.assertRaises(ValueError): - self._make_one( - bucket, - uniform_bucket_level_access_enabled=True, - uniform_bucket_level_access_locked_time=now, - bucket_policy_only_locked_time=now, - ) - - def test_from_api_repr_w_empty_resource(self): - klass = self._get_target_class() - bucket = self._make_bucket() - resource = {} - - config = klass.from_api_repr(resource, bucket) - - self.assertIs(config.bucket, bucket) - self.assertFalse(config.bucket_policy_only_enabled) - self.assertIsNone(config.bucket_policy_only_locked_time) - - def test_from_api_repr_w_empty_bpo(self): - klass = self._get_target_class() - bucket = self._make_bucket() - resource = {"uniformBucketLevelAccess": {}} - - config = klass.from_api_repr(resource, bucket) - - self.assertIs(config.bucket, bucket) - self.assertFalse(config.bucket_policy_only_enabled) - self.assertIsNone(config.bucket_policy_only_locked_time) - - def test_from_api_repr_w_disabled(self): - klass = self._get_target_class() - bucket = self._make_bucket() - resource = {"uniformBucketLevelAccess": {"enabled": False}} - - config = klass.from_api_repr(resource, bucket) - - self.assertIs(config.bucket, bucket) - self.assertFalse(config.bucket_policy_only_enabled) - self.assertIsNone(config.bucket_policy_only_locked_time) - - def test_from_api_repr_w_enabled(self): - import datetime - import pytz - from google.cloud._helpers import _datetime_to_rfc3339 - - klass = self._get_target_class() - bucket = self._make_bucket() - now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) - resource = { - "uniformBucketLevelAccess": { - "enabled": True, - "lockedTime": _datetime_to_rfc3339(now), - } - } - - config = klass.from_api_repr(resource, bucket) - - self.assertIs(config.bucket, bucket) - self.assertTrue(config.uniform_bucket_level_access_enabled) - self.assertEqual(config.uniform_bucket_level_access_locked_time, now) - self.assertTrue(config.bucket_policy_only_enabled) - self.assertEqual(config.bucket_policy_only_locked_time, now) - - def test_uniform_bucket_level_access_enabled_setter(self): - bucket = self._make_bucket() - config = self._make_one(bucket) - - config.uniform_bucket_level_access_enabled = True - self.assertTrue(config.bucket_policy_only_enabled) - - self.assertTrue(config["uniformBucketLevelAccess"]["enabled"]) - bucket._patch_property.assert_called_once_with("iamConfiguration", config) - - def test_bucket_policy_only_enabled_setter(self): - bucket = self._make_bucket() - config = self._make_one(bucket) - - with pytest.deprecated_call(): - config.bucket_policy_only_enabled = True - - self.assertTrue(config.uniform_bucket_level_access_enabled) - 
self.assertTrue(config["uniformBucketLevelAccess"]["enabled"]) - bucket._patch_property.assert_called_once_with("iamConfiguration", config) - - -class Test_Bucket(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.bucket import Bucket - - return Bucket - - def _make_one(self, client=None, name=None, properties=None, user_project=None): - if client is None: - connection = _Connection() - client = _Client(connection) - if user_project is None: - bucket = self._get_target_class()(client, name=name) - else: - bucket = self._get_target_class()( - client, name=name, user_project=user_project - ) - bucket._properties = properties or {} - return bucket - - def test_ctor_w_invalid_name(self): - NAME = "#invalid" - with self.assertRaises(ValueError): - self._make_one(name=NAME) - - def test_ctor(self): - NAME = "name" - properties = {"key": "value"} - bucket = self._make_one(name=NAME, properties=properties) - self.assertEqual(bucket.name, NAME) - self.assertEqual(bucket._properties, properties) - self.assertEqual(list(bucket._changes), []) - self.assertFalse(bucket._acl.loaded) - self.assertIs(bucket._acl.bucket, bucket) - self.assertFalse(bucket._default_object_acl.loaded) - self.assertIs(bucket._default_object_acl.bucket, bucket) - self.assertEqual(list(bucket._label_removals), []) - self.assertIsNone(bucket.user_project) - - def test_ctor_w_user_project(self): - NAME = "name" - USER_PROJECT = "user-project-123" - connection = _Connection() - client = _Client(connection) - bucket = self._make_one(client, name=NAME, user_project=USER_PROJECT) - self.assertEqual(bucket.name, NAME) - self.assertEqual(bucket._properties, {}) - self.assertEqual(list(bucket._changes), []) - self.assertFalse(bucket._acl.loaded) - self.assertIs(bucket._acl.bucket, bucket) - self.assertFalse(bucket._default_object_acl.loaded) - self.assertIs(bucket._default_object_acl.bucket, bucket) - self.assertEqual(list(bucket._label_removals), []) - self.assertEqual(bucket.user_project, USER_PROJECT) - - def test_blob_wo_keys(self): - from google.cloud.storage.blob import Blob - - BUCKET_NAME = "BUCKET_NAME" - BLOB_NAME = "BLOB_NAME" - CHUNK_SIZE = 1024 * 1024 - - bucket = self._make_one(name=BUCKET_NAME) - blob = bucket.blob(BLOB_NAME, chunk_size=CHUNK_SIZE) - self.assertIsInstance(blob, Blob) - self.assertIs(blob.bucket, bucket) - self.assertIs(blob.client, bucket.client) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(blob.chunk_size, CHUNK_SIZE) - self.assertIsNone(blob._encryption_key) - self.assertIsNone(blob.kms_key_name) - - def test_blob_w_encryption_key(self): - from google.cloud.storage.blob import Blob - - BUCKET_NAME = "BUCKET_NAME" - BLOB_NAME = "BLOB_NAME" - CHUNK_SIZE = 1024 * 1024 - KEY = b"01234567890123456789012345678901" # 32 bytes - - bucket = self._make_one(name=BUCKET_NAME) - blob = bucket.blob(BLOB_NAME, chunk_size=CHUNK_SIZE, encryption_key=KEY) - self.assertIsInstance(blob, Blob) - self.assertIs(blob.bucket, bucket) - self.assertIs(blob.client, bucket.client) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(blob.chunk_size, CHUNK_SIZE) - self.assertEqual(blob._encryption_key, KEY) - self.assertIsNone(blob.kms_key_name) - - def test_blob_w_generation(self): - from google.cloud.storage.blob import Blob - - BUCKET_NAME = "BUCKET_NAME" - BLOB_NAME = "BLOB_NAME" - GENERATION = 123 - - bucket = self._make_one(name=BUCKET_NAME) - blob = bucket.blob(BLOB_NAME, generation=GENERATION) - self.assertIsInstance(blob, Blob) - self.assertIs(blob.bucket, bucket) 
- self.assertIs(blob.client, bucket.client) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(blob.generation, GENERATION) - - def test_blob_w_kms_key_name(self): - from google.cloud.storage.blob import Blob - - BUCKET_NAME = "BUCKET_NAME" - BLOB_NAME = "BLOB_NAME" - CHUNK_SIZE = 1024 * 1024 - KMS_RESOURCE = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - - bucket = self._make_one(name=BUCKET_NAME) - blob = bucket.blob(BLOB_NAME, chunk_size=CHUNK_SIZE, kms_key_name=KMS_RESOURCE) - self.assertIsInstance(blob, Blob) - self.assertIs(blob.bucket, bucket) - self.assertIs(blob.client, bucket.client) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(blob.chunk_size, CHUNK_SIZE) - self.assertIsNone(blob._encryption_key) - self.assertEqual(blob.kms_key_name, KMS_RESOURCE) - - def test_notification_defaults(self): - from google.cloud.storage.notification import BucketNotification - from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT - - PROJECT = "PROJECT" - BUCKET_NAME = "BUCKET_NAME" - TOPIC_NAME = "TOPIC_NAME" - client = _Client(_Connection(), project=PROJECT) - bucket = self._make_one(client, name=BUCKET_NAME) - - notification = bucket.notification(TOPIC_NAME) - - self.assertIsInstance(notification, BucketNotification) - self.assertIs(notification.bucket, bucket) - self.assertEqual(notification.topic_project, PROJECT) - self.assertIsNone(notification.custom_attributes) - self.assertIsNone(notification.event_types) - self.assertIsNone(notification.blob_name_prefix) - self.assertEqual(notification.payload_format, NONE_PAYLOAD_FORMAT) - - def test_notification_explicit(self): - from google.cloud.storage.notification import ( - BucketNotification, - OBJECT_FINALIZE_EVENT_TYPE, - OBJECT_DELETE_EVENT_TYPE, - JSON_API_V1_PAYLOAD_FORMAT, - ) - - PROJECT = "PROJECT" - BUCKET_NAME = "BUCKET_NAME" - TOPIC_NAME = "TOPIC_NAME" - TOPIC_ALT_PROJECT = "topic-project-456" - CUSTOM_ATTRIBUTES = {"attr1": "value1", "attr2": "value2"} - EVENT_TYPES = [OBJECT_FINALIZE_EVENT_TYPE, OBJECT_DELETE_EVENT_TYPE] - BLOB_NAME_PREFIX = "blob-name-prefix/" - client = _Client(_Connection(), project=PROJECT) - bucket = self._make_one(client, name=BUCKET_NAME) - - notification = bucket.notification( - TOPIC_NAME, - topic_project=TOPIC_ALT_PROJECT, - custom_attributes=CUSTOM_ATTRIBUTES, - event_types=EVENT_TYPES, - blob_name_prefix=BLOB_NAME_PREFIX, - payload_format=JSON_API_V1_PAYLOAD_FORMAT, - ) - - self.assertIsInstance(notification, BucketNotification) - self.assertIs(notification.bucket, bucket) - self.assertEqual(notification.topic_project, TOPIC_ALT_PROJECT) - self.assertEqual(notification.custom_attributes, CUSTOM_ATTRIBUTES) - self.assertEqual(notification.event_types, EVENT_TYPES) - self.assertEqual(notification.blob_name_prefix, BLOB_NAME_PREFIX) - self.assertEqual(notification.payload_format, JSON_API_V1_PAYLOAD_FORMAT) - - def test_bucket_name_value(self): - BUCKET_NAME = "bucket-name" - self._make_one(name=BUCKET_NAME) - - bad_start_bucket_name = "/testing123" - with self.assertRaises(ValueError): - self._make_one(name=bad_start_bucket_name) - - bad_end_bucket_name = "testing123/" - with self.assertRaises(ValueError): - self._make_one(name=bad_end_bucket_name) - - def test_user_project(self): - BUCKET_NAME = "name" - USER_PROJECT = "user-project-123" - bucket = self._make_one(name=BUCKET_NAME) - bucket._user_project = USER_PROJECT - self.assertEqual(bucket.user_project, USER_PROJECT) - - def test_exists_miss(self): - from 
google.cloud.exceptions import NotFound - - class _FakeConnection(object): - - _called_with = [] - - @classmethod - def api_request(cls, *args, **kwargs): - cls._called_with.append((args, kwargs)) - raise NotFound(args) - - BUCKET_NAME = "bucket-name" - bucket = self._make_one(name=BUCKET_NAME) - client = _Client(_FakeConnection) - self.assertFalse(bucket.exists(client=client)) - expected_called_kwargs = { - "method": "GET", - "path": bucket.path, - "query_params": {"fields": "name"}, - "_target_object": None, - } - expected_cw = [((), expected_called_kwargs)] - self.assertEqual(_FakeConnection._called_with, expected_cw) - - def test_exists_hit_w_user_project(self): - USER_PROJECT = "user-project-123" - - class _FakeConnection(object): - - _called_with = [] - - @classmethod - def api_request(cls, *args, **kwargs): - cls._called_with.append((args, kwargs)) - # exists() does not use the return value - return object() - - BUCKET_NAME = "bucket-name" - bucket = self._make_one(name=BUCKET_NAME, user_project=USER_PROJECT) - client = _Client(_FakeConnection) - self.assertTrue(bucket.exists(client=client)) - expected_called_kwargs = { - "method": "GET", - "path": bucket.path, - "query_params": {"fields": "name", "userProject": USER_PROJECT}, - "_target_object": None, - } - expected_cw = [((), expected_called_kwargs)] - self.assertEqual(_FakeConnection._called_with, expected_cw) - - def test_create_w_user_project(self): - from google.cloud.storage.client import Client - - PROJECT = "PROJECT" - BUCKET_NAME = "bucket-name" - USER_PROJECT = "user-project-123" - - client = Client(project=PROJECT) - client._base_connection = _Connection() - - bucket = self._make_one(client, BUCKET_NAME, user_project=USER_PROJECT) - - with self.assertRaises(ValueError): - bucket.create() - - def test_create_w_missing_client_project(self): - from google.cloud.storage.client import Client - - BUCKET_NAME = "bucket-name" - - client = Client(project=None) - bucket = self._make_one(client, BUCKET_NAME) - - with self.assertRaises(ValueError): - bucket.create() - - def test_create_w_explicit_project(self): - from google.cloud.storage.client import Client - - PROJECT = "PROJECT" - BUCKET_NAME = "bucket-name" - OTHER_PROJECT = "other-project-123" - DATA = {"name": BUCKET_NAME} - connection = _make_connection(DATA) - - client = Client(project=PROJECT) - client._base_connection = connection - - bucket = self._make_one(client, BUCKET_NAME) - bucket.create(project=OTHER_PROJECT) - connection.api_request.assert_called_once_with( - method="POST", - path="/b", - query_params={"project": OTHER_PROJECT}, - data=DATA, - _target_object=bucket, - ) - - def test_create_w_explicit_location(self): - from google.cloud.storage.client import Client - - PROJECT = "PROJECT" - BUCKET_NAME = "bucket-name" - LOCATION = "us-central1" - DATA = {"location": LOCATION, "name": BUCKET_NAME} - - connection = _make_connection( - DATA, "{'location': 'us-central1', 'name': 'bucket-name'}" - ) - - client = Client(project=PROJECT) - client._base_connection = connection - - bucket = self._make_one(client, BUCKET_NAME) - bucket.create(location=LOCATION) - - connection.api_request.assert_called_once_with( - method="POST", - path="/b", - data=DATA, - _target_object=bucket, - query_params={"project": "PROJECT"}, - ) - self.assertEqual(bucket.location, LOCATION) - - def test_create_hit(self): - from google.cloud.storage.client import Client - - PROJECT = "PROJECT" - BUCKET_NAME = "bucket-name" - DATA = {"name": BUCKET_NAME} - connection = _make_connection(DATA) - client 
= Client(project=PROJECT) - client._base_connection = connection - - bucket = self._make_one(client=client, name=BUCKET_NAME) - bucket.create() - - connection.api_request.assert_called_once_with( - method="POST", - path="/b", - query_params={"project": PROJECT}, - data=DATA, - _target_object=bucket, - ) - - def test_create_w_extra_properties(self): - from google.cloud.storage.client import Client - - BUCKET_NAME = "bucket-name" - PROJECT = "PROJECT" - CORS = [ - { - "maxAgeSeconds": 60, - "methods": ["*"], - "origin": ["https://example.com/frontend"], - "responseHeader": ["X-Custom-Header"], - } - ] - LIFECYCLE_RULES = [{"action": {"type": "Delete"}, "condition": {"age": 365}}] - LOCATION = "eu" - LABELS = {"color": "red", "flavor": "cherry"} - STORAGE_CLASS = "NEARLINE" - DATA = { - "name": BUCKET_NAME, - "cors": CORS, - "lifecycle": {"rule": LIFECYCLE_RULES}, - "location": LOCATION, - "storageClass": STORAGE_CLASS, - "versioning": {"enabled": True}, - "billing": {"requesterPays": True}, - "labels": LABELS, - } - - connection = _make_connection(DATA) - client = Client(project=PROJECT) - client._base_connection = connection - - bucket = self._make_one(client=client, name=BUCKET_NAME) - bucket.cors = CORS - bucket.lifecycle_rules = LIFECYCLE_RULES - bucket.storage_class = STORAGE_CLASS - bucket.versioning_enabled = True - bucket.requester_pays = True - bucket.labels = LABELS - bucket.create(location=LOCATION) - - connection.api_request.assert_called_once_with( - method="POST", - path="/b", - query_params={"project": PROJECT}, - data=DATA, - _target_object=bucket, - ) - - def test_create_w_predefined_acl_invalid(self): - from google.cloud.storage.client import Client - - PROJECT = "PROJECT" - BUCKET_NAME = "bucket-name" - DATA = {"name": BUCKET_NAME} - connection = _Connection(DATA) - client = Client(project=PROJECT) - client._base_connection = connection - bucket = self._make_one(client=client, name=BUCKET_NAME) - - with self.assertRaises(ValueError): - bucket.create(predefined_acl="bogus") - - def test_create_w_predefined_acl_valid(self): - from google.cloud.storage.client import Client - - PROJECT = "PROJECT" - BUCKET_NAME = "bucket-name" - DATA = {"name": BUCKET_NAME} - connection = _Connection(DATA) - client = Client(project=PROJECT) - client._base_connection = connection - bucket = self._make_one(client=client, name=BUCKET_NAME) - bucket.create(predefined_acl="publicRead") - - kw, = connection._requested - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], "/b") - expected_qp = {"project": PROJECT, "predefinedAcl": "publicRead"} - self.assertEqual(kw["query_params"], expected_qp) - self.assertEqual(kw["data"], DATA) - - def test_create_w_predefined_default_object_acl_invalid(self): - from google.cloud.storage.client import Client - - PROJECT = "PROJECT" - BUCKET_NAME = "bucket-name" - DATA = {"name": BUCKET_NAME} - connection = _Connection(DATA) - client = Client(project=PROJECT) - client._base_connection = connection - bucket = self._make_one(client=client, name=BUCKET_NAME) - - with self.assertRaises(ValueError): - bucket.create(predefined_default_object_acl="bogus") - - def test_create_w_predefined_default_object_acl_valid(self): - from google.cloud.storage.client import Client - - PROJECT = "PROJECT" - BUCKET_NAME = "bucket-name" - DATA = {"name": BUCKET_NAME} - connection = _Connection(DATA) - client = Client(project=PROJECT) - client._base_connection = connection - bucket = self._make_one(client=client, name=BUCKET_NAME) - 
bucket.create(predefined_default_object_acl="publicRead") - - kw, = connection._requested - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], "/b") - expected_qp = {"project": PROJECT, "predefinedDefaultObjectAcl": "publicRead"} - self.assertEqual(kw["query_params"], expected_qp) - self.assertEqual(kw["data"], DATA) - - def test_acl_property(self): - from google.cloud.storage.acl import BucketACL - - bucket = self._make_one() - acl = bucket.acl - self.assertIsInstance(acl, BucketACL) - self.assertIs(acl, bucket._acl) - - def test_default_object_acl_property(self): - from google.cloud.storage.acl import DefaultObjectACL - - bucket = self._make_one() - acl = bucket.default_object_acl - self.assertIsInstance(acl, DefaultObjectACL) - self.assertIs(acl, bucket._default_object_acl) - - def test_path_no_name(self): - bucket = self._make_one() - self.assertRaises(ValueError, getattr, bucket, "path") - - def test_path_w_name(self): - NAME = "name" - bucket = self._make_one(name=NAME) - self.assertEqual(bucket.path, "/b/%s" % NAME) - - def test_get_blob_miss(self): - NAME = "name" - NONESUCH = "nonesuch" - connection = _Connection() - client = _Client(connection) - bucket = self._make_one(name=NAME) - result = bucket.get_blob(NONESUCH, client=client) - self.assertIsNone(result) - kw, = connection._requested - self.assertEqual(kw["method"], "GET") - self.assertEqual(kw["path"], "/b/%s/o/%s" % (NAME, NONESUCH)) - - def test_get_blob_hit_w_user_project(self): - NAME = "name" - BLOB_NAME = "blob-name" - USER_PROJECT = "user-project-123" - connection = _Connection({"name": BLOB_NAME}) - client = _Client(connection) - bucket = self._make_one(name=NAME, user_project=USER_PROJECT) - blob = bucket.get_blob(BLOB_NAME, client=client) - self.assertIs(blob.bucket, bucket) - self.assertEqual(blob.name, BLOB_NAME) - kw, = connection._requested - expected_qp = {"userProject": USER_PROJECT, "projection": "noAcl"} - self.assertEqual(kw["method"], "GET") - self.assertEqual(kw["path"], "/b/%s/o/%s" % (NAME, BLOB_NAME)) - self.assertEqual(kw["query_params"], expected_qp) - - def test_get_blob_hit_w_generation(self): - NAME = "name" - BLOB_NAME = "blob-name" - GENERATION = 1512565576797178 - connection = _Connection({"name": BLOB_NAME, "generation": GENERATION}) - client = _Client(connection) - bucket = self._make_one(name=NAME) - blob = bucket.get_blob(BLOB_NAME, client=client, generation=GENERATION) - self.assertIs(blob.bucket, bucket) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(blob.generation, GENERATION) - kw, = connection._requested - expected_qp = {"generation": GENERATION, "projection": "noAcl"} - self.assertEqual(kw["method"], "GET") - self.assertEqual(kw["path"], "/b/%s/o/%s" % (NAME, BLOB_NAME)) - self.assertEqual(kw["query_params"], expected_qp) - - def test_get_blob_hit_with_kwargs(self): - from google.cloud.storage.blob import _get_encryption_headers - - NAME = "name" - BLOB_NAME = "blob-name" - CHUNK_SIZE = 1024 * 1024 - KEY = b"01234567890123456789012345678901" # 32 bytes - - connection = _Connection({"name": BLOB_NAME}) - client = _Client(connection) - bucket = self._make_one(name=NAME) - blob = bucket.get_blob( - BLOB_NAME, client=client, encryption_key=KEY, chunk_size=CHUNK_SIZE - ) - self.assertIs(blob.bucket, bucket) - self.assertEqual(blob.name, BLOB_NAME) - kw, = connection._requested - self.assertEqual(kw["method"], "GET") - self.assertEqual(kw["path"], "/b/%s/o/%s" % (NAME, BLOB_NAME)) - self.assertEqual(kw["headers"], _get_encryption_headers(KEY)) - 
self.assertEqual(blob.chunk_size, CHUNK_SIZE) - self.assertEqual(blob._encryption_key, KEY) - - def test_list_blobs_defaults(self): - NAME = "name" - connection = _Connection({"items": []}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - iterator = bucket.list_blobs() - blobs = list(iterator) - self.assertEqual(blobs, []) - kw, = connection._requested - self.assertEqual(kw["method"], "GET") - self.assertEqual(kw["path"], "/b/%s/o" % NAME) - self.assertEqual(kw["query_params"], {"projection": "noAcl"}) - - def test_list_blobs_w_all_arguments_and_user_project(self): - NAME = "name" - USER_PROJECT = "user-project-123" - MAX_RESULTS = 10 - PAGE_TOKEN = "ABCD" - PREFIX = "subfolder" - DELIMITER = "/" - VERSIONS = True - PROJECTION = "full" - FIELDS = "items/contentLanguage,nextPageToken" - EXPECTED = { - "maxResults": 10, - "pageToken": PAGE_TOKEN, - "prefix": PREFIX, - "delimiter": DELIMITER, - "versions": VERSIONS, - "projection": PROJECTION, - "fields": FIELDS, - "userProject": USER_PROJECT, - } - connection = _Connection({"items": []}) - client = _Client(connection) - bucket = self._make_one(name=NAME, user_project=USER_PROJECT) - iterator = bucket.list_blobs( - max_results=MAX_RESULTS, - page_token=PAGE_TOKEN, - prefix=PREFIX, - delimiter=DELIMITER, - versions=VERSIONS, - projection=PROJECTION, - fields=FIELDS, - client=client, - ) - blobs = list(iterator) - self.assertEqual(blobs, []) - kw, = connection._requested - self.assertEqual(kw["method"], "GET") - self.assertEqual(kw["path"], "/b/%s/o" % NAME) - self.assertEqual(kw["query_params"], EXPECTED) - - def test_list_notifications(self): - from google.cloud.storage.notification import BucketNotification - from google.cloud.storage.notification import _TOPIC_REF_FMT - from google.cloud.storage.notification import ( - JSON_API_V1_PAYLOAD_FORMAT, - NONE_PAYLOAD_FORMAT, - ) - - NAME = "name" - - topic_refs = [("my-project-123", "topic-1"), ("other-project-456", "topic-2")] - - resources = [ - { - "topic": _TOPIC_REF_FMT.format(*topic_refs[0]), - "id": "1", - "etag": "DEADBEEF", - "selfLink": "https://example.com/notification/1", - "payload_format": NONE_PAYLOAD_FORMAT, - }, - { - "topic": _TOPIC_REF_FMT.format(*topic_refs[1]), - "id": "2", - "etag": "FACECABB", - "selfLink": "https://example.com/notification/2", - "payload_format": JSON_API_V1_PAYLOAD_FORMAT, - }, - ] - connection = _Connection({"items": resources}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - - notifications = list(bucket.list_notifications()) - - self.assertEqual(len(notifications), len(resources)) - for notification, resource, topic_ref in zip( - notifications, resources, topic_refs - ): - self.assertIsInstance(notification, BucketNotification) - self.assertEqual(notification.topic_project, topic_ref[0]) - self.assertEqual(notification.topic_name, topic_ref[1]) - self.assertEqual(notification.notification_id, resource["id"]) - self.assertEqual(notification.etag, resource["etag"]) - self.assertEqual(notification.self_link, resource["selfLink"]) - self.assertEqual( - notification.custom_attributes, resource.get("custom_attributes") - ) - self.assertEqual(notification.event_types, resource.get("event_types")) - self.assertEqual( - notification.blob_name_prefix, resource.get("blob_name_prefix") - ) - self.assertEqual( - notification.payload_format, resource.get("payload_format") - ) - - def test_delete_miss(self): - from google.cloud.exceptions import NotFound - - NAME = "name" - connection = 
_Connection() - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - self.assertRaises(NotFound, bucket.delete) - expected_cw = [ - { - "method": "DELETE", - "path": bucket.path, - "query_params": {}, - "_target_object": None, - } - ] - self.assertEqual(connection._deleted_buckets, expected_cw) - - def test_delete_hit_with_user_project(self): - NAME = "name" - USER_PROJECT = "user-project-123" - GET_BLOBS_RESP = {"items": []} - connection = _Connection(GET_BLOBS_RESP) - connection._delete_bucket = True - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME, user_project=USER_PROJECT) - result = bucket.delete(force=True) - self.assertIsNone(result) - expected_cw = [ - { - "method": "DELETE", - "path": bucket.path, - "_target_object": None, - "query_params": {"userProject": USER_PROJECT}, - } - ] - self.assertEqual(connection._deleted_buckets, expected_cw) - - def test_delete_force_delete_blobs(self): - NAME = "name" - BLOB_NAME1 = "blob-name1" - BLOB_NAME2 = "blob-name2" - GET_BLOBS_RESP = {"items": [{"name": BLOB_NAME1}, {"name": BLOB_NAME2}]} - DELETE_BLOB1_RESP = DELETE_BLOB2_RESP = {} - connection = _Connection(GET_BLOBS_RESP, DELETE_BLOB1_RESP, DELETE_BLOB2_RESP) - connection._delete_bucket = True - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - result = bucket.delete(force=True) - self.assertIsNone(result) - expected_cw = [ - { - "method": "DELETE", - "path": bucket.path, - "query_params": {}, - "_target_object": None, - } - ] - self.assertEqual(connection._deleted_buckets, expected_cw) - - def test_delete_force_miss_blobs(self): - NAME = "name" - BLOB_NAME = "blob-name1" - GET_BLOBS_RESP = {"items": [{"name": BLOB_NAME}]} - # Note the connection does not have a response for the blob. - connection = _Connection(GET_BLOBS_RESP) - connection._delete_bucket = True - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - result = bucket.delete(force=True) - self.assertIsNone(result) - expected_cw = [ - { - "method": "DELETE", - "path": bucket.path, - "query_params": {}, - "_target_object": None, - } - ] - self.assertEqual(connection._deleted_buckets, expected_cw) - - def test_delete_too_many(self): - NAME = "name" - BLOB_NAME1 = "blob-name1" - BLOB_NAME2 = "blob-name2" - GET_BLOBS_RESP = {"items": [{"name": BLOB_NAME1}, {"name": BLOB_NAME2}]} - connection = _Connection(GET_BLOBS_RESP) - connection._delete_bucket = True - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - - # Make the Bucket refuse to delete with 2 objects. 
- bucket._MAX_OBJECTS_FOR_ITERATION = 1 - self.assertRaises(ValueError, bucket.delete, force=True) - self.assertEqual(connection._deleted_buckets, []) - - def test_delete_blob_miss(self): - from google.cloud.exceptions import NotFound - - NAME = "name" - NONESUCH = "nonesuch" - connection = _Connection() - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - self.assertRaises(NotFound, bucket.delete_blob, NONESUCH) - kw, = connection._requested - self.assertEqual(kw["method"], "DELETE") - self.assertEqual(kw["path"], "/b/%s/o/%s" % (NAME, NONESUCH)) - self.assertEqual(kw["query_params"], {}) - - def test_delete_blob_hit_with_user_project(self): - NAME = "name" - BLOB_NAME = "blob-name" - USER_PROJECT = "user-project-123" - connection = _Connection({}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME, user_project=USER_PROJECT) - result = bucket.delete_blob(BLOB_NAME) - self.assertIsNone(result) - kw, = connection._requested - self.assertEqual(kw["method"], "DELETE") - self.assertEqual(kw["path"], "/b/%s/o/%s" % (NAME, BLOB_NAME)) - self.assertEqual(kw["query_params"], {"userProject": USER_PROJECT}) - - def test_delete_blob_hit_with_generation(self): - NAME = "name" - BLOB_NAME = "blob-name" - GENERATION = 1512565576797178 - connection = _Connection({}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - result = bucket.delete_blob(BLOB_NAME, generation=GENERATION) - self.assertIsNone(result) - kw, = connection._requested - self.assertEqual(kw["method"], "DELETE") - self.assertEqual(kw["path"], "/b/%s/o/%s" % (NAME, BLOB_NAME)) - self.assertEqual(kw["query_params"], {"generation": GENERATION}) - - def test_delete_blobs_empty(self): - NAME = "name" - connection = _Connection() - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.delete_blobs([]) - self.assertEqual(connection._requested, []) - - def test_delete_blobs_hit_w_user_project(self): - NAME = "name" - BLOB_NAME = "blob-name" - USER_PROJECT = "user-project-123" - connection = _Connection({}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME, user_project=USER_PROJECT) - bucket.delete_blobs([BLOB_NAME]) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "DELETE") - self.assertEqual(kw[0]["path"], "/b/%s/o/%s" % (NAME, BLOB_NAME)) - self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) - - def test_delete_blobs_miss_no_on_error(self): - from google.cloud.exceptions import NotFound - - NAME = "name" - BLOB_NAME = "blob-name" - NONESUCH = "nonesuch" - connection = _Connection({}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - self.assertRaises(NotFound, bucket.delete_blobs, [BLOB_NAME, NONESUCH]) - kw = connection._requested - self.assertEqual(len(kw), 2) - self.assertEqual(kw[0]["method"], "DELETE") - self.assertEqual(kw[0]["path"], "/b/%s/o/%s" % (NAME, BLOB_NAME)) - self.assertEqual(kw[1]["method"], "DELETE") - self.assertEqual(kw[1]["path"], "/b/%s/o/%s" % (NAME, NONESUCH)) - - def test_delete_blobs_miss_w_on_error(self): - NAME = "name" - BLOB_NAME = "blob-name" - NONESUCH = "nonesuch" - connection = _Connection({}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - errors = [] - bucket.delete_blobs([BLOB_NAME, NONESUCH], errors.append) - self.assertEqual(errors, [NONESUCH]) - kw = connection._requested - self.assertEqual(len(kw), 2) - 
self.assertEqual(kw[0]["method"], "DELETE") - self.assertEqual(kw[0]["path"], "/b/%s/o/%s" % (NAME, BLOB_NAME)) - self.assertEqual(kw[1]["method"], "DELETE") - self.assertEqual(kw[1]["path"], "/b/%s/o/%s" % (NAME, NONESUCH)) - - @staticmethod - def _make_blob(bucket_name, blob_name): - from google.cloud.storage.blob import Blob - - blob = mock.create_autospec(Blob) - blob.name = blob_name - blob.path = "/b/{}/o/{}".format(bucket_name, blob_name) - return blob - - def test_copy_blobs_wo_name(self): - SOURCE = "source" - DEST = "dest" - BLOB_NAME = "blob-name" - connection = _Connection({}) - client = _Client(connection) - source = self._make_one(client=client, name=SOURCE) - dest = self._make_one(client=client, name=DEST) - blob = self._make_blob(SOURCE, BLOB_NAME) - - new_blob = source.copy_blob(blob, dest) - - self.assertIs(new_blob.bucket, dest) - self.assertEqual(new_blob.name, BLOB_NAME) - - kw, = connection._requested - COPY_PATH = "/b/{}/o/{}/copyTo/b/{}/o/{}".format( - SOURCE, BLOB_NAME, DEST, BLOB_NAME - ) - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], COPY_PATH) - self.assertEqual(kw["query_params"], {}) - - def test_copy_blobs_source_generation(self): - SOURCE = "source" - DEST = "dest" - BLOB_NAME = "blob-name" - GENERATION = 1512565576797178 - - connection = _Connection({}) - client = _Client(connection) - source = self._make_one(client=client, name=SOURCE) - dest = self._make_one(client=client, name=DEST) - blob = self._make_blob(SOURCE, BLOB_NAME) - - new_blob = source.copy_blob(blob, dest, source_generation=GENERATION) - - self.assertIs(new_blob.bucket, dest) - self.assertEqual(new_blob.name, BLOB_NAME) - - kw, = connection._requested - COPY_PATH = "/b/{}/o/{}/copyTo/b/{}/o/{}".format( - SOURCE, BLOB_NAME, DEST, BLOB_NAME - ) - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], COPY_PATH) - self.assertEqual(kw["query_params"], {"sourceGeneration": GENERATION}) - - def test_copy_blobs_preserve_acl(self): - from google.cloud.storage.acl import ObjectACL - - SOURCE = "source" - DEST = "dest" - BLOB_NAME = "blob-name" - NEW_NAME = "new_name" - - connection = _Connection({}, {}) - client = _Client(connection) - source = self._make_one(client=client, name=SOURCE) - dest = self._make_one(client=client, name=DEST) - blob = self._make_blob(SOURCE, BLOB_NAME) - - new_blob = source.copy_blob( - blob, dest, NEW_NAME, client=client, preserve_acl=False - ) - - self.assertIs(new_blob.bucket, dest) - self.assertEqual(new_blob.name, NEW_NAME) - self.assertIsInstance(new_blob.acl, ObjectACL) - - kw1, kw2 = connection._requested - COPY_PATH = "/b/{}/o/{}/copyTo/b/{}/o/{}".format( - SOURCE, BLOB_NAME, DEST, NEW_NAME - ) - NEW_BLOB_PATH = "/b/{}/o/{}".format(DEST, NEW_NAME) - - self.assertEqual(kw1["method"], "POST") - self.assertEqual(kw1["path"], COPY_PATH) - self.assertEqual(kw1["query_params"], {}) - - self.assertEqual(kw2["method"], "PATCH") - self.assertEqual(kw2["path"], NEW_BLOB_PATH) - self.assertEqual(kw2["query_params"], {"projection": "full"}) - - def test_copy_blobs_w_name_and_user_project(self): - SOURCE = "source" - DEST = "dest" - BLOB_NAME = "blob-name" - NEW_NAME = "new_name" - USER_PROJECT = "user-project-123" - connection = _Connection({}) - client = _Client(connection) - source = self._make_one(client=client, name=SOURCE, user_project=USER_PROJECT) - dest = self._make_one(client=client, name=DEST) - blob = self._make_blob(SOURCE, BLOB_NAME) - - new_blob = source.copy_blob(blob, dest, NEW_NAME) - - self.assertIs(new_blob.bucket, 
dest) - self.assertEqual(new_blob.name, NEW_NAME) - - COPY_PATH = "/b/{}/o/{}/copyTo/b/{}/o/{}".format( - SOURCE, BLOB_NAME, DEST, NEW_NAME - ) - kw, = connection._requested - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], COPY_PATH) - self.assertEqual(kw["query_params"], {"userProject": USER_PROJECT}) - - def test_rename_blob(self): - BUCKET_NAME = "BUCKET_NAME" - BLOB_NAME = "blob-name" - NEW_BLOB_NAME = "new-blob-name" - DATA = {"name": NEW_BLOB_NAME} - connection = _Connection(DATA) - client = _Client(connection) - bucket = self._make_one(client=client, name=BUCKET_NAME) - blob = self._make_blob(BUCKET_NAME, BLOB_NAME) - - renamed_blob = bucket.rename_blob(blob, NEW_BLOB_NAME, client=client) - - self.assertIs(renamed_blob.bucket, bucket) - self.assertEqual(renamed_blob.name, NEW_BLOB_NAME) - - COPY_PATH = "/b/{}/o/{}/copyTo/b/{}/o/{}".format( - BUCKET_NAME, BLOB_NAME, BUCKET_NAME, NEW_BLOB_NAME - ) - kw, = connection._requested - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], COPY_PATH) - self.assertEqual(kw["query_params"], {}) - - blob.delete.assert_called_once_with(client) - - def test_rename_blob_to_itself(self): - BUCKET_NAME = "BUCKET_NAME" - BLOB_NAME = "blob-name" - DATA = {"name": BLOB_NAME} - connection = _Connection(DATA) - client = _Client(connection) - bucket = self._make_one(client=client, name=BUCKET_NAME) - blob = self._make_blob(BUCKET_NAME, BLOB_NAME) - - renamed_blob = bucket.rename_blob(blob, BLOB_NAME) - - self.assertIs(renamed_blob.bucket, bucket) - self.assertEqual(renamed_blob.name, BLOB_NAME) - - COPY_PATH = "/b/{}/o/{}/copyTo/b/{}/o/{}".format( - BUCKET_NAME, BLOB_NAME, BUCKET_NAME, BLOB_NAME - ) - kw, = connection._requested - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], COPY_PATH) - self.assertEqual(kw["query_params"], {}) - - blob.delete.assert_not_called() - - def test_etag(self): - ETAG = "ETAG" - properties = {"etag": ETAG} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.etag, ETAG) - - def test_id(self): - ID = "ID" - properties = {"id": ID} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.id, ID) - - def test_location_getter(self): - NAME = "name" - before = {"location": "AS"} - bucket = self._make_one(name=NAME, properties=before) - self.assertEqual(bucket.location, "AS") - - @mock.patch("warnings.warn") - def test_location_setter(self, mock_warn): - from google.cloud.storage import bucket as bucket_module - - NAME = "name" - bucket = self._make_one(name=NAME) - self.assertIsNone(bucket.location) - bucket.location = "AS" - self.assertEqual(bucket.location, "AS") - self.assertTrue("location" in bucket._changes) - mock_warn.assert_called_once_with( - bucket_module._LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2 - ) - - def test_iam_configuration_policy_missing(self): - from google.cloud.storage.bucket import IAMConfiguration - - NAME = "name" - bucket = self._make_one(name=NAME) - - config = bucket.iam_configuration - - self.assertIsInstance(config, IAMConfiguration) - self.assertIs(config.bucket, bucket) - self.assertFalse(config.bucket_policy_only_enabled) - self.assertIsNone(config.bucket_policy_only_locked_time) - - def test_iam_configuration_policy_w_entry(self): - import datetime - import pytz - from google.cloud._helpers import _datetime_to_rfc3339 - from google.cloud.storage.bucket import IAMConfiguration - - now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) - NAME = "name" - properties = { - 
"iamConfiguration": { - "uniformBucketLevelAccess": { - "enabled": True, - "lockedTime": _datetime_to_rfc3339(now), - } - } - } - bucket = self._make_one(name=NAME, properties=properties) - - config = bucket.iam_configuration - - self.assertIsInstance(config, IAMConfiguration) - self.assertIs(config.bucket, bucket) - self.assertTrue(config.uniform_bucket_level_access_enabled) - self.assertEqual(config.uniform_bucket_level_access_locked_time, now) - - def test_lifecycle_rules_getter_unknown_action_type(self): - NAME = "name" - BOGUS_RULE = {"action": {"type": "Bogus"}, "condition": {"age": 42}} - rules = [BOGUS_RULE] - properties = {"lifecycle": {"rule": rules}} - bucket = self._make_one(name=NAME, properties=properties) - - with self.assertRaises(ValueError): - list(bucket.lifecycle_rules) - - def test_lifecycle_rules_getter(self): - from google.cloud.storage.bucket import ( - LifecycleRuleDelete, - LifecycleRuleSetStorageClass, - ) - - NAME = "name" - DELETE_RULE = {"action": {"type": "Delete"}, "condition": {"age": 42}} - SSC_RULE = { - "action": {"type": "SetStorageClass", "storageClass": "NEARLINE"}, - "condition": {"isLive": False}, - } - rules = [DELETE_RULE, SSC_RULE] - properties = {"lifecycle": {"rule": rules}} - bucket = self._make_one(name=NAME, properties=properties) - - found = list(bucket.lifecycle_rules) - - delete_rule = found[0] - self.assertIsInstance(delete_rule, LifecycleRuleDelete) - self.assertEqual(dict(delete_rule), DELETE_RULE) - - ssc_rule = found[1] - self.assertIsInstance(ssc_rule, LifecycleRuleSetStorageClass) - self.assertEqual(dict(ssc_rule), SSC_RULE) - - def test_lifecycle_rules_setter_w_dicts(self): - NAME = "name" - DELETE_RULE = {"action": {"type": "Delete"}, "condition": {"age": 42}} - SSC_RULE = { - "action": {"type": "SetStorageClass", "storageClass": "NEARLINE"}, - "condition": {"isLive": False}, - } - rules = [DELETE_RULE, SSC_RULE] - bucket = self._make_one(name=NAME) - self.assertEqual(list(bucket.lifecycle_rules), []) - - bucket.lifecycle_rules = rules - - self.assertEqual([dict(rule) for rule in bucket.lifecycle_rules], rules) - self.assertTrue("lifecycle" in bucket._changes) - - def test_lifecycle_rules_setter_w_helpers(self): - from google.cloud.storage.bucket import ( - LifecycleRuleDelete, - LifecycleRuleSetStorageClass, - ) - - NAME = "name" - DELETE_RULE = {"action": {"type": "Delete"}, "condition": {"age": 42}} - SSC_RULE = { - "action": {"type": "SetStorageClass", "storageClass": "NEARLINE"}, - "condition": {"isLive": False}, - } - rules = [DELETE_RULE, SSC_RULE] - bucket = self._make_one(name=NAME) - self.assertEqual(list(bucket.lifecycle_rules), []) - - bucket.lifecycle_rules = [ - LifecycleRuleDelete(age=42), - LifecycleRuleSetStorageClass("NEARLINE", is_live=False), - ] - - self.assertEqual([dict(rule) for rule in bucket.lifecycle_rules], rules) - self.assertTrue("lifecycle" in bucket._changes) - - def test_clear_lifecycle_rules(self): - NAME = "name" - DELETE_RULE = {"action": {"type": "Delete"}, "condition": {"age": 42}} - SSC_RULE = { - "action": {"type": "SetStorageClass", "storageClass": "NEARLINE"}, - "condition": {"isLive": False}, - } - rules = [DELETE_RULE, SSC_RULE] - bucket = self._make_one(name=NAME) - bucket._properties["lifecycle"] = {"rule": rules} - self.assertEqual(list(bucket.lifecycle_rules), rules) - - bucket.clear_lifecyle_rules() - - self.assertEqual(list(bucket.lifecycle_rules), []) - self.assertTrue("lifecycle" in bucket._changes) - - def test_add_lifecycle_delete_rule(self): - NAME = "name" - DELETE_RULE = 
{"action": {"type": "Delete"}, "condition": {"age": 42}} - rules = [DELETE_RULE] - bucket = self._make_one(name=NAME) - self.assertEqual(list(bucket.lifecycle_rules), []) - - bucket.add_lifecycle_delete_rule(age=42) - - self.assertEqual([dict(rule) for rule in bucket.lifecycle_rules], rules) - self.assertTrue("lifecycle" in bucket._changes) - - def test_add_lifecycle_set_storage_class_rule(self): - NAME = "name" - SSC_RULE = { - "action": {"type": "SetStorageClass", "storageClass": "NEARLINE"}, - "condition": {"isLive": False}, - } - rules = [SSC_RULE] - bucket = self._make_one(name=NAME) - self.assertEqual(list(bucket.lifecycle_rules), []) - - bucket.add_lifecycle_set_storage_class_rule("NEARLINE", is_live=False) - - self.assertEqual([dict(rule) for rule in bucket.lifecycle_rules], rules) - self.assertTrue("lifecycle" in bucket._changes) - - def test_cors_getter(self): - NAME = "name" - CORS_ENTRY = { - "maxAgeSeconds": 1234, - "method": ["OPTIONS", "GET"], - "origin": ["127.0.0.1"], - "responseHeader": ["Content-Type"], - } - properties = {"cors": [CORS_ENTRY, {}]} - bucket = self._make_one(name=NAME, properties=properties) - entries = bucket.cors - self.assertEqual(len(entries), 2) - self.assertEqual(entries[0], CORS_ENTRY) - self.assertEqual(entries[1], {}) - # Make sure it was a copy, not the same object. - self.assertIsNot(entries[0], CORS_ENTRY) - - def test_cors_setter(self): - NAME = "name" - CORS_ENTRY = { - "maxAgeSeconds": 1234, - "method": ["OPTIONS", "GET"], - "origin": ["127.0.0.1"], - "responseHeader": ["Content-Type"], - } - bucket = self._make_one(name=NAME) - - self.assertEqual(bucket.cors, []) - bucket.cors = [CORS_ENTRY] - self.assertEqual(bucket.cors, [CORS_ENTRY]) - self.assertTrue("cors" in bucket._changes) - - def test_default_kms_key_name_getter(self): - NAME = "name" - KMS_RESOURCE = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - ENCRYPTION_CONFIG = {"defaultKmsKeyName": KMS_RESOURCE} - bucket = self._make_one(name=NAME) - self.assertIsNone(bucket.default_kms_key_name) - bucket._properties["encryption"] = ENCRYPTION_CONFIG - self.assertEqual(bucket.default_kms_key_name, KMS_RESOURCE) - - def test_default_kms_key_name_setter(self): - NAME = "name" - KMS_RESOURCE = ( - "projects/test-project-123/" - "locations/us/" - "keyRings/test-ring/" - "cryptoKeys/test-key" - ) - ENCRYPTION_CONFIG = {"defaultKmsKeyName": KMS_RESOURCE} - bucket = self._make_one(name=NAME) - bucket.default_kms_key_name = KMS_RESOURCE - self.assertEqual(bucket._properties["encryption"], ENCRYPTION_CONFIG) - self.assertTrue("encryption" in bucket._changes) - - def test_labels_getter(self): - NAME = "name" - LABELS = {"color": "red", "flavor": "cherry"} - properties = {"labels": LABELS} - bucket = self._make_one(name=NAME, properties=properties) - labels = bucket.labels - self.assertEqual(labels, LABELS) - # Make sure it was a copy, not the same object. 
- self.assertIsNot(labels, LABELS) - - def test_labels_setter(self): - NAME = "name" - LABELS = {"color": "red", "flavor": "cherry"} - bucket = self._make_one(name=NAME) - - self.assertEqual(bucket.labels, {}) - bucket.labels = LABELS - self.assertEqual(bucket.labels, LABELS) - self.assertIsNot(bucket._properties["labels"], LABELS) - self.assertIn("labels", bucket._changes) - - def test_labels_setter_with_nan(self): - NAME = "name" - LABELS = {"color": "red", "foo": float("nan")} - bucket = self._make_one(name=NAME) - - self.assertEqual(bucket.labels, {}) - bucket.labels = LABELS - value = bucket.labels["foo"] - self.assertIsInstance(value, str) - - def test_labels_setter_with_removal(self): - # Make sure the bucket labels look correct and follow the expected - # public structure. - bucket = self._make_one(name="name") - self.assertEqual(bucket.labels, {}) - bucket.labels = {"color": "red", "flavor": "cherry"} - self.assertEqual(bucket.labels, {"color": "red", "flavor": "cherry"}) - bucket.labels = {"color": "red"} - self.assertEqual(bucket.labels, {"color": "red"}) - - # Make sure that a patch call correctly removes the flavor label. - client = mock.NonCallableMock(spec=("_connection",)) - client._connection = mock.NonCallableMock(spec=("api_request",)) - bucket.patch(client=client) - client._connection.api_request.assert_called() - _, _, kwargs = client._connection.api_request.mock_calls[0] - self.assertEqual(len(kwargs["data"]["labels"]), 2) - self.assertEqual(kwargs["data"]["labels"]["color"], "red") - self.assertIsNone(kwargs["data"]["labels"]["flavor"]) - - # A second patch call should be a no-op for labels. - client._connection.api_request.reset_mock() - bucket.patch(client=client) - client._connection.api_request.assert_called() - _, _, kwargs = client._connection.api_request.mock_calls[0] - self.assertNotIn("labels", kwargs["data"]) - - def test_location_type_getter_unset(self): - bucket = self._make_one() - self.assertIsNone(bucket.location_type) - - def test_location_type_getter_set(self): - from google.cloud.storage.constants import REGION_LOCATION_TYPE - - properties = {"locationType": REGION_LOCATION_TYPE} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.location_type, REGION_LOCATION_TYPE) - - def test_get_logging_w_prefix(self): - NAME = "name" - LOG_BUCKET = "logs" - LOG_PREFIX = "pfx" - before = {"logging": {"logBucket": LOG_BUCKET, "logObjectPrefix": LOG_PREFIX}} - bucket = self._make_one(name=NAME, properties=before) - info = bucket.get_logging() - self.assertEqual(info["logBucket"], LOG_BUCKET) - self.assertEqual(info["logObjectPrefix"], LOG_PREFIX) - - def test_enable_logging_defaults(self): - NAME = "name" - LOG_BUCKET = "logs" - before = {"logging": None} - bucket = self._make_one(name=NAME, properties=before) - self.assertIsNone(bucket.get_logging()) - bucket.enable_logging(LOG_BUCKET) - info = bucket.get_logging() - self.assertEqual(info["logBucket"], LOG_BUCKET) - self.assertEqual(info["logObjectPrefix"], "") - - def test_enable_logging(self): - NAME = "name" - LOG_BUCKET = "logs" - LOG_PFX = "pfx" - before = {"logging": None} - bucket = self._make_one(name=NAME, properties=before) - self.assertIsNone(bucket.get_logging()) - bucket.enable_logging(LOG_BUCKET, LOG_PFX) - info = bucket.get_logging() - self.assertEqual(info["logBucket"], LOG_BUCKET) - self.assertEqual(info["logObjectPrefix"], LOG_PFX) - - def test_disable_logging(self): - NAME = "name" - before = {"logging": {"logBucket": "logs", "logObjectPrefix": "pfx"}} - bucket = 
self._make_one(name=NAME, properties=before) - self.assertIsNotNone(bucket.get_logging()) - bucket.disable_logging() - self.assertIsNone(bucket.get_logging()) - - def test_metageneration(self): - METAGENERATION = 42 - properties = {"metageneration": METAGENERATION} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.metageneration, METAGENERATION) - - def test_metageneration_unset(self): - bucket = self._make_one() - self.assertIsNone(bucket.metageneration) - - def test_metageneration_string_val(self): - METAGENERATION = 42 - properties = {"metageneration": str(METAGENERATION)} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.metageneration, METAGENERATION) - - def test_owner(self): - OWNER = {"entity": "project-owner-12345", "entityId": "23456"} - properties = {"owner": OWNER} - bucket = self._make_one(properties=properties) - owner = bucket.owner - self.assertEqual(owner["entity"], "project-owner-12345") - self.assertEqual(owner["entityId"], "23456") - - def test_project_number(self): - PROJECT_NUMBER = 12345 - properties = {"projectNumber": PROJECT_NUMBER} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.project_number, PROJECT_NUMBER) - - def test_project_number_unset(self): - bucket = self._make_one() - self.assertIsNone(bucket.project_number) - - def test_project_number_string_val(self): - PROJECT_NUMBER = 12345 - properties = {"projectNumber": str(PROJECT_NUMBER)} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.project_number, PROJECT_NUMBER) - - def test_retention_policy_effective_time_policy_missing(self): - bucket = self._make_one() - self.assertIsNone(bucket.retention_policy_effective_time) - - def test_retention_policy_effective_time_et_missing(self): - properties = {"retentionPolicy": {}} - bucket = self._make_one(properties=properties) - - self.assertIsNone(bucket.retention_policy_effective_time) - - def test_retention_policy_effective_time(self): - import datetime - from google.cloud._helpers import _datetime_to_rfc3339 - from google.cloud._helpers import UTC - - effective_time = datetime.datetime.utcnow().replace(tzinfo=UTC) - properties = { - "retentionPolicy": {"effectiveTime": _datetime_to_rfc3339(effective_time)} - } - bucket = self._make_one(properties=properties) - - self.assertEqual(bucket.retention_policy_effective_time, effective_time) - - def test_retention_policy_locked_missing(self): - bucket = self._make_one() - self.assertFalse(bucket.retention_policy_locked) - - def test_retention_policy_locked_false(self): - properties = {"retentionPolicy": {"isLocked": False}} - bucket = self._make_one(properties=properties) - self.assertFalse(bucket.retention_policy_locked) - - def test_retention_policy_locked_true(self): - properties = {"retentionPolicy": {"isLocked": True}} - bucket = self._make_one(properties=properties) - self.assertTrue(bucket.retention_policy_locked) - - def test_retention_period_getter_policymissing(self): - bucket = self._make_one() - - self.assertIsNone(bucket.retention_period) - - def test_retention_period_getter_pr_missing(self): - properties = {"retentionPolicy": {}} - bucket = self._make_one(properties=properties) - - self.assertIsNone(bucket.retention_period) - - def test_retention_period_getter(self): - period = 86400 * 100 # 100 days - properties = {"retentionPolicy": {"retentionPeriod": str(period)}} - bucket = self._make_one(properties=properties) - - self.assertEqual(bucket.retention_period, period) - - def 
test_retention_period_setter_w_none(self): - period = 86400 * 100 # 100 days - bucket = self._make_one() - bucket._properties["retentionPolicy"] = {"retentionPeriod": period} - - bucket.retention_period = None - - self.assertIsNone(bucket._properties["retentionPolicy"]) - - def test_retention_period_setter_w_int(self): - period = 86400 * 100 # 100 days - bucket = self._make_one() - - bucket.retention_period = period - - self.assertEqual( - bucket._properties["retentionPolicy"]["retentionPeriod"], str(period) - ) - - def test_self_link(self): - SELF_LINK = "http://example.com/self/" - properties = {"selfLink": SELF_LINK} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.self_link, SELF_LINK) - - def test_storage_class_getter(self): - from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS - - properties = {"storageClass": NEARLINE_STORAGE_CLASS} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.storage_class, NEARLINE_STORAGE_CLASS) - - def test_storage_class_setter_invalid(self): - NAME = "name" - bucket = self._make_one(name=NAME) - with self.assertRaises(ValueError): - bucket.storage_class = "BOGUS" - self.assertFalse("storageClass" in bucket._changes) - - def test_storage_class_setter_STANDARD(self): - from google.cloud.storage.constants import STANDARD_STORAGE_CLASS - - NAME = "name" - bucket = self._make_one(name=NAME) - bucket.storage_class = STANDARD_STORAGE_CLASS - self.assertEqual(bucket.storage_class, STANDARD_STORAGE_CLASS) - self.assertTrue("storageClass" in bucket._changes) - - def test_storage_class_setter_NEARLINE(self): - from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS - - NAME = "name" - bucket = self._make_one(name=NAME) - bucket.storage_class = NEARLINE_STORAGE_CLASS - self.assertEqual(bucket.storage_class, NEARLINE_STORAGE_CLASS) - self.assertTrue("storageClass" in bucket._changes) - - def test_storage_class_setter_COLDLINE(self): - from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS - - NAME = "name" - bucket = self._make_one(name=NAME) - bucket.storage_class = COLDLINE_STORAGE_CLASS - self.assertEqual(bucket.storage_class, COLDLINE_STORAGE_CLASS) - self.assertTrue("storageClass" in bucket._changes) - - def test_storage_class_setter_ARCHIVE(self): - from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS - - NAME = "name" - bucket = self._make_one(name=NAME) - bucket.storage_class = ARCHIVE_STORAGE_CLASS - self.assertEqual(bucket.storage_class, ARCHIVE_STORAGE_CLASS) - self.assertTrue("storageClass" in bucket._changes) - - def test_storage_class_setter_MULTI_REGIONAL(self): - from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS - - NAME = "name" - bucket = self._make_one(name=NAME) - bucket.storage_class = MULTI_REGIONAL_LEGACY_STORAGE_CLASS - self.assertEqual(bucket.storage_class, MULTI_REGIONAL_LEGACY_STORAGE_CLASS) - self.assertTrue("storageClass" in bucket._changes) - - def test_storage_class_setter_REGIONAL(self): - from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS - - NAME = "name" - bucket = self._make_one(name=NAME) - bucket.storage_class = REGIONAL_LEGACY_STORAGE_CLASS - self.assertEqual(bucket.storage_class, REGIONAL_LEGACY_STORAGE_CLASS) - self.assertTrue("storageClass" in bucket._changes) - - def test_storage_class_setter_DURABLE_REDUCED_AVAILABILITY(self): - from google.cloud.storage.constants import ( - DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, - ) - - NAME = "name" - bucket = 
self._make_one(name=NAME) - bucket.storage_class = DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS - self.assertEqual( - bucket.storage_class, DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS - ) - self.assertTrue("storageClass" in bucket._changes) - - def test_time_created(self): - from google.cloud._helpers import _RFC3339_MICROS - from google.cloud._helpers import UTC - - TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) - TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) - properties = {"timeCreated": TIME_CREATED} - bucket = self._make_one(properties=properties) - self.assertEqual(bucket.time_created, TIMESTAMP) - - def test_time_created_unset(self): - bucket = self._make_one() - self.assertIsNone(bucket.time_created) - - def test_versioning_enabled_getter_missing(self): - NAME = "name" - bucket = self._make_one(name=NAME) - self.assertEqual(bucket.versioning_enabled, False) - - def test_versioning_enabled_getter(self): - NAME = "name" - before = {"versioning": {"enabled": True}} - bucket = self._make_one(name=NAME, properties=before) - self.assertEqual(bucket.versioning_enabled, True) - - def test_versioning_enabled_setter(self): - NAME = "name" - bucket = self._make_one(name=NAME) - self.assertFalse(bucket.versioning_enabled) - bucket.versioning_enabled = True - self.assertTrue(bucket.versioning_enabled) - - def test_requester_pays_getter_missing(self): - NAME = "name" - bucket = self._make_one(name=NAME) - self.assertEqual(bucket.requester_pays, False) - - def test_requester_pays_getter(self): - NAME = "name" - before = {"billing": {"requesterPays": True}} - bucket = self._make_one(name=NAME, properties=before) - self.assertEqual(bucket.requester_pays, True) - - def test_requester_pays_setter(self): - NAME = "name" - bucket = self._make_one(name=NAME) - self.assertFalse(bucket.requester_pays) - bucket.requester_pays = True - self.assertTrue(bucket.requester_pays) - - def test_configure_website_defaults(self): - NAME = "name" - UNSET = {"website": {"mainPageSuffix": None, "notFoundPage": None}} - bucket = self._make_one(name=NAME) - bucket.configure_website() - self.assertEqual(bucket._properties, UNSET) - - def test_configure_website(self): - NAME = "name" - WEBSITE_VAL = { - "website": {"mainPageSuffix": "html", "notFoundPage": "404.html"} - } - bucket = self._make_one(name=NAME) - bucket.configure_website("html", "404.html") - self.assertEqual(bucket._properties, WEBSITE_VAL) - - def test_disable_website(self): - NAME = "name" - UNSET = {"website": {"mainPageSuffix": None, "notFoundPage": None}} - bucket = self._make_one(name=NAME) - bucket.disable_website() - self.assertEqual(bucket._properties, UNSET) - - def test_get_iam_policy(self): - from google.cloud.storage.iam import STORAGE_OWNER_ROLE - from google.cloud.storage.iam import STORAGE_EDITOR_ROLE - from google.cloud.storage.iam import STORAGE_VIEWER_ROLE - from google.api_core.iam import Policy - - NAME = "name" - PATH = "/b/%s" % (NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - OWNER1 = "user:phred@example.com" - OWNER2 = "group:cloud-logs@google.com" - EDITOR1 = "domain:google.com" - EDITOR2 = "user:phred@example.com" - VIEWER1 = "serviceAccount:1234-abcdef@service.example.com" - VIEWER2 = "user:phred@example.com" - RETURNED = { - "resourceId": PATH, - "etag": ETAG, - "version": VERSION, - "bindings": [ - {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}, - {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]}, - {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]}, - ], - } - 
EXPECTED = { - binding["role"]: set(binding["members"]) for binding in RETURNED["bindings"] - } - connection = _Connection(RETURNED) - client = _Client(connection, None) - bucket = self._make_one(client=client, name=NAME) - - policy = bucket.get_iam_policy() - - self.assertIsInstance(policy, Policy) - self.assertEqual(policy.etag, RETURNED["etag"]) - self.assertEqual(policy.version, RETURNED["version"]) - self.assertEqual(dict(policy), EXPECTED) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "GET") - self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {}) - - def test_get_iam_policy_w_user_project(self): - from google.api_core.iam import Policy - - NAME = "name" - USER_PROJECT = "user-project-123" - PATH = "/b/%s" % (NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - RETURNED = { - "resourceId": PATH, - "etag": ETAG, - "version": VERSION, - "bindings": [], - } - EXPECTED = {} - connection = _Connection(RETURNED) - client = _Client(connection, None) - bucket = self._make_one(client=client, name=NAME, user_project=USER_PROJECT) - - policy = bucket.get_iam_policy() - - self.assertIsInstance(policy, Policy) - self.assertEqual(policy.etag, RETURNED["etag"]) - self.assertEqual(policy.version, RETURNED["version"]) - self.assertEqual(dict(policy), EXPECTED) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "GET") - self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) - - def test_get_iam_policy_w_requested_policy_version(self): - from google.cloud.storage.iam import STORAGE_OWNER_ROLE - - NAME = "name" - PATH = "/b/%s" % (NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - OWNER1 = "user:phred@example.com" - OWNER2 = "group:cloud-logs@google.com" - RETURNED = { - "resourceId": PATH, - "etag": ETAG, - "version": VERSION, - "bindings": [{"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}], - } - connection = _Connection(RETURNED) - client = _Client(connection, None) - bucket = self._make_one(client=client, name=NAME) - - policy = bucket.get_iam_policy(requested_policy_version=3) - - self.assertEqual(policy.version, VERSION) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "GET") - self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {"optionsRequestedPolicyVersion": 3}) - - def test_set_iam_policy(self): - import operator - from google.cloud.storage.iam import STORAGE_OWNER_ROLE - from google.cloud.storage.iam import STORAGE_EDITOR_ROLE - from google.cloud.storage.iam import STORAGE_VIEWER_ROLE - from google.api_core.iam import Policy - - NAME = "name" - PATH = "/b/%s" % (NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - OWNER1 = "user:phred@example.com" - OWNER2 = "group:cloud-logs@google.com" - EDITOR1 = "domain:google.com" - EDITOR2 = "user:phred@example.com" - VIEWER1 = "serviceAccount:1234-abcdef@service.example.com" - VIEWER2 = "user:phred@example.com" - BINDINGS = [ - {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}, - {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]}, - {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]}, - ] - RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS} - policy = Policy() - for binding in BINDINGS: - policy[binding["role"]] = binding["members"] - - connection = _Connection(RETURNED) - client = _Client(connection, None) - bucket = 
self._make_one(client=client, name=NAME) - - returned = bucket.set_iam_policy(policy) - - self.assertEqual(returned.etag, ETAG) - self.assertEqual(returned.version, VERSION) - self.assertEqual(dict(returned), dict(policy)) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PUT") - self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {}) - sent = kw[0]["data"] - self.assertEqual(sent["resourceId"], PATH) - self.assertEqual(len(sent["bindings"]), len(BINDINGS)) - key = operator.itemgetter("role") - for found, expected in zip( - sorted(sent["bindings"], key=key), sorted(BINDINGS, key=key) - ): - self.assertEqual(found["role"], expected["role"]) - self.assertEqual(sorted(found["members"]), sorted(expected["members"])) - - def test_set_iam_policy_w_user_project(self): - import operator - from google.cloud.storage.iam import STORAGE_OWNER_ROLE - from google.cloud.storage.iam import STORAGE_EDITOR_ROLE - from google.cloud.storage.iam import STORAGE_VIEWER_ROLE - from google.api_core.iam import Policy - - NAME = "name" - USER_PROJECT = "user-project-123" - PATH = "/b/%s" % (NAME,) - ETAG = "DEADBEEF" - VERSION = 1 - OWNER1 = "user:phred@example.com" - OWNER2 = "group:cloud-logs@google.com" - EDITOR1 = "domain:google.com" - EDITOR2 = "user:phred@example.com" - VIEWER1 = "serviceAccount:1234-abcdef@service.example.com" - VIEWER2 = "user:phred@example.com" - BINDINGS = [ - {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}, - {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]}, - {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]}, - ] - RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS} - policy = Policy() - for binding in BINDINGS: - policy[binding["role"]] = binding["members"] - - connection = _Connection(RETURNED) - client = _Client(connection, None) - bucket = self._make_one(client=client, name=NAME, user_project=USER_PROJECT) - - returned = bucket.set_iam_policy(policy) - - self.assertEqual(returned.etag, ETAG) - self.assertEqual(returned.version, VERSION) - self.assertEqual(dict(returned), dict(policy)) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PUT") - self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) - sent = kw[0]["data"] - self.assertEqual(sent["resourceId"], PATH) - self.assertEqual(len(sent["bindings"]), len(BINDINGS)) - key = operator.itemgetter("role") - for found, expected in zip( - sorted(sent["bindings"], key=key), sorted(BINDINGS, key=key) - ): - self.assertEqual(found["role"], expected["role"]) - self.assertEqual(sorted(found["members"]), sorted(expected["members"])) - - def test_test_iam_permissions(self): - from google.cloud.storage.iam import STORAGE_OBJECTS_LIST - from google.cloud.storage.iam import STORAGE_BUCKETS_GET - from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE - - NAME = "name" - PATH = "/b/%s" % (NAME,) - PERMISSIONS = [ - STORAGE_OBJECTS_LIST, - STORAGE_BUCKETS_GET, - STORAGE_BUCKETS_UPDATE, - ] - ALLOWED = PERMISSIONS[1:] - RETURNED = {"permissions": ALLOWED} - connection = _Connection(RETURNED) - client = _Client(connection, None) - bucket = self._make_one(client=client, name=NAME) - - allowed = bucket.test_iam_permissions(PERMISSIONS) - - self.assertEqual(allowed, ALLOWED) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "GET") - 
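
The IAM tests around this point boil down to a get/modify/set round trip plus a permissions probe; a hedged sketch of that flow (bucket name and member are hypothetical):

    from google.cloud import storage
    from google.cloud.storage.iam import STORAGE_VIEWER_ROLE

    client = storage.Client()
    bucket = client.bucket("example-bucket")  # hypothetical bucket

    # The returned Policy behaves like a mapping of role -> set of members.
    policy = bucket.get_iam_policy()
    policy[STORAGE_VIEWER_ROLE] = {"group:data-readers@example.com"}  # hypothetical group
    bucket.set_iam_policy(policy)

    # Returns only the subset of permissions the caller actually holds.
    allowed = bucket.test_iam_permissions(
        ["storage.buckets.get", "storage.objects.list", "storage.buckets.update"]
    )
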
self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,)) - self.assertEqual(kw[0]["query_params"], {"permissions": PERMISSIONS}) - - def test_test_iam_permissions_w_user_project(self): - from google.cloud.storage.iam import STORAGE_OBJECTS_LIST - from google.cloud.storage.iam import STORAGE_BUCKETS_GET - from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE - - NAME = "name" - USER_PROJECT = "user-project-123" - PATH = "/b/%s" % (NAME,) - PERMISSIONS = [ - STORAGE_OBJECTS_LIST, - STORAGE_BUCKETS_GET, - STORAGE_BUCKETS_UPDATE, - ] - ALLOWED = PERMISSIONS[1:] - RETURNED = {"permissions": ALLOWED} - connection = _Connection(RETURNED) - client = _Client(connection, None) - bucket = self._make_one(client=client, name=NAME, user_project=USER_PROJECT) - - allowed = bucket.test_iam_permissions(PERMISSIONS) - - self.assertEqual(allowed, ALLOWED) - - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "GET") - self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,)) - self.assertEqual( - kw[0]["query_params"], - {"permissions": PERMISSIONS, "userProject": USER_PROJECT}, - ) - - def test_make_public_defaults(self): - from google.cloud.storage.acl import _ACLEntity - - NAME = "name" - permissive = [{"entity": "allUsers", "role": _ACLEntity.READER_ROLE}] - after = {"acl": permissive, "defaultObjectAcl": []} - connection = _Connection(after) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - bucket.make_public() - self.assertEqual(list(bucket.acl), permissive) - self.assertEqual(list(bucket.default_object_acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/b/%s" % NAME) - self.assertEqual(kw[0]["data"], {"acl": after["acl"]}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - - def _make_public_w_future_helper(self, default_object_acl_loaded=True): - from google.cloud.storage.acl import _ACLEntity - - NAME = "name" - permissive = [{"entity": "allUsers", "role": _ACLEntity.READER_ROLE}] - after1 = {"acl": permissive, "defaultObjectAcl": []} - after2 = {"acl": permissive, "defaultObjectAcl": permissive} - if default_object_acl_loaded: - num_requests = 2 - connection = _Connection(after1, after2) - else: - num_requests = 3 - # We return the same value for default_object_acl.reload() - # to consume. - connection = _Connection(after1, after1, after2) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = default_object_acl_loaded - bucket.make_public(future=True) - self.assertEqual(list(bucket.acl), permissive) - self.assertEqual(list(bucket.default_object_acl), permissive) - kw = connection._requested - self.assertEqual(len(kw), num_requests) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/b/%s" % NAME) - self.assertEqual(kw[0]["data"], {"acl": permissive}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - if not default_object_acl_loaded: - self.assertEqual(kw[1]["method"], "GET") - self.assertEqual(kw[1]["path"], "/b/%s/defaultObjectAcl" % NAME) - # Last could be 1 or 2 depending on `default_object_acl_loaded`. 
- self.assertEqual(kw[-1]["method"], "PATCH") - self.assertEqual(kw[-1]["path"], "/b/%s" % NAME) - self.assertEqual(kw[-1]["data"], {"defaultObjectAcl": permissive}) - self.assertEqual(kw[-1]["query_params"], {"projection": "full"}) - - def test_make_public_w_future(self): - self._make_public_w_future_helper(default_object_acl_loaded=True) - - def test_make_public_w_future_reload_default(self): - self._make_public_w_future_helper(default_object_acl_loaded=False) - - def test_make_public_recursive(self): - from google.cloud.storage.acl import _ACLEntity - - _saved = [] - - class _Blob(object): - _granted = False - - def __init__(self, bucket, name): - self._bucket = bucket - self._name = name - - @property - def acl(self): - return self - - # Faux ACL methods - def all(self): - return self - - def grant_read(self): - self._granted = True - - def save(self, client=None): - _saved.append((self._bucket, self._name, self._granted, client)) - - def item_to_blob(self, item): - return _Blob(self.bucket, item["name"]) - - NAME = "name" - BLOB_NAME = "blob-name" - permissive = [{"entity": "allUsers", "role": _ACLEntity.READER_ROLE}] - after = {"acl": permissive, "defaultObjectAcl": []} - connection = _Connection(after, {"items": [{"name": BLOB_NAME}]}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - - with mock.patch("google.cloud.storage.bucket._item_to_blob", new=item_to_blob): - bucket.make_public(recursive=True) - self.assertEqual(list(bucket.acl), permissive) - self.assertEqual(list(bucket.default_object_acl), []) - self.assertEqual(_saved, [(bucket, BLOB_NAME, True, None)]) - kw = connection._requested - self.assertEqual(len(kw), 2) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/b/%s" % NAME) - self.assertEqual(kw[0]["data"], {"acl": permissive}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - self.assertEqual(kw[1]["method"], "GET") - self.assertEqual(kw[1]["path"], "/b/%s/o" % NAME) - max_results = bucket._MAX_OBJECTS_FOR_ITERATION + 1 - self.assertEqual( - kw[1]["query_params"], {"maxResults": max_results, "projection": "full"} - ) - - def test_make_public_recursive_too_many(self): - from google.cloud.storage.acl import _ACLEntity - - PERMISSIVE = [{"entity": "allUsers", "role": _ACLEntity.READER_ROLE}] - AFTER = {"acl": PERMISSIVE, "defaultObjectAcl": []} - - NAME = "name" - BLOB_NAME1 = "blob-name1" - BLOB_NAME2 = "blob-name2" - GET_BLOBS_RESP = {"items": [{"name": BLOB_NAME1}, {"name": BLOB_NAME2}]} - connection = _Connection(AFTER, GET_BLOBS_RESP) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - - # Make the Bucket refuse to make_public with 2 objects. 
- bucket._MAX_OBJECTS_FOR_ITERATION = 1 - self.assertRaises(ValueError, bucket.make_public, recursive=True) - - def test_make_private_defaults(self): - NAME = "name" - no_permissions = [] - after = {"acl": no_permissions, "defaultObjectAcl": []} - connection = _Connection(after) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - bucket.make_private() - self.assertEqual(list(bucket.acl), no_permissions) - self.assertEqual(list(bucket.default_object_acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/b/%s" % NAME) - self.assertEqual(kw[0]["data"], {"acl": after["acl"]}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - - def _make_private_w_future_helper(self, default_object_acl_loaded=True): - NAME = "name" - no_permissions = [] - after1 = {"acl": no_permissions, "defaultObjectAcl": []} - after2 = {"acl": no_permissions, "defaultObjectAcl": no_permissions} - if default_object_acl_loaded: - num_requests = 2 - connection = _Connection(after1, after2) - else: - num_requests = 3 - # We return the same value for default_object_acl.reload() - # to consume. - connection = _Connection(after1, after1, after2) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = default_object_acl_loaded - bucket.make_private(future=True) - self.assertEqual(list(bucket.acl), no_permissions) - self.assertEqual(list(bucket.default_object_acl), no_permissions) - kw = connection._requested - self.assertEqual(len(kw), num_requests) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/b/%s" % NAME) - self.assertEqual(kw[0]["data"], {"acl": no_permissions}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - if not default_object_acl_loaded: - self.assertEqual(kw[1]["method"], "GET") - self.assertEqual(kw[1]["path"], "/b/%s/defaultObjectAcl" % NAME) - # Last could be 1 or 2 depending on `default_object_acl_loaded`. 
- self.assertEqual(kw[-1]["method"], "PATCH") - self.assertEqual(kw[-1]["path"], "/b/%s" % NAME) - self.assertEqual(kw[-1]["data"], {"defaultObjectAcl": no_permissions}) - self.assertEqual(kw[-1]["query_params"], {"projection": "full"}) - - def test_make_private_w_future(self): - self._make_private_w_future_helper(default_object_acl_loaded=True) - - def test_make_private_w_future_reload_default(self): - self._make_private_w_future_helper(default_object_acl_loaded=False) - - def test_make_private_recursive(self): - _saved = [] - - class _Blob(object): - _granted = True - - def __init__(self, bucket, name): - self._bucket = bucket - self._name = name - - @property - def acl(self): - return self - - # Faux ACL methods - def all(self): - return self - - def revoke_read(self): - self._granted = False - - def save(self, client=None): - _saved.append((self._bucket, self._name, self._granted, client)) - - def item_to_blob(self, item): - return _Blob(self.bucket, item["name"]) - - NAME = "name" - BLOB_NAME = "blob-name" - no_permissions = [] - after = {"acl": no_permissions, "defaultObjectAcl": []} - connection = _Connection(after, {"items": [{"name": BLOB_NAME}]}) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - - with mock.patch("google.cloud.storage.bucket._item_to_blob", new=item_to_blob): - bucket.make_private(recursive=True) - self.assertEqual(list(bucket.acl), no_permissions) - self.assertEqual(list(bucket.default_object_acl), []) - self.assertEqual(_saved, [(bucket, BLOB_NAME, False, None)]) - kw = connection._requested - self.assertEqual(len(kw), 2) - self.assertEqual(kw[0]["method"], "PATCH") - self.assertEqual(kw[0]["path"], "/b/%s" % NAME) - self.assertEqual(kw[0]["data"], {"acl": no_permissions}) - self.assertEqual(kw[0]["query_params"], {"projection": "full"}) - self.assertEqual(kw[1]["method"], "GET") - self.assertEqual(kw[1]["path"], "/b/%s/o" % NAME) - max_results = bucket._MAX_OBJECTS_FOR_ITERATION + 1 - self.assertEqual( - kw[1]["query_params"], {"maxResults": max_results, "projection": "full"} - ) - - def test_make_private_recursive_too_many(self): - NO_PERMISSIONS = [] - AFTER = {"acl": NO_PERMISSIONS, "defaultObjectAcl": []} - - NAME = "name" - BLOB_NAME1 = "blob-name1" - BLOB_NAME2 = "blob-name2" - GET_BLOBS_RESP = {"items": [{"name": BLOB_NAME1}, {"name": BLOB_NAME2}]} - connection = _Connection(AFTER, GET_BLOBS_RESP) - client = _Client(connection) - bucket = self._make_one(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - - # Make the Bucket refuse to make_private with 2 objects. 
- bucket._MAX_OBJECTS_FOR_ITERATION = 1 - self.assertRaises(ValueError, bucket.make_private, recursive=True) - - def test_page_empty_response(self): - from google.api_core import page_iterator - - connection = _Connection() - client = _Client(connection) - name = "name" - bucket = self._make_one(client=client, name=name) - iterator = bucket.list_blobs() - page = page_iterator.Page(iterator, (), None) - iterator._page = page - blobs = list(page) - self.assertEqual(blobs, []) - self.assertEqual(iterator.prefixes, set()) - - def test_page_non_empty_response(self): - import six - from google.cloud.storage.blob import Blob - - blob_name = "blob-name" - response = {"items": [{"name": blob_name}], "prefixes": ["foo"]} - connection = _Connection() - client = _Client(connection) - name = "name" - bucket = self._make_one(client=client, name=name) - - def dummy_response(): - return response - - iterator = bucket.list_blobs() - iterator._get_next_page_response = dummy_response - - page = six.next(iterator.pages) - self.assertEqual(page.prefixes, ("foo",)) - self.assertEqual(page.num_items, 1) - blob = six.next(page) - self.assertEqual(page.remaining, 0) - self.assertIsInstance(blob, Blob) - self.assertEqual(blob.name, blob_name) - self.assertEqual(iterator.prefixes, set(["foo"])) - - def test_cumulative_prefixes(self): - import six - from google.cloud.storage.blob import Blob - - BLOB_NAME = "blob-name1" - response1 = { - "items": [{"name": BLOB_NAME}], - "prefixes": ["foo"], - "nextPageToken": "s39rmf9", - } - response2 = {"items": [], "prefixes": ["bar"]} - connection = _Connection() - client = _Client(connection) - name = "name" - bucket = self._make_one(client=client, name=name) - responses = [response1, response2] - - def dummy_response(): - return responses.pop(0) - - iterator = bucket.list_blobs() - iterator._get_next_page_response = dummy_response - - # Parse first response. - pages_iter = iterator.pages - page1 = six.next(pages_iter) - self.assertEqual(page1.prefixes, ("foo",)) - self.assertEqual(page1.num_items, 1) - blob = six.next(page1) - self.assertEqual(page1.remaining, 0) - self.assertIsInstance(blob, Blob) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(iterator.prefixes, set(["foo"])) - # Parse second response. 
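
The paging tests here verify that delimiter-style "prefixes" accumulate across pages; typical listing code looks roughly like this (bucket name and prefix are hypothetical):

    from google.cloud import storage

    client = storage.Client()
    bucket = client.bucket("example-bucket")  # hypothetical bucket

    iterator = bucket.list_blobs(prefix="logs/", delimiter="/")
    for blob in iterator:
        print(blob.name)

    # Populated lazily as pages are consumed, e.g. {"logs/2020/"}.
    print(iterator.prefixes)
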
- page2 = six.next(pages_iter) - self.assertEqual(page2.prefixes, ("bar",)) - self.assertEqual(page2.num_items, 0) - self.assertEqual(iterator.prefixes, set(["foo", "bar"])) - - def _test_generate_upload_policy_helper(self, **kwargs): - import base64 - import json - - credentials = _create_signing_credentials() - credentials.signer_email = mock.sentinel.signer_email - credentials.sign_bytes.return_value = b"DEADBEEF" - connection = _Connection() - connection.credentials = credentials - client = _Client(connection) - name = "name" - bucket = self._make_one(client=client, name=name) - - conditions = [["starts-with", "$key", ""]] - - policy_fields = bucket.generate_upload_policy(conditions, **kwargs) - - self.assertEqual(policy_fields["bucket"], bucket.name) - self.assertEqual(policy_fields["GoogleAccessId"], mock.sentinel.signer_email) - self.assertEqual( - policy_fields["signature"], base64.b64encode(b"DEADBEEF").decode("utf-8") - ) - - policy = json.loads(base64.b64decode(policy_fields["policy"]).decode("utf-8")) - - policy_conditions = policy["conditions"] - expected_conditions = [{"bucket": bucket.name}] + conditions - for expected_condition in expected_conditions: - for condition in policy_conditions: - if condition == expected_condition: - break - else: # pragma: NO COVER - self.fail( - "Condition {} not found in {}".format( - expected_condition, policy_conditions - ) - ) - - return policy_fields, policy - - @mock.patch( - "google.cloud.storage.bucket._NOW", return_value=datetime.datetime(1990, 1, 1) - ) - def test_generate_upload_policy(self, now): - from google.cloud._helpers import _datetime_to_rfc3339 - - _, policy = self._test_generate_upload_policy_helper() - - self.assertEqual( - policy["expiration"], - _datetime_to_rfc3339(now() + datetime.timedelta(hours=1)), - ) - - def test_generate_upload_policy_args(self): - from google.cloud._helpers import _datetime_to_rfc3339 - - expiration = datetime.datetime(1990, 5, 29) - - _, policy = self._test_generate_upload_policy_helper(expiration=expiration) - - self.assertEqual(policy["expiration"], _datetime_to_rfc3339(expiration)) - - def test_generate_upload_policy_bad_credentials(self): - credentials = object() - connection = _Connection() - connection.credentials = credentials - client = _Client(connection) - name = "name" - bucket = self._make_one(client=client, name=name) - - with self.assertRaises(AttributeError): - bucket.generate_upload_policy([]) - - def test_lock_retention_policy_no_policy_set(self): - credentials = object() - connection = _Connection() - connection.credentials = credentials - client = _Client(connection) - name = "name" - bucket = self._make_one(client=client, name=name) - bucket._properties["metageneration"] = 1234 - - with self.assertRaises(ValueError): - bucket.lock_retention_policy() - - def test_lock_retention_policy_no_metageneration(self): - credentials = object() - connection = _Connection() - connection.credentials = credentials - client = _Client(connection) - name = "name" - bucket = self._make_one(client=client, name=name) - bucket._properties["retentionPolicy"] = { - "effectiveTime": "2018-03-01T16:46:27.123456Z", - "retentionPeriod": 86400 * 100, # 100 days - } - - with self.assertRaises(ValueError): - bucket.lock_retention_policy() - - def test_lock_retention_policy_already_locked(self): - credentials = object() - connection = _Connection() - connection.credentials = credentials - client = _Client(connection) - name = "name" - bucket = self._make_one(client=client, name=name) - 
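
The retention-policy tests around this point assert the client-side preconditions for locking. A sketch of the happy path (hypothetical bucket, with the usual caveat that locking a retention policy is irreversible):

    from google.cloud import storage

    client = storage.Client()
    bucket = client.get_bucket("example-bucket")  # hypothetical bucket

    bucket.retention_period = 86400 * 100  # 100 days; serialized as a string
    bucket.patch()

    # Requires a current metageneration and an unlocked policy; the library
    # raises ValueError before issuing the POST otherwise.
    bucket.lock_retention_policy()
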
bucket._properties["metageneration"] = 1234 - bucket._properties["retentionPolicy"] = { - "effectiveTime": "2018-03-01T16:46:27.123456Z", - "isLocked": True, - "retentionPeriod": 86400 * 100, # 100 days - } - - with self.assertRaises(ValueError): - bucket.lock_retention_policy() - - def test_lock_retention_policy_ok(self): - name = "name" - response = { - "name": name, - "metageneration": 1235, - "retentionPolicy": { - "effectiveTime": "2018-03-01T16:46:27.123456Z", - "isLocked": True, - "retentionPeriod": 86400 * 100, # 100 days - }, - } - credentials = object() - connection = _Connection(response) - connection.credentials = credentials - client = _Client(connection) - bucket = self._make_one(client=client, name=name) - bucket._properties["metageneration"] = 1234 - bucket._properties["retentionPolicy"] = { - "effectiveTime": "2018-03-01T16:46:27.123456Z", - "retentionPeriod": 86400 * 100, # 100 days - } - - bucket.lock_retention_policy() - - kw, = connection._requested - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], "/b/{}/lockRetentionPolicy".format(name)) - self.assertEqual(kw["query_params"], {"ifMetagenerationMatch": 1234}) - - def test_lock_retention_policy_w_user_project(self): - name = "name" - user_project = "user-project-123" - response = { - "name": name, - "metageneration": 1235, - "retentionPolicy": { - "effectiveTime": "2018-03-01T16:46:27.123456Z", - "isLocked": True, - "retentionPeriod": 86400 * 100, # 100 days - }, - } - credentials = object() - connection = _Connection(response) - connection.credentials = credentials - client = _Client(connection) - bucket = self._make_one(client=client, name=name, user_project=user_project) - bucket._properties["metageneration"] = 1234 - bucket._properties["retentionPolicy"] = { - "effectiveTime": "2018-03-01T16:46:27.123456Z", - "retentionPeriod": 86400 * 100, # 100 days - } - - bucket.lock_retention_policy() - - kw, = connection._requested - self.assertEqual(kw["method"], "POST") - self.assertEqual(kw["path"], "/b/{}/lockRetentionPolicy".format(name)) - self.assertEqual( - kw["query_params"], - {"ifMetagenerationMatch": 1234, "userProject": user_project}, - ) - - def test_generate_signed_url_w_invalid_version(self): - expiration = "2014-10-16T20:34:37.000Z" - connection = _Connection() - client = _Client(connection) - bucket = self._make_one(name="bucket_name", client=client) - with self.assertRaises(ValueError): - bucket.generate_signed_url(expiration, version="nonesuch") - - def _generate_signed_url_helper( - self, - version=None, - bucket_name="bucket-name", - api_access_endpoint=None, - method="GET", - content_md5=None, - content_type=None, - response_type=None, - response_disposition=None, - generation=None, - headers=None, - query_parameters=None, - credentials=None, - expiration=None, - ): - from six.moves.urllib import parse - from google.cloud._helpers import UTC - from google.cloud.storage.blob import _API_ACCESS_ENDPOINT - - api_access_endpoint = api_access_endpoint or _API_ACCESS_ENDPOINT - - delta = datetime.timedelta(hours=1) - - if expiration is None: - expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) + delta - - connection = _Connection() - client = _Client(connection) - bucket = self._make_one(name=bucket_name, client=client) - - if version is None: - effective_version = "v2" - else: - effective_version = version - - to_patch = "google.cloud.storage.bucket.generate_signed_url_{}".format( - effective_version - ) - - with mock.patch(to_patch) as signer: - signed_uri = 
bucket.generate_signed_url( - expiration=expiration, - api_access_endpoint=api_access_endpoint, - method=method, - credentials=credentials, - headers=headers, - query_parameters=query_parameters, - version=version, - ) - - self.assertEqual(signed_uri, signer.return_value) - - if credentials is None: - expected_creds = client._credentials - else: - expected_creds = credentials - - encoded_name = bucket_name.encode("utf-8") - expected_resource = "/{}".format(parse.quote(encoded_name)) - expected_kwargs = { - "resource": expected_resource, - "expiration": expiration, - "api_access_endpoint": api_access_endpoint, - "method": method.upper(), - "headers": headers, - "query_parameters": query_parameters, - } - signer.assert_called_once_with(expected_creds, **expected_kwargs) - - def test_get_bucket_from_string_w_valid_uri(self): - from google.cloud.storage.bucket import Bucket - - connection = _Connection() - client = _Client(connection) - BUCKET_NAME = "BUCKET_NAME" - uri = "gs://" + BUCKET_NAME - bucket = Bucket.from_string(uri, client) - self.assertIsInstance(bucket, Bucket) - self.assertIs(bucket.client, client) - self.assertEqual(bucket.name, BUCKET_NAME) - - def test_get_bucket_from_string_w_invalid_uri(self): - from google.cloud.storage.bucket import Bucket - - connection = _Connection() - client = _Client(connection) - - with pytest.raises(ValueError, match="URI scheme must be gs"): - Bucket.from_string("http://bucket_name", client) - - def test_get_bucket_from_string_w_domain_name_bucket(self): - from google.cloud.storage.bucket import Bucket - - connection = _Connection() - client = _Client(connection) - BUCKET_NAME = "buckets.example.com" - uri = "gs://" + BUCKET_NAME - bucket = Bucket.from_string(uri, client) - self.assertIsInstance(bucket, Bucket) - self.assertIs(bucket.client, client) - self.assertEqual(bucket.name, BUCKET_NAME) - - def test_generate_signed_url_no_version_passed_warning(self): - self._generate_signed_url_helper() - - def _generate_signed_url_v2_helper(self, **kw): - version = "v2" - self._generate_signed_url_helper(version, **kw) - - def test_generate_signed_url_v2_w_defaults(self): - self._generate_signed_url_v2_helper() - - def test_generate_signed_url_v2_w_expiration(self): - from google.cloud._helpers import UTC - - expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) - self._generate_signed_url_v2_helper(expiration=expiration) - - def test_generate_signed_url_v2_w_endpoint(self): - self._generate_signed_url_v2_helper( - api_access_endpoint="https://api.example.com/v1" - ) - - def test_generate_signed_url_v2_w_method(self): - self._generate_signed_url_v2_helper(method="POST") - - def test_generate_signed_url_v2_w_lowercase_method(self): - self._generate_signed_url_v2_helper(method="get") - - def test_generate_signed_url_v2_w_content_md5(self): - self._generate_signed_url_v2_helper(content_md5="FACEDACE") - - def test_generate_signed_url_v2_w_content_type(self): - self._generate_signed_url_v2_helper(content_type="text.html") - - def test_generate_signed_url_v2_w_response_type(self): - self._generate_signed_url_v2_helper(response_type="text.html") - - def test_generate_signed_url_v2_w_response_disposition(self): - self._generate_signed_url_v2_helper(response_disposition="inline") - - def test_generate_signed_url_v2_w_generation(self): - self._generate_signed_url_v2_helper(generation=12345) - - def test_generate_signed_url_v2_w_headers(self): - self._generate_signed_url_v2_helper(headers={"x-goog-foo": "bar"}) - - def 
test_generate_signed_url_v2_w_credentials(self): - credentials = object() - self._generate_signed_url_v2_helper(credentials=credentials) - - def _generate_signed_url_v4_helper(self, **kw): - version = "v4" - self._generate_signed_url_helper(version, **kw) - - def test_generate_signed_url_v4_w_defaults(self): - self._generate_signed_url_v2_helper() - - def test_generate_signed_url_v4_w_endpoint(self): - self._generate_signed_url_v4_helper( - api_access_endpoint="https://api.example.com/v1" - ) - - def test_generate_signed_url_v4_w_method(self): - self._generate_signed_url_v4_helper(method="POST") - - def test_generate_signed_url_v4_w_lowercase_method(self): - self._generate_signed_url_v4_helper(method="get") - - def test_generate_signed_url_v4_w_content_md5(self): - self._generate_signed_url_v4_helper(content_md5="FACEDACE") - - def test_generate_signed_url_v4_w_content_type(self): - self._generate_signed_url_v4_helper(content_type="text.html") - - def test_generate_signed_url_v4_w_response_type(self): - self._generate_signed_url_v4_helper(response_type="text.html") - - def test_generate_signed_url_v4_w_response_disposition(self): - self._generate_signed_url_v4_helper(response_disposition="inline") - - def test_generate_signed_url_v4_w_generation(self): - self._generate_signed_url_v4_helper(generation=12345) - - def test_generate_signed_url_v4_w_headers(self): - self._generate_signed_url_v4_helper(headers={"x-goog-foo": "bar"}) - - def test_generate_signed_url_v4_w_credentials(self): - credentials = object() - self._generate_signed_url_v4_helper(credentials=credentials) - - -class _Connection(object): - _delete_bucket = False - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - self._deleted_buckets = [] - self.credentials = None - - @staticmethod - def _is_bucket_path(path): - # Now just ensure the path only has /b/ and one more segment. - return path.startswith("/b/") and path.count("/") == 2 - - def api_request(self, **kw): - from google.cloud.exceptions import NotFound - - self._requested.append(kw) - - method = kw.get("method") - path = kw.get("path", "") - if method == "DELETE" and self._is_bucket_path(path): - self._deleted_buckets.append(kw) - if self._delete_bucket: - return - else: - raise NotFound("miss") - - try: - response, self._responses = self._responses[0], self._responses[1:] - except IndexError: - raise NotFound("miss") - else: - return response - - -class _Client(object): - def __init__(self, connection, project=None): - self._base_connection = connection - self.project = project - - @property - def _connection(self): - return self._base_connection - - @property - def _credentials(self): - return self._base_connection.credentials diff --git a/storage/tests/unit/test_client.py b/storage/tests/unit/test_client.py deleted file mode 100644 index f3c090ebbaf9..000000000000 --- a/storage/tests/unit/test_client.py +++ /dev/null @@ -1,1293 +0,0 @@ -# Copyright 2015 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import io -import json -import unittest - -import mock -import pytest -import requests -from six.moves import http_client - - -def _make_credentials(): - import google.auth.credentials - - return mock.Mock(spec=google.auth.credentials.Credentials) - - -def _make_connection(*responses): - import google.cloud.storage._http - from google.cloud.exceptions import NotFound - - mock_conn = mock.create_autospec(google.cloud.storage._http.Connection) - mock_conn.user_agent = "testing 1.2.3" - mock_conn.api_request.side_effect = list(responses) + [NotFound("miss")] - return mock_conn - - -def _make_response(status=http_client.OK, content=b"", headers={}): - response = requests.Response() - response.status_code = status - response._content = content - response.headers = headers - response.request = requests.Request() - return response - - -def _make_json_response(data, status=http_client.OK, headers=None): - headers = headers or {} - headers["Content-Type"] = "application/json" - return _make_response( - status=status, content=json.dumps(data).encode("utf-8"), headers=headers - ) - - -def _make_requests_session(responses): - session = mock.create_autospec(requests.Session, instance=True) - session.request.side_effect = responses - return session - - -class TestClient(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.client import Client - - return Client - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor_connection_type(self): - from google.cloud._http import ClientInfo - from google.cloud.storage._http import Connection - - PROJECT = "PROJECT" - credentials = _make_credentials() - - client = self._make_one(project=PROJECT, credentials=credentials) - - self.assertEqual(client.project, PROJECT) - self.assertIsInstance(client._connection, Connection) - self.assertIs(client._connection.credentials, credentials) - self.assertIsNone(client.current_batch) - self.assertEqual(list(client._batch_stack), []) - self.assertIsInstance(client._connection._client_info, ClientInfo) - self.assertEqual( - client._connection.API_BASE_URL, Connection.DEFAULT_API_ENDPOINT - ) - - def test_ctor_w_empty_client_options(self): - from google.api_core.client_options import ClientOptions - - PROJECT = "PROJECT" - credentials = _make_credentials() - client_options = ClientOptions() - - client = self._make_one( - project=PROJECT, credentials=credentials, client_options=client_options - ) - - self.assertEqual( - client._connection.API_BASE_URL, client._connection.DEFAULT_API_ENDPOINT - ) - - def test_ctor_w_client_options_dict(self): - - PROJECT = "PROJECT" - credentials = _make_credentials() - client_options = {"api_endpoint": "https://www.foo-googleapis.com"} - - client = self._make_one( - project=PROJECT, credentials=credentials, client_options=client_options - ) - - self.assertEqual( - client._connection.API_BASE_URL, "https://www.foo-googleapis.com" - ) - - def test_ctor_w_client_options_object(self): - from google.api_core.client_options import ClientOptions - - PROJECT = "PROJECT" - credentials = _make_credentials() - client_options = ClientOptions(api_endpoint="https://www.foo-googleapis.com") - - client = self._make_one( - project=PROJECT, credentials=credentials, client_options=client_options - ) - - self.assertEqual( - client._connection.API_BASE_URL, "https://www.foo-googleapis.com" - ) - - def test_ctor_wo_project(self): - from google.cloud.storage._http import Connection - - PROJECT = "PROJECT" - credentials = 
_make_credentials() - - ddp_patch = mock.patch( - "google.cloud.client._determine_default_project", return_value=PROJECT - ) - - with ddp_patch: - client = self._make_one(credentials=credentials) - - self.assertEqual(client.project, PROJECT) - self.assertIsInstance(client._connection, Connection) - self.assertIs(client._connection.credentials, credentials) - self.assertIsNone(client.current_batch) - self.assertEqual(list(client._batch_stack), []) - - def test_ctor_w_project_explicit_none(self): - from google.cloud.storage._http import Connection - - credentials = _make_credentials() - - client = self._make_one(project=None, credentials=credentials) - - self.assertIsNone(client.project) - self.assertIsInstance(client._connection, Connection) - self.assertIs(client._connection.credentials, credentials) - self.assertIsNone(client.current_batch) - self.assertEqual(list(client._batch_stack), []) - - def test_ctor_w_client_info(self): - from google.cloud._http import ClientInfo - from google.cloud.storage._http import Connection - - credentials = _make_credentials() - client_info = ClientInfo() - - client = self._make_one( - project=None, credentials=credentials, client_info=client_info - ) - - self.assertIsNone(client.project) - self.assertIsInstance(client._connection, Connection) - self.assertIs(client._connection.credentials, credentials) - self.assertIsNone(client.current_batch) - self.assertEqual(list(client._batch_stack), []) - self.assertIs(client._connection._client_info, client_info) - - def test_create_anonymous_client(self): - from google.auth.credentials import AnonymousCredentials - from google.cloud.storage._http import Connection - - klass = self._get_target_class() - client = klass.create_anonymous_client() - - self.assertIsNone(client.project) - self.assertIsInstance(client._connection, Connection) - self.assertIsInstance(client._connection.credentials, AnonymousCredentials) - - def test__push_batch_and__pop_batch(self): - from google.cloud.storage.batch import Batch - - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - batch1 = Batch(client) - batch2 = Batch(client) - client._push_batch(batch1) - self.assertEqual(list(client._batch_stack), [batch1]) - self.assertIs(client.current_batch, batch1) - client._push_batch(batch2) - self.assertIs(client.current_batch, batch2) - # list(_LocalStack) returns in reverse order. 
- self.assertEqual(list(client._batch_stack), [batch2, batch1]) - self.assertIs(client._pop_batch(), batch2) - self.assertEqual(list(client._batch_stack), [batch1]) - self.assertIs(client._pop_batch(), batch1) - self.assertEqual(list(client._batch_stack), []) - - def test__connection_setter(self): - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - client._base_connection = None # Unset the value from the constructor - client._connection = connection = object() - self.assertIs(client._base_connection, connection) - - def test__connection_setter_when_set(self): - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - self.assertRaises(ValueError, setattr, client, "_connection", None) - - def test__connection_getter_no_batch(self): - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - self.assertIs(client._connection, client._base_connection) - self.assertIsNone(client.current_batch) - - def test__connection_getter_with_batch(self): - from google.cloud.storage.batch import Batch - - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - batch = Batch(client) - client._push_batch(batch) - self.assertIsNot(client._connection, client._base_connection) - self.assertIs(client._connection, batch) - self.assertIs(client.current_batch, batch) - - def test_get_service_account_email_wo_project(self): - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - EMAIL = "storage-user-123@example.com" - RESOURCE = {"kind": "storage#serviceAccount", "email_address": EMAIL} - - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - http = _make_requests_session([_make_json_response(RESOURCE)]) - client._http_internal = http - - service_account_email = client.get_service_account_email() - - self.assertEqual(service_account_email, EMAIL) - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "projects/%s/serviceAccount" % (PROJECT,), - ] - ) - http.request.assert_called_once_with( - method="GET", url=URI, data=None, headers=mock.ANY, timeout=mock.ANY - ) - - def test_get_service_account_email_w_project(self): - PROJECT = "PROJECT" - OTHER_PROJECT = "OTHER_PROJECT" - CREDENTIALS = _make_credentials() - EMAIL = "storage-user-123@example.com" - RESOURCE = {"kind": "storage#serviceAccount", "email_address": EMAIL} - - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - http = _make_requests_session([_make_json_response(RESOURCE)]) - client._http_internal = http - - service_account_email = client.get_service_account_email(project=OTHER_PROJECT) - - self.assertEqual(service_account_email, EMAIL) - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "projects/%s/serviceAccount" % (OTHER_PROJECT,), - ] - ) - http.request.assert_called_once_with( - method="GET", url=URI, data=None, headers=mock.ANY, timeout=mock.ANY - ) - - def test_bucket(self): - from google.cloud.storage.bucket import Bucket - - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - BUCKET_NAME = "BUCKET_NAME" - - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - bucket = client.bucket(BUCKET_NAME) - self.assertIsInstance(bucket, Bucket) - self.assertIs(bucket.client, client) - self.assertEqual(bucket.name, 
BUCKET_NAME) - self.assertIsNone(bucket.user_project) - - def test_bucket_w_user_project(self): - from google.cloud.storage.bucket import Bucket - - PROJECT = "PROJECT" - USER_PROJECT = "USER_PROJECT" - CREDENTIALS = _make_credentials() - BUCKET_NAME = "BUCKET_NAME" - - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - bucket = client.bucket(BUCKET_NAME, user_project=USER_PROJECT) - self.assertIsInstance(bucket, Bucket) - self.assertIs(bucket.client, client) - self.assertEqual(bucket.name, BUCKET_NAME) - self.assertEqual(bucket.user_project, USER_PROJECT) - - def test_batch(self): - from google.cloud.storage.batch import Batch - - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - batch = client.batch() - self.assertIsInstance(batch, Batch) - self.assertIs(batch._client, client) - - def test_get_bucket_with_string_miss(self): - from google.cloud.exceptions import NotFound - - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - NONESUCH = "nonesuch" - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b", - "nonesuch?projection=noAcl", - ] - ) - http = _make_requests_session( - [_make_json_response({}, status=http_client.NOT_FOUND)] - ) - client._http_internal = http - - with self.assertRaises(NotFound): - client.get_bucket(NONESUCH) - - http.request.assert_called_once_with( - method="GET", url=URI, data=mock.ANY, headers=mock.ANY, timeout=mock.ANY - ) - - def test_get_bucket_with_string_hit(self): - from google.cloud.storage.bucket import Bucket - - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - BUCKET_NAME = "bucket-name" - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b", - "%s?projection=noAcl" % (BUCKET_NAME,), - ] - ) - - data = {"name": BUCKET_NAME} - http = _make_requests_session([_make_json_response(data)]) - client._http_internal = http - - bucket = client.get_bucket(BUCKET_NAME) - - self.assertIsInstance(bucket, Bucket) - self.assertEqual(bucket.name, BUCKET_NAME) - http.request.assert_called_once_with( - method="GET", url=URI, data=mock.ANY, headers=mock.ANY, timeout=mock.ANY - ) - - def test_get_bucket_with_object_miss(self): - from google.cloud.exceptions import NotFound - from google.cloud.storage.bucket import Bucket - - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - - nonesuch = "nonesuch" - bucket_obj = Bucket(client, nonesuch) - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b", - "nonesuch?projection=noAcl", - ] - ) - http = _make_requests_session( - [_make_json_response({}, status=http_client.NOT_FOUND)] - ) - client._http_internal = http - - with self.assertRaises(NotFound): - client.get_bucket(bucket_obj) - - http.request.assert_called_once_with( - method="GET", url=URI, data=mock.ANY, headers=mock.ANY, timeout=mock.ANY - ) - - def test_get_bucket_with_object_hit(self): - from google.cloud.storage.bucket import Bucket - - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - - bucket_name = "bucket-name" - bucket_obj = Bucket(client, bucket_name) - URI = "/".join( - [ - client._connection.API_BASE_URL, - 
"storage", - client._connection.API_VERSION, - "b", - "%s?projection=noAcl" % (bucket_name,), - ] - ) - - data = {"name": bucket_name} - http = _make_requests_session([_make_json_response(data)]) - client._http_internal = http - - bucket = client.get_bucket(bucket_obj) - - self.assertIsInstance(bucket, Bucket) - self.assertEqual(bucket.name, bucket_name) - http.request.assert_called_once_with( - method="GET", url=URI, data=mock.ANY, headers=mock.ANY, timeout=mock.ANY - ) - - def test_lookup_bucket_miss(self): - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - NONESUCH = "nonesuch" - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b", - "nonesuch?projection=noAcl", - ] - ) - http = _make_requests_session( - [_make_json_response({}, status=http_client.NOT_FOUND)] - ) - client._http_internal = http - - bucket = client.lookup_bucket(NONESUCH) - - self.assertIsNone(bucket) - http.request.assert_called_once_with( - method="GET", url=URI, data=mock.ANY, headers=mock.ANY, timeout=mock.ANY - ) - - def test_lookup_bucket_hit(self): - from google.cloud.storage.bucket import Bucket - - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - BUCKET_NAME = "bucket-name" - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b", - "%s?projection=noAcl" % (BUCKET_NAME,), - ] - ) - data = {"name": BUCKET_NAME} - http = _make_requests_session([_make_json_response(data)]) - client._http_internal = http - - bucket = client.lookup_bucket(BUCKET_NAME) - - self.assertIsInstance(bucket, Bucket) - self.assertEqual(bucket.name, BUCKET_NAME) - http.request.assert_called_once_with( - method="GET", url=URI, data=mock.ANY, headers=mock.ANY, timeout=mock.ANY - ) - - def test_create_bucket_w_missing_client_project(self): - credentials = _make_credentials() - client = self._make_one(project=None, credentials=credentials) - - with self.assertRaises(ValueError): - client.create_bucket("bucket") - - def test_create_bucket_w_conflict(self): - from google.cloud.exceptions import Conflict - - project = "PROJECT" - user_project = "USER_PROJECT" - other_project = "OTHER_PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - connection = _make_connection() - client._base_connection = connection - connection.api_request.side_effect = Conflict("testing") - - bucket_name = "bucket-name" - data = {"name": bucket_name} - - with self.assertRaises(Conflict): - client.create_bucket( - bucket_name, project=other_project, user_project=user_project - ) - - connection.api_request.assert_called_once_with( - method="POST", - path="/b", - query_params={"project": other_project, "userProject": user_project}, - data=data, - _target_object=mock.ANY, - ) - - def test_create_bucket_w_predefined_acl_invalid(self): - project = "PROJECT" - bucket_name = "bucket-name" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - - with self.assertRaises(ValueError): - client.create_bucket(bucket_name, predefined_acl="bogus") - - def test_create_bucket_w_predefined_acl_valid(self): - project = "PROJECT" - bucket_name = "bucket-name" - data = {"name": bucket_name} - - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - connection = 
_make_connection(data) - client._base_connection = connection - bucket = client.create_bucket(bucket_name, predefined_acl="publicRead") - - connection.api_request.assert_called_once_with( - method="POST", - path="/b", - query_params={"project": project, "predefinedAcl": "publicRead"}, - data=data, - _target_object=bucket, - ) - - def test_create_bucket_w_predefined_default_object_acl_invalid(self): - project = "PROJECT" - bucket_name = "bucket-name" - - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - - with self.assertRaises(ValueError): - client.create_bucket(bucket_name, predefined_default_object_acl="bogus") - - def test_create_bucket_w_predefined_default_object_acl_valid(self): - project = "PROJECT" - bucket_name = "bucket-name" - data = {"name": bucket_name} - - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - connection = _make_connection(data) - client._base_connection = connection - bucket = client.create_bucket( - bucket_name, predefined_default_object_acl="publicRead" - ) - - connection.api_request.assert_called_once_with( - method="POST", - path="/b", - query_params={ - "project": project, - "predefinedDefaultObjectAcl": "publicRead", - }, - data=data, - _target_object=bucket, - ) - - def test_create_bucket_w_explicit_location(self): - project = "PROJECT" - bucket_name = "bucket-name" - location = "us-central1" - data = {"location": location, "name": bucket_name} - - connection = _make_connection( - data, "{'location': 'us-central1', 'name': 'bucket-name'}" - ) - - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - client._base_connection = connection - - bucket = client.create_bucket(bucket_name, location=location) - - connection.api_request.assert_called_once_with( - method="POST", - path="/b", - data=data, - _target_object=bucket, - query_params={"project": project}, - ) - self.assertEqual(bucket.location, location) - - def test_create_bucket_w_string_success(self): - from google.cloud.storage.bucket import Bucket - - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - - bucket_name = "bucket-name" - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b?project=%s" % (project,), - ] - ) - json_expected = {"name": bucket_name, "billing": {"requesterPays": True}} - data = json_expected - http = _make_requests_session([_make_json_response(data)]) - client._http_internal = http - - bucket = client.create_bucket(bucket_name, requester_pays=True) - - self.assertIsInstance(bucket, Bucket) - self.assertEqual(bucket.name, bucket_name) - self.assertTrue(bucket.requester_pays) - http.request.assert_called_once_with( - method="POST", url=URI, data=mock.ANY, headers=mock.ANY, timeout=mock.ANY - ) - json_sent = http.request.call_args_list[0][1]["data"] - self.assertEqual(json_expected, json.loads(json_sent)) - - def test_create_bucket_w_object_success(self): - from google.cloud.storage.bucket import Bucket - - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - - bucket_name = "bucket-name" - bucket_obj = Bucket(client, bucket_name) - bucket_obj.storage_class = "COLDLINE" - bucket_obj.requester_pays = True - - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b?project=%s" % 
(project,), - ] - ) - json_expected = { - "name": bucket_name, - "billing": {"requesterPays": True}, - "storageClass": "COLDLINE", - } - data = json_expected - http = _make_requests_session([_make_json_response(data)]) - client._http_internal = http - - bucket = client.create_bucket(bucket_obj) - - self.assertIsInstance(bucket, Bucket) - self.assertEqual(bucket.name, bucket_name) - self.assertTrue(bucket.requester_pays) - http.request.assert_called_once_with( - method="POST", url=URI, data=mock.ANY, headers=mock.ANY, timeout=mock.ANY - ) - json_sent = http.request.call_args_list[0][1]["data"] - self.assertEqual(json_expected, json.loads(json_sent)) - - def test_download_blob_to_file_with_blob(self): - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - blob = mock.Mock() - file_obj = io.BytesIO() - - client.download_blob_to_file(blob, file_obj) - blob.download_to_file.assert_called_once_with( - file_obj, client=client, start=None, end=None - ) - - def test_download_blob_to_file_with_uri(self): - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - blob = mock.Mock() - file_obj = io.BytesIO() - - with mock.patch( - "google.cloud.storage.client.Blob.from_string", return_value=blob - ): - client.download_blob_to_file("gs://bucket_name/path/to/object", file_obj) - - blob.download_to_file.assert_called_once_with( - file_obj, client=client, start=None, end=None - ) - - def test_download_blob_to_file_with_invalid_uri(self): - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - file_obj = io.BytesIO() - - with pytest.raises(ValueError, match="URI scheme must be gs"): - client.download_blob_to_file("http://bucket_name/path/to/object", file_obj) - - def test_list_blobs(self): - from google.cloud.storage.bucket import Bucket - - BUCKET_NAME = "bucket-name" - - credentials = _make_credentials() - client = self._make_one(project="PROJECT", credentials=credentials) - connection = _make_connection({"items": []}) - - with mock.patch( - "google.cloud.storage.client.Client._connection", - new_callable=mock.PropertyMock, - ) as client_mock: - client_mock.return_value = connection - - bucket_obj = Bucket(client, BUCKET_NAME) - iterator = client.list_blobs(bucket_obj) - blobs = list(iterator) - - self.assertEqual(blobs, []) - connection.api_request.assert_called_once_with( - method="GET", - path="/b/%s/o" % BUCKET_NAME, - query_params={"projection": "noAcl"}, - ) - - def test_list_blobs_w_all_arguments_and_user_project(self): - from google.cloud.storage.bucket import Bucket - - BUCKET_NAME = "name" - USER_PROJECT = "user-project-123" - MAX_RESULTS = 10 - PAGE_TOKEN = "ABCD" - PREFIX = "subfolder" - DELIMITER = "/" - VERSIONS = True - PROJECTION = "full" - FIELDS = "items/contentLanguage,nextPageToken" - EXPECTED = { - "maxResults": 10, - "pageToken": PAGE_TOKEN, - "prefix": PREFIX, - "delimiter": DELIMITER, - "versions": VERSIONS, - "projection": PROJECTION, - "fields": FIELDS, - "userProject": USER_PROJECT, - } - - credentials = _make_credentials() - client = self._make_one(project=USER_PROJECT, credentials=credentials) - connection = _make_connection({"items": []}) - - with mock.patch( - "google.cloud.storage.client.Client._connection", - new_callable=mock.PropertyMock, - ) as client_mock: - client_mock.return_value = connection - - bucket = Bucket(client, BUCKET_NAME, 
user_project=USER_PROJECT) - iterator = client.list_blobs( - bucket_or_name=bucket, - max_results=MAX_RESULTS, - page_token=PAGE_TOKEN, - prefix=PREFIX, - delimiter=DELIMITER, - versions=VERSIONS, - projection=PROJECTION, - fields=FIELDS, - ) - blobs = list(iterator) - - self.assertEqual(blobs, []) - connection.api_request.assert_called_once_with( - method="GET", path="/b/%s/o" % BUCKET_NAME, query_params=EXPECTED - ) - - def test_list_buckets_wo_project(self): - CREDENTIALS = _make_credentials() - client = self._make_one(project=None, credentials=CREDENTIALS) - - with self.assertRaises(ValueError): - client.list_buckets() - - def test_list_buckets_empty(self): - from six.moves.urllib.parse import parse_qs - from six.moves.urllib.parse import urlparse - - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - http = _make_requests_session([_make_json_response({})]) - client._http_internal = http - - buckets = list(client.list_buckets()) - - self.assertEqual(len(buckets), 0) - - http.request.assert_called_once_with( - method="GET", - url=mock.ANY, - data=mock.ANY, - headers=mock.ANY, - timeout=mock.ANY, - ) - - requested_url = http.request.mock_calls[0][2]["url"] - expected_base_url = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b", - ] - ) - self.assertTrue(requested_url.startswith(expected_base_url)) - - expected_query = {"project": [PROJECT], "projection": ["noAcl"]} - uri_parts = urlparse(requested_url) - self.assertEqual(parse_qs(uri_parts.query), expected_query) - - def test_list_buckets_explicit_project(self): - from six.moves.urllib.parse import parse_qs - from six.moves.urllib.parse import urlparse - - PROJECT = "PROJECT" - OTHER_PROJECT = "OTHER_PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - http = _make_requests_session([_make_json_response({})]) - client._http_internal = http - - buckets = list(client.list_buckets(project=OTHER_PROJECT)) - - self.assertEqual(len(buckets), 0) - - http.request.assert_called_once_with( - method="GET", - url=mock.ANY, - data=mock.ANY, - headers=mock.ANY, - timeout=mock.ANY, - ) - - requested_url = http.request.mock_calls[0][2]["url"] - expected_base_url = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b", - ] - ) - self.assertTrue(requested_url.startswith(expected_base_url)) - - expected_query = {"project": [OTHER_PROJECT], "projection": ["noAcl"]} - uri_parts = urlparse(requested_url) - self.assertEqual(parse_qs(uri_parts.query), expected_query) - - def test_list_buckets_non_empty(self): - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - BUCKET_NAME = "bucket-name" - - data = {"items": [{"name": BUCKET_NAME}]} - http = _make_requests_session([_make_json_response(data)]) - client._http_internal = http - - buckets = list(client.list_buckets()) - - self.assertEqual(len(buckets), 1) - self.assertEqual(buckets[0].name, BUCKET_NAME) - - http.request.assert_called_once_with( - method="GET", - url=mock.ANY, - data=mock.ANY, - headers=mock.ANY, - timeout=mock.ANY, - ) - - def test_list_buckets_all_arguments(self): - from six.moves.urllib.parse import parse_qs - from six.moves.urllib.parse import urlparse - - PROJECT = "foo-bar" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - 
MAX_RESULTS = 10 - PAGE_TOKEN = "ABCD" - PREFIX = "subfolder" - PROJECTION = "full" - FIELDS = "items/id,nextPageToken" - - data = {"items": []} - http = _make_requests_session([_make_json_response(data)]) - client._http_internal = http - iterator = client.list_buckets( - max_results=MAX_RESULTS, - page_token=PAGE_TOKEN, - prefix=PREFIX, - projection=PROJECTION, - fields=FIELDS, - ) - buckets = list(iterator) - self.assertEqual(buckets, []) - http.request.assert_called_once_with( - method="GET", - url=mock.ANY, - data=mock.ANY, - headers=mock.ANY, - timeout=mock.ANY, - ) - - requested_url = http.request.mock_calls[0][2]["url"] - expected_base_url = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "b", - ] - ) - self.assertTrue(requested_url.startswith(expected_base_url)) - - expected_query = { - "project": [PROJECT], - "maxResults": [str(MAX_RESULTS)], - "pageToken": [PAGE_TOKEN], - "prefix": [PREFIX], - "projection": [PROJECTION], - "fields": [FIELDS], - } - uri_parts = urlparse(requested_url) - self.assertEqual(parse_qs(uri_parts.query), expected_query) - - def test_list_buckets_page_empty_response(self): - from google.api_core import page_iterator - - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - iterator = client.list_buckets() - page = page_iterator.Page(iterator, (), None) - iterator._page = page - self.assertEqual(list(page), []) - - def test_list_buckets_page_non_empty_response(self): - import six - from google.cloud.storage.bucket import Bucket - - project = "PROJECT" - credentials = _make_credentials() - client = self._make_one(project=project, credentials=credentials) - - blob_name = "bucket-name" - response = {"items": [{"name": blob_name}]} - - def dummy_response(): - return response - - iterator = client.list_buckets() - iterator._get_next_page_response = dummy_response - - page = six.next(iterator.pages) - self.assertEqual(page.num_items, 1) - bucket = six.next(page) - self.assertEqual(page.remaining, 0) - self.assertIsInstance(bucket, Bucket) - self.assertEqual(bucket.name, blob_name) - - def _create_hmac_key_helper(self, explicit_project=None, user_project=None): - import datetime - from pytz import UTC - from six.moves.urllib.parse import urlencode - from google.cloud.storage.hmac_key import HMACKeyMetadata - - PROJECT = "PROJECT" - ACCESS_ID = "ACCESS-ID" - CREDENTIALS = _make_credentials() - EMAIL = "storage-user-123@example.com" - SECRET = "a" * 40 - now = datetime.datetime.utcnow().replace(tzinfo=UTC) - now_stamp = "{}Z".format(now.isoformat()) - - if explicit_project is not None: - expected_project = explicit_project - else: - expected_project = PROJECT - - RESOURCE = { - "kind": "storage#hmacKey", - "metadata": { - "accessId": ACCESS_ID, - "etag": "ETAG", - "id": "projects/{}/hmacKeys/{}".format(PROJECT, ACCESS_ID), - "project": expected_project, - "state": "ACTIVE", - "serviceAccountEmail": EMAIL, - "timeCreated": now_stamp, - "updated": now_stamp, - }, - "secret": SECRET, - } - - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - http = _make_requests_session([_make_json_response(RESOURCE)]) - client._http_internal = http - - kwargs = {} - if explicit_project is not None: - kwargs["project_id"] = explicit_project - - if user_project is not None: - kwargs["user_project"] = user_project - - metadata, secret = client.create_hmac_key(service_account_email=EMAIL, **kwargs) - - self.assertIsInstance(metadata, HMACKeyMetadata) 
- self.assertIs(metadata._client, client) - self.assertEqual(metadata._properties, RESOURCE["metadata"]) - self.assertEqual(secret, RESOURCE["secret"]) - - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "projects", - expected_project, - "hmacKeys", - ] - ) - qs_params = {"serviceAccountEmail": EMAIL} - - if user_project is not None: - qs_params["userProject"] = user_project - - FULL_URI = "{}?{}".format(URI, urlencode(qs_params)) - http.request.assert_called_once_with( - method="POST", url=FULL_URI, data=None, headers=mock.ANY, timeout=mock.ANY - ) - - def test_create_hmac_key_defaults(self): - self._create_hmac_key_helper() - - def test_create_hmac_key_explicit_project(self): - self._create_hmac_key_helper(explicit_project="other-project-456") - - def test_create_hmac_key_user_project(self): - self._create_hmac_key_helper(user_project="billed-project") - - def test_list_hmac_keys_defaults_empty(self): - PROJECT = "PROJECT" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - http = _make_requests_session([_make_json_response({})]) - client._http_internal = http - - metadatas = list(client.list_hmac_keys()) - - self.assertEqual(len(metadatas), 0) - - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "projects", - PROJECT, - "hmacKeys", - ] - ) - http.request.assert_called_once_with( - method="GET", url=URI, data=None, headers=mock.ANY, timeout=mock.ANY - ) - - def test_list_hmac_keys_explicit_non_empty(self): - from six.moves.urllib.parse import parse_qsl - from google.cloud.storage.hmac_key import HMACKeyMetadata - - PROJECT = "PROJECT" - OTHER_PROJECT = "other-project-456" - MAX_RESULTS = 3 - EMAIL = "storage-user-123@example.com" - ACCESS_ID = "ACCESS-ID" - USER_PROJECT = "billed-project" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - response = { - "kind": "storage#hmacKeysMetadata", - "items": [ - { - "kind": "storage#hmacKeyMetadata", - "accessId": ACCESS_ID, - "serviceAccountEmail": EMAIL, - } - ], - } - - http = _make_requests_session([_make_json_response(response)]) - client._http_internal = http - - metadatas = list( - client.list_hmac_keys( - max_results=MAX_RESULTS, - service_account_email=EMAIL, - show_deleted_keys=True, - project_id=OTHER_PROJECT, - user_project=USER_PROJECT, - ) - ) - - self.assertEqual(len(metadatas), len(response["items"])) - - for metadata, resource in zip(metadatas, response["items"]): - self.assertIsInstance(metadata, HMACKeyMetadata) - self.assertIs(metadata._client, client) - self.assertEqual(metadata._properties, resource) - - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "projects", - OTHER_PROJECT, - "hmacKeys", - ] - ) - EXPECTED_QPARAMS = { - "maxResults": str(MAX_RESULTS), - "serviceAccountEmail": EMAIL, - "showDeletedKeys": "True", - "userProject": USER_PROJECT, - } - http.request.assert_called_once_with( - method="GET", url=mock.ANY, data=None, headers=mock.ANY, timeout=mock.ANY - ) - kwargs = http.request.mock_calls[0].kwargs - uri = kwargs["url"] - base, qparam_str = uri.split("?") - qparams = dict(parse_qsl(qparam_str)) - self.assertEqual(base, URI) - self.assertEqual(qparams, EXPECTED_QPARAMS) - - def test_get_hmac_key_metadata_wo_project(self): - from google.cloud.storage.hmac_key import HMACKeyMetadata - - PROJECT = "PROJECT" - EMAIL = 
"storage-user-123@example.com" - ACCESS_ID = "ACCESS-ID" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - resource = { - "kind": "storage#hmacKeyMetadata", - "accessId": ACCESS_ID, - "projectId": PROJECT, - "serviceAccountEmail": EMAIL, - } - - http = _make_requests_session([_make_json_response(resource)]) - client._http_internal = http - - metadata = client.get_hmac_key_metadata(ACCESS_ID) - - self.assertIsInstance(metadata, HMACKeyMetadata) - self.assertIs(metadata._client, client) - self.assertEqual(metadata.access_id, ACCESS_ID) - self.assertEqual(metadata.project, PROJECT) - - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "projects", - PROJECT, - "hmacKeys", - ACCESS_ID, - ] - ) - http.request.assert_called_once_with( - method="GET", url=URI, data=None, headers=mock.ANY, timeout=mock.ANY - ) - - def test_get_hmac_key_metadata_w_project(self): - from six.moves.urllib.parse import urlencode - from google.cloud.storage.hmac_key import HMACKeyMetadata - - PROJECT = "PROJECT" - OTHER_PROJECT = "other-project-456" - EMAIL = "storage-user-123@example.com" - ACCESS_ID = "ACCESS-ID" - USER_PROJECT = "billed-project" - CREDENTIALS = _make_credentials() - client = self._make_one(project=PROJECT, credentials=CREDENTIALS) - - resource = { - "kind": "storage#hmacKeyMetadata", - "accessId": ACCESS_ID, - "projectId": OTHER_PROJECT, - "serviceAccountEmail": EMAIL, - } - - http = _make_requests_session([_make_json_response(resource)]) - client._http_internal = http - - metadata = client.get_hmac_key_metadata( - ACCESS_ID, project_id=OTHER_PROJECT, user_project=USER_PROJECT - ) - - self.assertIsInstance(metadata, HMACKeyMetadata) - self.assertIs(metadata._client, client) - self.assertEqual(metadata.access_id, ACCESS_ID) - self.assertEqual(metadata.project, OTHER_PROJECT) - - URI = "/".join( - [ - client._connection.API_BASE_URL, - "storage", - client._connection.API_VERSION, - "projects", - OTHER_PROJECT, - "hmacKeys", - ACCESS_ID, - ] - ) - - qs_params = {"userProject": USER_PROJECT} - FULL_URI = "{}?{}".format(URI, urlencode(qs_params)) - - http.request.assert_called_once_with( - method="GET", url=FULL_URI, data=None, headers=mock.ANY, timeout=mock.ANY - ) diff --git a/storage/tests/unit/test_hmac_key.py b/storage/tests/unit/test_hmac_key.py deleted file mode 100644 index 138742d5b672..000000000000 --- a/storage/tests/unit/test_hmac_key.py +++ /dev/null @@ -1,430 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import mock - - -class TestHMACKeyMetadata(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.storage.hmac_key import HMACKeyMetadata - - return HMACKeyMetadata - - def _make_one(self, client=None, *args, **kw): - if client is None: - client = _Client() - return self._get_target_class()(client, *args, **kw) - - def test_ctor_defaults(self): - client = object() - metadata = self._make_one(client) - self.assertIs(metadata._client, client) - self.assertEqual(metadata._properties, {}) - self.assertIsNone(metadata.access_id) - self.assertIsNone(metadata.etag) - self.assertIsNone(metadata.id) - self.assertIsNone(metadata.project) - self.assertIsNone(metadata.service_account_email) - self.assertIsNone(metadata.state) - self.assertIsNone(metadata.time_created) - self.assertIsNone(metadata.updated) - - def test_ctor_explicit(self): - OTHER_PROJECT = "other-project-456" - ACCESS_ID = "access-id-123456789" - USER_PROJECT = "billed-project" - client = _Client() - metadata = self._make_one( - client, - access_id=ACCESS_ID, - project_id=OTHER_PROJECT, - user_project=USER_PROJECT, - ) - self.assertIs(metadata._client, client) - expected = {"accessId": ACCESS_ID, "projectId": OTHER_PROJECT} - self.assertEqual(metadata._properties, expected) - self.assertEqual(metadata.access_id, ACCESS_ID) - self.assertEqual(metadata.user_project, USER_PROJECT) - self.assertIsNone(metadata.etag) - self.assertIsNone(metadata.id) - self.assertEqual(metadata.project, OTHER_PROJECT) - self.assertIsNone(metadata.service_account_email) - self.assertIsNone(metadata.state) - self.assertIsNone(metadata.time_created) - self.assertIsNone(metadata.updated) - - def test___eq___other_type(self): - metadata = self._make_one() - for bogus in (None, "bogus", 123, 456.78, [], (), {}, set()): - self.assertNotEqual(metadata, bogus) - - def test___eq___mismatched_client(self): - metadata = self._make_one() - other_client = _Client(project="other-project-456") - other = self._make_one(other_client) - self.assertNotEqual(metadata, other) - - def test___eq___mismatched_access_id(self): - metadata = self._make_one() - metadata._properties["accessId"] = "ABC123" - other = self._make_one(metadata._client) - metadata._properties["accessId"] = "DEF456" - self.assertNotEqual(metadata, other) - - def test___eq___hit(self): - metadata = self._make_one() - metadata._properties["accessId"] = "ABC123" - other = self._make_one(metadata._client) - other._properties["accessId"] = metadata.access_id - self.assertEqual(metadata, other) - - def test___hash__(self): - client = _Client() - metadata = self._make_one(client) - metadata._properties["accessId"] = "ABC123" - self.assertIsInstance(hash(metadata), int) - other = self._make_one(client) - metadata._properties["accessId"] = "DEF456" - self.assertNotEqual(hash(metadata), hash(other)) - - def test_access_id_getter(self): - metadata = self._make_one() - expected = "ACCESS-ID" - metadata._properties["accessId"] = expected - self.assertEqual(metadata.access_id, expected) - - def test_etag_getter(self): - metadata = self._make_one() - expected = "ETAG" - metadata._properties["etag"] = expected - self.assertEqual(metadata.etag, expected) - - def test_id_getter(self): - metadata = self._make_one() - expected = "ID" - metadata._properties["id"] = expected - self.assertEqual(metadata.id, expected) - - def test_project_getter(self): - metadata = self._make_one() - expected = "PROJECT-ID" - metadata._properties["projectId"] = expected - 
self.assertEqual(metadata.project, expected) - - def test_service_account_email_getter(self): - metadata = self._make_one() - expected = "service_account@example.com" - metadata._properties["serviceAccountEmail"] = expected - self.assertEqual(metadata.service_account_email, expected) - - def test_state_getter(self): - metadata = self._make_one() - expected = "STATE" - metadata._properties["state"] = expected - self.assertEqual(metadata.state, expected) - - def test_state_setter_invalid_state(self): - metadata = self._make_one() - expected = "INVALID" - - with self.assertRaises(ValueError): - metadata.state = expected - - self.assertIsNone(metadata.state) - - def test_state_setter_inactive(self): - metadata = self._make_one() - metadata._properties["state"] = "ACTIVE" - expected = "INACTIVE" - metadata.state = expected - self.assertEqual(metadata.state, expected) - self.assertEqual(metadata._properties["state"], expected) - - def test_state_setter_active(self): - metadata = self._make_one() - metadata._properties["state"] = "INACTIVE" - expected = "ACTIVE" - metadata.state = expected - self.assertEqual(metadata.state, expected) - self.assertEqual(metadata._properties["state"], expected) - - def test_time_created_getter(self): - import datetime - from pytz import UTC - - metadata = self._make_one() - now = datetime.datetime.utcnow() - now_stamp = "{}Z".format(now.isoformat()) - metadata._properties["timeCreated"] = now_stamp - self.assertEqual(metadata.time_created, now.replace(tzinfo=UTC)) - - def test_updated_getter(self): - import datetime - from pytz import UTC - - metadata = self._make_one() - now = datetime.datetime.utcnow() - now_stamp = "{}Z".format(now.isoformat()) - metadata._properties["updated"] = now_stamp - self.assertEqual(metadata.updated, now.replace(tzinfo=UTC)) - - def test_path_wo_access_id(self): - metadata = self._make_one() - - with self.assertRaises(ValueError): - metadata.path - - def test_path_w_access_id_wo_project(self): - access_id = "ACCESS-ID" - client = _Client() - metadata = self._make_one() - metadata._properties["accessId"] = access_id - - expected_path = "/projects/{}/hmacKeys/{}".format( - client.DEFAULT_PROJECT, access_id - ) - self.assertEqual(metadata.path, expected_path) - - def test_path_w_access_id_w_explicit_project(self): - access_id = "ACCESS-ID" - project = "OTHER-PROJECT" - metadata = self._make_one() - metadata._properties["accessId"] = access_id - metadata._properties["projectId"] = project - - expected_path = "/projects/{}/hmacKeys/{}".format(project, access_id) - self.assertEqual(metadata.path, expected_path) - - def test_exists_miss_no_project_set(self): - from google.cloud.exceptions import NotFound - - access_id = "ACCESS-ID" - connection = mock.Mock(spec=["api_request"]) - connection.api_request.side_effect = NotFound("testing") - client = _Client(connection) - metadata = self._make_one(client) - metadata._properties["accessId"] = access_id - - self.assertFalse(metadata.exists()) - - expected_path = "/projects/{}/hmacKeys/{}".format( - client.DEFAULT_PROJECT, access_id - ) - expected_kwargs = {"method": "GET", "path": expected_path, "query_params": {}} - connection.api_request.assert_called_once_with(**expected_kwargs) - - def test_exists_hit_w_project_set(self): - project = "PROJECT-ID" - access_id = "ACCESS-ID" - user_project = "billed-project" - email = "service-account@example.com" - resource = { - "kind": "storage#hmacKeyMetadata", - "accessId": access_id, - "serviceAccountEmail": email, - } - connection = 
mock.Mock(spec=["api_request"]) - connection.api_request.return_value = resource - client = _Client(connection) - metadata = self._make_one(client, user_project=user_project) - metadata._properties["accessId"] = access_id - metadata._properties["projectId"] = project - - self.assertTrue(metadata.exists()) - - expected_path = "/projects/{}/hmacKeys/{}".format(project, access_id) - expected_kwargs = { - "method": "GET", - "path": expected_path, - "query_params": {"userProject": user_project}, - } - connection.api_request.assert_called_once_with(**expected_kwargs) - - def test_reload_miss_no_project_set(self): - from google.cloud.exceptions import NotFound - - access_id = "ACCESS-ID" - connection = mock.Mock(spec=["api_request"]) - connection.api_request.side_effect = NotFound("testing") - client = _Client(connection) - metadata = self._make_one(client) - metadata._properties["accessId"] = access_id - - with self.assertRaises(NotFound): - metadata.reload() - - expected_path = "/projects/{}/hmacKeys/{}".format( - client.DEFAULT_PROJECT, access_id - ) - expected_kwargs = {"method": "GET", "path": expected_path, "query_params": {}} - connection.api_request.assert_called_once_with(**expected_kwargs) - - def test_reload_hit_w_project_set(self): - project = "PROJECT-ID" - access_id = "ACCESS-ID" - user_project = "billed-project" - email = "service-account@example.com" - resource = { - "kind": "storage#hmacKeyMetadata", - "accessId": access_id, - "serviceAccountEmail": email, - } - connection = mock.Mock(spec=["api_request"]) - connection.api_request.return_value = resource - client = _Client(connection) - metadata = self._make_one(client, user_project=user_project) - metadata._properties["accessId"] = access_id - metadata._properties["projectId"] = project - - metadata.reload() - - self.assertEqual(metadata._properties, resource) - - expected_path = "/projects/{}/hmacKeys/{}".format(project, access_id) - expected_kwargs = { - "method": "GET", - "path": expected_path, - "query_params": {"userProject": user_project}, - } - connection.api_request.assert_called_once_with(**expected_kwargs) - - def test_update_miss_no_project_set(self): - from google.cloud.exceptions import NotFound - - access_id = "ACCESS-ID" - connection = mock.Mock(spec=["api_request"]) - connection.api_request.side_effect = NotFound("testing") - client = _Client(connection) - metadata = self._make_one(client) - metadata._properties["accessId"] = access_id - metadata.state = "INACTIVE" - - with self.assertRaises(NotFound): - metadata.update() - - expected_path = "/projects/{}/hmacKeys/{}".format( - client.DEFAULT_PROJECT, access_id - ) - expected_kwargs = { - "method": "PUT", - "path": expected_path, - "data": {"state": "INACTIVE"}, - "query_params": {}, - } - connection.api_request.assert_called_once_with(**expected_kwargs) - - def test_update_hit_w_project_set(self): - project = "PROJECT-ID" - access_id = "ACCESS-ID" - user_project = "billed-project" - email = "service-account@example.com" - resource = { - "kind": "storage#hmacKeyMetadata", - "accessId": access_id, - "serviceAccountEmail": email, - "state": "ACTIVE", - } - connection = mock.Mock(spec=["api_request"]) - connection.api_request.return_value = resource - client = _Client(connection) - metadata = self._make_one(client, user_project=user_project) - metadata._properties["accessId"] = access_id - metadata._properties["projectId"] = project - metadata.state = "ACTIVE" - - metadata.update() - - self.assertEqual(metadata._properties, resource) - - expected_path = 
"/projects/{}/hmacKeys/{}".format(project, access_id) - expected_kwargs = { - "method": "PUT", - "path": expected_path, - "data": {"state": "ACTIVE"}, - "query_params": {"userProject": user_project}, - } - connection.api_request.assert_called_once_with(**expected_kwargs) - - def test_delete_not_inactive(self): - metadata = self._make_one() - for state in ("ACTIVE", "DELETED"): - metadata._properties["state"] = state - - with self.assertRaises(ValueError): - metadata.delete() - - def test_delete_miss_no_project_set(self): - from google.cloud.exceptions import NotFound - - access_id = "ACCESS-ID" - connection = mock.Mock(spec=["api_request"]) - connection.api_request.side_effect = NotFound("testing") - client = _Client(connection) - metadata = self._make_one(client) - metadata._properties["accessId"] = access_id - metadata.state = "INACTIVE" - - with self.assertRaises(NotFound): - metadata.delete() - - expected_path = "/projects/{}/hmacKeys/{}".format( - client.DEFAULT_PROJECT, access_id - ) - expected_kwargs = { - "method": "DELETE", - "path": expected_path, - "query_params": {}, - } - connection.api_request.assert_called_once_with(**expected_kwargs) - - def test_delete_hit_w_project_set(self): - project = "PROJECT-ID" - access_id = "ACCESS-ID" - user_project = "billed-project" - connection = mock.Mock(spec=["api_request"]) - connection.api_request.return_value = {} - client = _Client(connection) - metadata = self._make_one(client, user_project=user_project) - metadata._properties["accessId"] = access_id - metadata._properties["projectId"] = project - metadata.state = "INACTIVE" - - metadata.delete() - - expected_path = "/projects/{}/hmacKeys/{}".format(project, access_id) - expected_kwargs = { - "method": "DELETE", - "path": expected_path, - "query_params": {"userProject": user_project}, - } - connection.api_request.assert_called_once_with(**expected_kwargs) - - -class _Client(object): - DEFAULT_PROJECT = "project-123" - - def __init__(self, connection=None, project=DEFAULT_PROJECT): - self._connection = connection - self.project = project - - def __eq__(self, other): - if not isinstance(other, self.__class__): # pragma: NO COVER - return NotImplemented - return self._connection == other._connection and self.project == other.project - - def __hash__(self): - return hash(self._connection) + hash(self.project) diff --git a/storage/tests/unit/test_notification.py b/storage/tests/unit/test_notification.py deleted file mode 100644 index 29b376b57496..000000000000 --- a/storage/tests/unit/test_notification.py +++ /dev/null @@ -1,520 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import mock - - -class TestBucketNotification(unittest.TestCase): - - BUCKET_NAME = "test-bucket" - BUCKET_PROJECT = "bucket-project-123" - TOPIC_NAME = "test-topic" - TOPIC_ALT_PROJECT = "topic-project-456" - TOPIC_REF_FMT = "//pubsub.googleapis.com/projects/{}/topics/{}" - TOPIC_REF = TOPIC_REF_FMT.format(BUCKET_PROJECT, TOPIC_NAME) - TOPIC_ALT_REF = TOPIC_REF_FMT.format(TOPIC_ALT_PROJECT, TOPIC_NAME) - CUSTOM_ATTRIBUTES = {"attr1": "value1", "attr2": "value2"} - BLOB_NAME_PREFIX = "blob-name-prefix/" - NOTIFICATION_ID = "123" - SELF_LINK = "https://example.com/notification/123" - ETAG = "DEADBEEF" - CREATE_PATH = "/b/{}/notificationConfigs".format(BUCKET_NAME) - NOTIFICATION_PATH = "/b/{}/notificationConfigs/{}".format( - BUCKET_NAME, NOTIFICATION_ID - ) - - @staticmethod - def event_types(): - from google.cloud.storage.notification import ( - OBJECT_FINALIZE_EVENT_TYPE, - OBJECT_DELETE_EVENT_TYPE, - ) - - return [OBJECT_FINALIZE_EVENT_TYPE, OBJECT_DELETE_EVENT_TYPE] - - @staticmethod - def payload_format(): - from google.cloud.storage.notification import JSON_API_V1_PAYLOAD_FORMAT - - return JSON_API_V1_PAYLOAD_FORMAT - - @staticmethod - def _get_target_class(): - from google.cloud.storage.notification import BucketNotification - - return BucketNotification - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def _make_client(self, project=BUCKET_PROJECT): - from google.cloud.storage.client import Client - - return mock.Mock(project=project, spec=Client) - - def _make_bucket(self, client, name=BUCKET_NAME, user_project=None): - bucket = mock.Mock(spec=["client", "name", "user_project"]) - bucket.client = client - bucket.name = name - bucket.user_project = user_project - return bucket - - def test_ctor_w_missing_project(self): - client = self._make_client(project=None) - bucket = self._make_bucket(client) - - with self.assertRaises(ValueError): - self._make_one(bucket, self.TOPIC_NAME) - - def test_ctor_defaults(self): - from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT - - client = self._make_client() - bucket = self._make_bucket(client) - - notification = self._make_one(bucket, self.TOPIC_NAME) - - self.assertIs(notification.bucket, bucket) - self.assertEqual(notification.topic_name, self.TOPIC_NAME) - self.assertEqual(notification.topic_project, self.BUCKET_PROJECT) - self.assertIsNone(notification.custom_attributes) - self.assertIsNone(notification.event_types) - self.assertIsNone(notification.blob_name_prefix) - self.assertEqual(notification.payload_format, NONE_PAYLOAD_FORMAT) - - def test_ctor_explicit(self): - client = self._make_client() - bucket = self._make_bucket(client) - - notification = self._make_one( - bucket, - self.TOPIC_NAME, - topic_project=self.TOPIC_ALT_PROJECT, - custom_attributes=self.CUSTOM_ATTRIBUTES, - event_types=self.event_types(), - blob_name_prefix=self.BLOB_NAME_PREFIX, - payload_format=self.payload_format(), - ) - - self.assertIs(notification.bucket, bucket) - self.assertEqual(notification.topic_name, self.TOPIC_NAME) - self.assertEqual(notification.topic_project, self.TOPIC_ALT_PROJECT) - self.assertEqual(notification.custom_attributes, self.CUSTOM_ATTRIBUTES) - self.assertEqual(notification.event_types, self.event_types()) - self.assertEqual(notification.blob_name_prefix, self.BLOB_NAME_PREFIX) - self.assertEqual(notification.payload_format, self.payload_format()) - - def test_from_api_repr_no_topic(self): - klass = self._get_target_class() - client = self._make_client() - 
bucket = self._make_bucket(client) - resource = {} - - with self.assertRaises(ValueError): - klass.from_api_repr(resource, bucket=bucket) - - def test_from_api_repr_invalid_topic(self): - klass = self._get_target_class() - client = self._make_client() - bucket = self._make_bucket(client) - resource = {"topic": "@#$%"} - - with self.assertRaises(ValueError): - klass.from_api_repr(resource, bucket=bucket) - - def test_from_api_repr_minimal(self): - from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT - - klass = self._get_target_class() - client = self._make_client() - bucket = self._make_bucket(client) - resource = { - "topic": self.TOPIC_REF, - "id": self.NOTIFICATION_ID, - "etag": self.ETAG, - "selfLink": self.SELF_LINK, - "payload_format": NONE_PAYLOAD_FORMAT, - } - - notification = klass.from_api_repr(resource, bucket=bucket) - - self.assertIs(notification.bucket, bucket) - self.assertEqual(notification.topic_name, self.TOPIC_NAME) - self.assertEqual(notification.topic_project, self.BUCKET_PROJECT) - self.assertIsNone(notification.custom_attributes) - self.assertIsNone(notification.event_types) - self.assertIsNone(notification.blob_name_prefix) - self.assertEqual(notification.payload_format, NONE_PAYLOAD_FORMAT) - self.assertEqual(notification.etag, self.ETAG) - self.assertEqual(notification.self_link, self.SELF_LINK) - - def test_from_api_repr_explicit(self): - klass = self._get_target_class() - client = self._make_client() - bucket = self._make_bucket(client) - resource = { - "topic": self.TOPIC_ALT_REF, - "custom_attributes": self.CUSTOM_ATTRIBUTES, - "event_types": self.event_types(), - "object_name_prefix": self.BLOB_NAME_PREFIX, - "payload_format": self.payload_format(), - "id": self.NOTIFICATION_ID, - "etag": self.ETAG, - "selfLink": self.SELF_LINK, - } - - notification = klass.from_api_repr(resource, bucket=bucket) - - self.assertIs(notification.bucket, bucket) - self.assertEqual(notification.topic_name, self.TOPIC_NAME) - self.assertEqual(notification.topic_project, self.TOPIC_ALT_PROJECT) - self.assertEqual(notification.custom_attributes, self.CUSTOM_ATTRIBUTES) - self.assertEqual(notification.event_types, self.event_types()) - self.assertEqual(notification.blob_name_prefix, self.BLOB_NAME_PREFIX) - self.assertEqual(notification.payload_format, self.payload_format()) - self.assertEqual(notification.notification_id, self.NOTIFICATION_ID) - self.assertEqual(notification.etag, self.ETAG) - self.assertEqual(notification.self_link, self.SELF_LINK) - - def test_notification_id(self): - client = self._make_client() - bucket = self._make_bucket(client) - - notification = self._make_one(bucket, self.TOPIC_NAME) - - self.assertIsNone(notification.notification_id) - - notification._properties["id"] = self.NOTIFICATION_ID - self.assertEqual(notification.notification_id, self.NOTIFICATION_ID) - - def test_etag(self): - client = self._make_client() - bucket = self._make_bucket(client) - - notification = self._make_one(bucket, self.TOPIC_NAME) - - self.assertIsNone(notification.etag) - - notification._properties["etag"] = self.ETAG - self.assertEqual(notification.etag, self.ETAG) - - def test_self_link(self): - client = self._make_client() - bucket = self._make_bucket(client) - - notification = self._make_one(bucket, self.TOPIC_NAME) - - self.assertIsNone(notification.self_link) - - notification._properties["selfLink"] = self.SELF_LINK - self.assertEqual(notification.self_link, self.SELF_LINK) - - def test_create_w_existing_notification_id(self): - client = self._make_client() - 
bucket = self._make_bucket(client) - notification = self._make_one(bucket, self.TOPIC_NAME) - notification._properties["id"] = self.NOTIFICATION_ID - - with self.assertRaises(ValueError): - notification.create() - - def test_create_w_defaults(self): - from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT - - client = self._make_client() - bucket = self._make_bucket(client) - notification = self._make_one(bucket, self.TOPIC_NAME) - api_request = client._connection.api_request - api_request.return_value = { - "topic": self.TOPIC_REF, - "id": self.NOTIFICATION_ID, - "etag": self.ETAG, - "selfLink": self.SELF_LINK, - "payload_format": NONE_PAYLOAD_FORMAT, - } - - notification.create() - - self.assertEqual(notification.notification_id, self.NOTIFICATION_ID) - self.assertEqual(notification.etag, self.ETAG) - self.assertEqual(notification.self_link, self.SELF_LINK) - self.assertIsNone(notification.custom_attributes) - self.assertIsNone(notification.event_types) - self.assertIsNone(notification.blob_name_prefix) - self.assertEqual(notification.payload_format, NONE_PAYLOAD_FORMAT) - - data = {"topic": self.TOPIC_REF, "payload_format": NONE_PAYLOAD_FORMAT} - api_request.assert_called_once_with( - method="POST", path=self.CREATE_PATH, query_params={}, data=data - ) - - def test_create_w_explicit_client(self): - USER_PROJECT = "user-project-123" - client = self._make_client() - alt_client = self._make_client() - bucket = self._make_bucket(client, user_project=USER_PROJECT) - notification = self._make_one( - bucket, - self.TOPIC_NAME, - topic_project=self.TOPIC_ALT_PROJECT, - custom_attributes=self.CUSTOM_ATTRIBUTES, - event_types=self.event_types(), - blob_name_prefix=self.BLOB_NAME_PREFIX, - payload_format=self.payload_format(), - ) - api_request = alt_client._connection.api_request - api_request.return_value = { - "topic": self.TOPIC_ALT_REF, - "custom_attributes": self.CUSTOM_ATTRIBUTES, - "event_types": self.event_types(), - "object_name_prefix": self.BLOB_NAME_PREFIX, - "payload_format": self.payload_format(), - "id": self.NOTIFICATION_ID, - "etag": self.ETAG, - "selfLink": self.SELF_LINK, - } - - notification.create(client=alt_client) - - self.assertEqual(notification.custom_attributes, self.CUSTOM_ATTRIBUTES) - self.assertEqual(notification.event_types, self.event_types()) - self.assertEqual(notification.blob_name_prefix, self.BLOB_NAME_PREFIX) - self.assertEqual(notification.payload_format, self.payload_format()) - self.assertEqual(notification.notification_id, self.NOTIFICATION_ID) - self.assertEqual(notification.etag, self.ETAG) - self.assertEqual(notification.self_link, self.SELF_LINK) - - data = { - "topic": self.TOPIC_ALT_REF, - "custom_attributes": self.CUSTOM_ATTRIBUTES, - "event_types": self.event_types(), - "object_name_prefix": self.BLOB_NAME_PREFIX, - "payload_format": self.payload_format(), - } - api_request.assert_called_once_with( - method="POST", - path=self.CREATE_PATH, - query_params={"userProject": USER_PROJECT}, - data=data, - ) - - def test_exists_wo_notification_id(self): - client = self._make_client() - bucket = self._make_bucket(client) - notification = self._make_one(bucket, self.TOPIC_NAME) - - with self.assertRaises(ValueError): - notification.exists() - - def test_exists_miss(self): - from google.cloud.exceptions import NotFound - - client = self._make_client() - bucket = self._make_bucket(client) - notification = self._make_one(bucket, self.TOPIC_NAME) - notification._properties["id"] = self.NOTIFICATION_ID - api_request = client._connection.api_request 
- api_request.side_effect = NotFound("testing") - - self.assertFalse(notification.exists()) - - api_request.assert_called_once_with( - method="GET", path=self.NOTIFICATION_PATH, query_params={} - ) - - def test_exists_hit(self): - USER_PROJECT = "user-project-123" - client = self._make_client() - bucket = self._make_bucket(client, user_project=USER_PROJECT) - notification = self._make_one(bucket, self.TOPIC_NAME) - notification._properties["id"] = self.NOTIFICATION_ID - api_request = client._connection.api_request - api_request.return_value = { - "topic": self.TOPIC_REF, - "id": self.NOTIFICATION_ID, - "etag": self.ETAG, - "selfLink": self.SELF_LINK, - } - - self.assertTrue(notification.exists(client=client)) - - api_request.assert_called_once_with( - method="GET", - path=self.NOTIFICATION_PATH, - query_params={"userProject": USER_PROJECT}, - ) - - def test_reload_wo_notification_id(self): - client = self._make_client() - bucket = self._make_bucket(client) - notification = self._make_one(bucket, self.TOPIC_NAME) - - with self.assertRaises(ValueError): - notification.reload() - - def test_reload_miss(self): - from google.cloud.exceptions import NotFound - - client = self._make_client() - bucket = self._make_bucket(client) - notification = self._make_one(bucket, self.TOPIC_NAME) - notification._properties["id"] = self.NOTIFICATION_ID - api_request = client._connection.api_request - api_request.side_effect = NotFound("testing") - - with self.assertRaises(NotFound): - notification.reload() - - api_request.assert_called_once_with( - method="GET", path=self.NOTIFICATION_PATH, query_params={} - ) - - def test_reload_hit(self): - from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT - - USER_PROJECT = "user-project-123" - client = self._make_client() - bucket = self._make_bucket(client, user_project=USER_PROJECT) - notification = self._make_one(bucket, self.TOPIC_NAME) - notification._properties["id"] = self.NOTIFICATION_ID - api_request = client._connection.api_request - api_request.return_value = { - "topic": self.TOPIC_REF, - "id": self.NOTIFICATION_ID, - "etag": self.ETAG, - "selfLink": self.SELF_LINK, - "payload_format": NONE_PAYLOAD_FORMAT, - } - - notification.reload(client=client) - - self.assertEqual(notification.etag, self.ETAG) - self.assertEqual(notification.self_link, self.SELF_LINK) - self.assertIsNone(notification.custom_attributes) - self.assertIsNone(notification.event_types) - self.assertIsNone(notification.blob_name_prefix) - self.assertEqual(notification.payload_format, NONE_PAYLOAD_FORMAT) - - api_request.assert_called_once_with( - method="GET", - path=self.NOTIFICATION_PATH, - query_params={"userProject": USER_PROJECT}, - ) - - def test_delete_wo_notification_id(self): - client = self._make_client() - bucket = self._make_bucket(client) - notification = self._make_one(bucket, self.TOPIC_NAME) - - with self.assertRaises(ValueError): - notification.delete() - - def test_delete_miss(self): - from google.cloud.exceptions import NotFound - - client = self._make_client() - bucket = self._make_bucket(client) - notification = self._make_one(bucket, self.TOPIC_NAME) - notification._properties["id"] = self.NOTIFICATION_ID - api_request = client._connection.api_request - api_request.side_effect = NotFound("testing") - - with self.assertRaises(NotFound): - notification.delete() - - api_request.assert_called_once_with( - method="DELETE", path=self.NOTIFICATION_PATH, query_params={} - ) - - def test_delete_hit(self): - USER_PROJECT = "user-project-123" - client = 
self._make_client() - bucket = self._make_bucket(client, user_project=USER_PROJECT) - notification = self._make_one(bucket, self.TOPIC_NAME) - notification._properties["id"] = self.NOTIFICATION_ID - api_request = client._connection.api_request - api_request.return_value = None - - notification.delete(client=client) - - api_request.assert_called_once_with( - method="DELETE", - path=self.NOTIFICATION_PATH, - query_params={"userProject": USER_PROJECT}, - ) - - -class Test__parse_topic_path(unittest.TestCase): - @staticmethod - def _call_fut(*args, **kwargs): - from google.cloud.storage import notification - - return notification._parse_topic_path(*args, **kwargs) - - @staticmethod - def _make_topic_path(project, topic_name): - from google.cloud.storage import notification - - return notification._TOPIC_REF_FMT.format(project, topic_name) - - def test_project_name_too_long(self): - project = "a" * 31 - topic_path = self._make_topic_path(project, "topic-name") - with self.assertRaises(ValueError): - self._call_fut(topic_path) - - def test_project_name_uppercase(self): - project = "aaaAaa" - topic_path = self._make_topic_path(project, "topic-name") - with self.assertRaises(ValueError): - self._call_fut(topic_path) - - def test_leading_digit(self): - project = "1aaaaa" - topic_path = self._make_topic_path(project, "topic-name") - with self.assertRaises(ValueError): - self._call_fut(topic_path) - - def test_leading_hyphen(self): - project = "-aaaaa" - topic_path = self._make_topic_path(project, "topic-name") - with self.assertRaises(ValueError): - self._call_fut(topic_path) - - def test_trailing_hyphen(self): - project = "aaaaa-" - topic_path = self._make_topic_path(project, "topic-name") - with self.assertRaises(ValueError): - self._call_fut(topic_path) - - def test_invalid_format(self): - topic_path = "@#$%" - with self.assertRaises(ValueError): - self._call_fut(topic_path) - - def test_success(self): - topic_name = "tah-pic-nehm" - project_choices = ( - "a" * 30, # Max length. - "a-b--c---d", # Valid hyphen usage. - "abcdefghijklmnopqrstuvwxyz", # Valid letters. - "z0123456789", # Valid digits (non-leading). 
- "a-bcdefghijklmn12opqrstuv0wxyz", - ) - for project in project_choices: - topic_path = self._make_topic_path(project, topic_name) - result = self._call_fut(topic_path) - self.assertEqual(result, (topic_name, project)) diff --git a/storage/tests/unit/url_signer_v4_test_account.json b/storage/tests/unit/url_signer_v4_test_account.json deleted file mode 100644 index 5fdc01240ef8..000000000000 --- a/storage/tests/unit/url_signer_v4_test_account.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "type": "service_account", - "project_id": "dummy-project-id", - "private_key_id": "ffffffffffffffffffffffffffffffffffffffff", - "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCsPzMirIottfQ2\nryjQmPWocSEeGo7f7Q4/tMQXHlXFzo93AGgU2t+clEj9L5loNhLVq+vk+qmnyDz5\nQ04y8jVWyMYzzGNNrGRW/yaYqnqlKZCy1O3bmnNjV7EDbC/jE1ZLBY0U3HaSHfn6\nS9ND8MXdgD0/ulRTWwq6vU8/w6i5tYsU7n2LLlQTl1fQ7/emO9nYcCFJezHZVa0H\nmeWsdHwWsok0skwQYQNIzP3JF9BpR5gJT2gNge6KopDesJeLoLzaX7cUnDn+CAnn\nLuLDwwSsIVKyVxhBFsFXPplgpaQRwmGzwEbf/Xpt9qo26w2UMgn30jsOaKlSeAX8\ncS6ViF+tAgMBAAECggEACKRuJCP8leEOhQziUx8Nmls8wmYqO4WJJLyk5xUMUC22\nSI4CauN1e0V8aQmxnIc0CDkFT7qc9xBmsMoF+yvobbeKrFApvlyzNyM7tEa/exh8\nDGD/IzjbZ8VfWhDcUTwn5QE9DCoon9m1sG+MBNlokB3OVOt8LieAAREdEBG43kJu\nyQTOkY9BGR2AY1FnAl2VZ/jhNDyrme3tp1sW1BJrawzR7Ujo8DzlVcS2geKA9at7\n55ua5GbHz3hfzFgjVXDfnkWzId6aHypUyqHrSn1SqGEbyXTaleKTc6Pgv0PgkJjG\nhZazWWdSuf1T5Xbs0OhAK9qraoAzT6cXXvMEvvPt6QKBgQDXcZKqJAOnGEU4b9+v\nOdoh+nssdrIOBNMu1m8mYbUVYS1aakc1iDGIIWNM3qAwbG+yNEIi2xi80a2RMw2T\n9RyCNB7yqCXXVKLBiwg9FbKMai6Vpk2bWIrzahM9on7AhCax/X2AeOp+UyYhFEy6\nUFG4aHb8THscL7b515ukSuKb5QKBgQDMq+9PuaB0eHsrmL6q4vHNi3MLgijGg/zu\nAXaPygSYAwYW8KglcuLZPvWrL6OG0+CrfmaWTLsyIZO4Uhdj7MLvX6yK7IMnagvk\nL3xjgxSklEHJAwi5wFeJ8ai/1MIuCn8p2re3CbwISKpvf7Sgs/W4196P4vKvTiAz\njcTiSYFIKQKBgCjMpkS4O0TakMlGTmsFnqyOneLmu4NyIHgfPb9cA4n/9DHKLKAT\noaWxBPgatOVWs7RgtyGYsk+XubHkpC6f3X0+15mGhFwJ+CSE6tN+l2iF9zp52vqP\nQwkjzm7+pdhZbmaIpcq9m1K+9lqPWJRz/3XXuqi+5xWIZ7NaxGvRjqaNAoGAdK2b\nutZ2y48XoI3uPFsuP+A8kJX+CtWZrlE1NtmS7tnicdd19AtfmTuUL6fz0FwfW4Su\nlQZfPT/5B339CaEiq/Xd1kDor+J7rvUHM2+5p+1A54gMRGCLRv92FQ4EON0RC1o9\nm2I4SHysdO3XmjmdXmfp4BsgAKJIJzutvtbqlakCgYB+Cb10z37NJJ+WgjDt+yT2\nyUNH17EAYgWXryfRgTyi2POHuJitd64Xzuy6oBVs3wVveYFM6PIKXlj8/DahYX5I\nR2WIzoCNLL3bEZ+nC6Jofpb4kspoAeRporj29SgesK6QBYWHWX2H645RkRGYGpDo\n51gjy9m/hSNqBbH2zmh04A==\n-----END PRIVATE KEY-----\n", - "client_email": "test-iam-credentials@dummy-project-id.iam.gserviceaccount.com", - "client_id": "000000000000000000000", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://oauth2.googleapis.com/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com" -} \ No newline at end of file diff --git a/storage/tests/unit/url_signer_v4_test_data.json b/storage/tests/unit/url_signer_v4_test_data.json deleted file mode 100644 index 807f6cf49a3e..000000000000 --- a/storage/tests/unit/url_signer_v4_test_data.json +++ /dev/null @@ -1,122 +0,0 @@ -[ - { - "description": "Simple GET", - "bucket": "test-bucket", - "object": "test-object", - "method": "GET", - "expiration": 10, - "timestamp": "20190201T090000Z", - "expectedUrl": 
"https://storage.googleapis.com/test-bucket/test-object?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host&X-Goog-Signature=95e6a13d43a1d1962e667f17397f2b80ac9bdd1669210d5e08e0135df9dff4e56113485dbe429ca2266487b9d1796ebdee2d7cf682a6ef3bb9fbb4c351686fba90d7b621cf1c4eb1fdf126460dd25fa0837dfdde0a9fd98662ce60844c458448fb2b352c203d9969cb74efa4bdb742287744a4f2308afa4af0e0773f55e32e92973619249214b97283b2daa14195244444e33f938138d1e5f561088ce8011f4986dda33a556412594db7c12fc40e1ff3f1bedeb7a42f5bcda0b9567f17f65855f65071fabb88ea12371877f3f77f10e1466fff6ff6973b74a933322ff0949ce357e20abe96c3dd5cfab42c9c83e740a4d32b9e11e146f0eb3404d2e975896f74" - }, - - { - "description": "Simple PUT", - "bucket": "test-bucket", - "object": "test-object", - "method": "PUT", - "expiration": 10, - "timestamp": "20190201T090000Z", - "expectedUrl": "https://storage.googleapis.com/test-bucket/test-object?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host&X-Goog-Signature=8adff1d4285739e31aa68e73767a46bc5511fde377497dbe08481bf5ceb34e29cc9a59921748d8ec3dd4085b7e9b7772a952afedfcdaecb3ae8352275b8b7c867f204e3db85076220a3127a8a9589302fc1181eae13b9b7fe41109ec8cdc93c1e8bac2d7a0cc32a109ca02d06957211326563ab3d3e678a0ba296e298b5fc5e14593c99d444c94724cc4be97015dbff1dca377b508fa0cb7169195de98d0e4ac96c42b918d28c8d92d33e1bd125ce0fb3cd7ad2c45dae65c22628378f6584971b8bf3945b26f2611eb651e9b6a8648970c1ecf386bb71327b082e7296c4e1ee2fc0bdd8983da80af375c817fb1ad491d0bc22c0f51dba0d66e2cffbc90803e47" - }, - - { - "description": "POST for resumable uploads", - "bucket": "test-bucket", - "object": "test-object", - "method": "POST", - "expiration": 10, - "headers": { - "x-goog-resumable": "start" - }, - "timestamp": "20190201T090000Z", - "expectedUrl": "https://storage.googleapis.com/test-bucket/test-object?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host%3Bx-goog-resumable&X-Goog-Signature=4a6d39b23343cedf4c30782aed4b384001828c79ffa3a080a481ea01a640dea0a0ceb58d67a12cef3b243c3f036bb3799c6ee88e8db3eaf7d0bdd4b70a228d0736e07eaa1ee076aff5c6ce09dff1f1f03a0d8ead0d2893408dd3604fdabff553aa6d7af2da67cdba6790006a70240f96717b98f1a6ccb24f00940749599be7ef72aaa5358db63ddd54b2de9e2d6d6a586eac4fe25f36d86fc6ab150418e9c6fa01b732cded226c6d62fc95b72473a4cc55a8257482583fe66d9ab6ede909eb41516a8690946c3e87b0f2052eb0e97e012a14b2f721c42e6e19b8a1cd5658ea36264f10b9b1ada66b8ed5bf7ed7d1708377ac6e5fe608ae361fb594d2e5b24c54" - }, - - { - "description": "Vary expiration and timestamp", - "bucket": "test-bucket", - "object": "test-object", - "method": "GET", - "expiration": 20, - "timestamp": "20190301T090000Z", - "expectedUrl": 
"https://storage.googleapis.com/test-bucket/test-object?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190301%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190301T090000Z&X-Goog-Expires=20&X-Goog-SignedHeaders=host&X-Goog-Signature=9669ed5b10664dc594c758296580662912cf4bcc5a4ba0b6bf055bcbf6f34eed7bdad664f534962174a924741a0c273a4f67bc1847cef20192a6beab44223bd9d4fbbd749c407b79997598c30f82ddc269ff47ec09fa3afe74e00616d438df0d96a7d8ad0adacfad1dc3286f864d924fe919fb0dce45d3d975c5afe8e13af2db9cc37ba77835f92f7669b61e94c6d562196c1274529e76cfff1564cc2cad7d5387dc8e12f7a5dfd925685fe92c30b43709eee29fa2f66067472cee5423d1a3a4182fe8cea75c9329d181dc6acad7c393cd04f8bf5bc0515127d8ebd65d80c08e19ad03316053ea60033fd1b1fd85a69c576415da3bf0a3718d9ea6d03e0d66f0" - }, - - { - "description": "Vary bucket and object", - "bucket": "test-bucket2", - "object": "test-object2", - "method": "GET", - "expiration": 10, - "timestamp": "20190201T090000Z", - "expectedUrl": "https://storage.googleapis.com/test-bucket2/test-object2?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host&X-Goog-Signature=36e3d58dfd3ec1d2dd2f24b5ee372a71e811ffaa2162a2b871d26728d0354270bc116face87127532969c4a3967ed05b7309af741e19c7202f3167aa8c2ac420b61417d6451442bb91d7c822cd17be8783f01e05372769c88913561d27e6660dd8259f0081a71f831be6c50283626cbf04494ac10c394b29bb3bce74ab91548f58a37118a452693cf0483d77561fc9cac8f1765d2c724994cca46a83517a10157ee0347a233a2aaeae6e6ab5e204ff8fc5f54f90a3efdb8301d9fff5475d58cd05b181affd657f48203f4fb133c3a3d355b8eefbd10d5a0a5fd70d06e9515460ad74e22334b2cba4b29cae4f6f285cdb92d8f3126d7a1479ca3bdb69c207d860" - }, - - { - "description": "Simple headers", - "bucket": "test-bucket", - "object": "test-object", - "headers": { - "foo": "foo-value", - "BAR": "BAR-value" - }, - "method": "GET", - "expiration": 10, - "timestamp": "20190201T090000Z", - "expectedUrl": "https://storage.googleapis.com/test-bucket/test-object?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=bar%3Bfoo%3Bhost&X-Goog-Signature=68ecd3b008328ed30d91e2fe37444ed7b9b03f28ed4424555b5161980531ef87db1c3a5bc0265aad5640af30f96014c94fb2dba7479c41bfe1c020eb90c0c6d387d4dd09d4a5df8b60ea50eb6b01cdd786a1e37020f5f95eb8f9b6cd3f65a1f8a8a65c9fcb61ea662959efd9cd73b683f8d8804ef4d6d9b2852419b013368842731359d7f9e6d1139032ceca75d5e67cee5fd0192ea2125e5f2955d38d3d50cf116f3a52e6a62de77f6207f5b95aaa1d7d0f8a46de89ea72e7ea30f21286318d7eba0142232b0deb3a1dc9e1e812a981c66b5ffda3c6b01a8a9d113155792309fd53a3acfd054ca7776e8eec28c26480cd1e3c812f67f91d14217f39a606669d" - }, - - { - "description": "Headers should be trimmed", - "bucket": "test-bucket", - "object": "test-object", - "headers": { - "leading": " xyz", - "trailing": "abc ", - "collapsed": "abc def" - }, - "method": "GET", - "expiration": 10, - "timestamp": "20190201T090000Z", - "expectedUrl": 
"https://storage.googleapis.com/test-bucket/test-object?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=collapsed%3Bhost%3Bleading%3Btrailing&X-Goog-Signature=1839511d6238d9ac2bbcbba8b23515b3757db35dfa7b8f9bc4b8b4aa270224df747c812526f1a3bcf294d67ed84cd14e074c36bc090e0a542782934a7c925af4a5ea68123e97533704ce8b08ccdf5fe6b412f89c9fc4de243e29abdb098382c5672188ee3f6fef7131413e252c78e7a35658825ad842a50609e9cc463731e17284ff7a14824c989f87cef22fb99dfec20cfeed69d8b3a08f00b43b8284eecd535e50e982b05cd74c5750cd5f986cfc21a2a05f7f3ab7fc31bd684ed1b823b64d29281e923fc6580c49005552ca19c253de087d9d2df881144e44eda40965cfdb4889bf3a35553c9809f4ed20b8355be481b92b9618952b6a04f3017b36053e15" - }, - - { - "description": "Header value with multiple inline values", - "bucket": "test-bucket", - "object": "test-object", - "headers": { - "multiple": " xyz , abc, def , xyz " - }, - "method": "GET", - "expiration": 10, - "timestamp": "20190201T090000Z", - "expectedUrl": "https://storage.googleapis.com/test-bucket/test-object?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host%3Bmultiple&X-Goog-Signature=5cc113735625341f59c7203f0c2c9febc95ba6af6b9c38814f8e523214712087dc0996e4960d273ae1889f248ac1e58d4d19cb3a69ad7670e9a8ca1b434e878f59339dc7006cf32dfd715337e9f593e0504371839174962a08294586e0c78160a7aa303397888c8350637c6af3b32ac310886cc4590bfda9ca561ee58fb5b8ec56bc606d2ada6e7df31f4276e9dcb96bcaea39dc2cd096f3fad774f9c4b30e317ad43736c05f76831437f44e8726c1e90d3f6c9827dc273f211f32fc85658dfc5d357eb606743a6b00a29e519eef1bebaf9db3e8f4b1f5f9afb648ad06e60bc42fa8b57025056697c874c9ea76f5a73201c9717ea43e54713ff3502ff3fc626b" - }, - - { - "description": "Customer-supplied encryption key", - "bucket": "test-bucket", - "object": "test-object", - "headers": - { - "X-Goog-Encryption-Key": "key", - "X-Goog-Encryption-Key-Sha256": "key-hash", - "X-Goog-Encryption-Algorithm": "AES256" - }, - "method": "GET", - "expiration": 10, - "timestamp": "20190201T090000Z", - "expectedUrl": "https://storage.googleapis.com/test-bucket/test-object?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host%3Bx-goog-encryption-algorithm%3Bx-goog-encryption-key%3Bx-goog-encryption-key-sha256&X-Goog-Signature=278a1c5a3bad248637054a047014760353942433955871031ed08f515b54588654ad033e91f046ab202b68673030e117d1b786c325e870238b035ba75b3feed560a17aff9bab6bddebd4a31a52cb68b214e27d3b0bd886502c6b36b164306fe88b5a07c6063592afe746b2a5d205dbe90dd5386b94f0a78f75d9f53ee884e18f476e8fc2eb1dd910ce0b4ae1f5d7b09876ef9bf983f539c028429e14bad3c75dbd4ed1ae37856f6d6f8a1805eaf8b52a0d6fc993902e4c1ee8de477661f7b67c3663000474cb00e178189789b2a3ed6bd21b4ade684fca8108ac4dd106acb17f5954d045775f7aa5a98ebda5d3075e11a8ea49c64c6ad1481e463e8c9f11f704" - }, - - { - "description": "List Objects", - "bucket": "test-bucket", - "object": "", - "method": "GET", - "expiration": 10, - "timestamp": "20190201T090000Z", - "expectedUrl": 
"https://storage.googleapis.com/test-bucket?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=test-iam-credentials%40dummy-project-id.iam.gserviceaccount.com%2F20190201%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20190201T090000Z&X-Goog-Expires=10&X-Goog-SignedHeaders=host&X-Goog-Signature=6dbe94f8e52b2b8a9a476b1c857efa474e09944e2b52b925800316e094a7169d8dbe0df9c0ac08dabb22ac7e827470ceccd65f5a3eadba2a4fb9beebfe37f0d9bb1e552b851fa31a25045bdf019e507f5feb44f061551ef1aeb18dcec0e38ba2e2f77d560a46eaace9c56ed9aa642281301a9d848b0eb30749e34bc7f73a3d596240533466ff9b5f289cd0d4c845c7d96b82a35a5abd0c3aff83e4440ee6873e796087f43545544dc8c01afe1d79c726696b6f555371e491980e7ec145cca0803cf562c38f3fa1d724242f5dea25aac91d74ec9ddd739ff65523627763eaef25cd1f95ad985aaf0079b7c74eb5bcb2870a9b137a7b2c8e41fbe838c95872f75b" - } -]