From ca10df92e6b2b83895dbd8697f68416c22ca6e1d Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 21 Jul 2021 16:01:18 -0400 Subject: [PATCH 01/10] tests: break out shared setup / teardown into pytest fixtures --- tests/system/__init__.py | 15 ++++++ tests/system/_helpers.py | 19 ++++++++ tests/system/conftest.py | 101 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 135 insertions(+) create mode 100644 tests/system/__init__.py create mode 100644 tests/system/_helpers.py create mode 100644 tests/system/conftest.py diff --git a/tests/system/__init__.py b/tests/system/__init__.py new file mode 100644 index 000000000..4de65971c --- /dev/null +++ b/tests/system/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py new file mode 100644 index 000000000..6d7e0292b --- /dev/null +++ b/tests/system/_helpers.py @@ -0,0 +1,19 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.api_core import exceptions +from test_utils import retry + + +retry_429 = retry.RetryErrors(exceptions.TooManyRequests, max_tries=9) diff --git a/tests/system/conftest.py b/tests/system/conftest.py new file mode 100644 index 000000000..9d3a4a42f --- /dev/null +++ b/tests/system/conftest.py @@ -0,0 +1,101 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import os + +import pytest +from test_utils.system import unique_resource_id + +from google.cloud._helpers import UTC +from google.cloud.bigtable.client import Client +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from . 
import _helpers + + +@pytest.fixture(scope="session") +def in_emulator(): + return os.getenv(BIGTABLE_EMULATOR) is not None + + +@pytest.fixture(scope="session") +def not_in_emulator(in_emulator): + if in_emulator: + pytest.skip("Emulator does not support this feature") + + +@pytest.fixture(scope="session") +def unique_suffix(): + return unique_resource_id("-") + + +@pytest.fixture(scope="session") +def location_id(): + return "us-central1-c" + + +@pytest.fixture(scope="session") +def serve_nodes(): + return 3 + + +@pytest.fixture(scope="session") +def instance_labels(): + label_key = "python-system" + label_stamp = ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") + ) + return {label_key: str(label_stamp)} + + +@pytest.fixture(scope="session") +def admin_client(): + return Client(admin=True) + + +@pytest.fixture(scope="session") +def admin_instance_id(unique_suffix): + return f"g-c-p{unique_suffix}" + + +@pytest.fixture(scope="session") +def admin_cluster_id(admin_instance_id): + return f"{admin_instance_id}-cluster" + + +@pytest.fixture(scope="session") +def admin_instance(admin_client, admin_instance_id, instance_labels, in_emulator): + return admin_client.instance(admin_instance_id, labels=instance_labels) + + +@pytest.fixture(scope="session") +def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): + return admin_instance.cluster( + admin_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + ) + + +@pytest.fixture(scope="session") +def admin_instance_populated(admin_instance, admin_cluster, in_emulator): + if not in_emulator: + operation = admin_instance.create(clusters=[admin_cluster]) + operation.result(timeout=30) + + yield admin_instance + + if not in_emulator: + _helpers.retry_429(admin_instance.delete)() From 7a483cb4f06cfd4bf80f4ca71c2fff396c3ef6ae Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 21 Jul 2021 16:01:58 -0400 Subject: [PATCH 02/10] tests: move instance admin systests into a separate module --- tests/system/_helpers.py | 11 + tests/system/conftest.py | 31 +- tests/system/test_instance_admin.py | 625 ++++++++++++++++ tests/{system.py => system/test_monolith.py} | 709 ------------------- 4 files changed, 656 insertions(+), 720 deletions(-) create mode 100644 tests/system/test_instance_admin.py rename tests/{system.py => system/test_monolith.py} (52%) diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py index 6d7e0292b..3c88f6aae 100644 --- a/tests/system/_helpers.py +++ b/tests/system/_helpers.py @@ -12,8 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datetime + from google.api_core import exceptions +from google.cloud._helpers import UTC from test_utils import retry retry_429 = retry.RetryErrors(exceptions.TooManyRequests, max_tries=9) + + +def label_stamp(): + return ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") + ) diff --git a/tests/system/conftest.py b/tests/system/conftest.py index 9d3a4a42f..0aa7b0d6b 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -12,13 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import datetime import os import pytest from test_utils.system import unique_resource_id -from google.cloud._helpers import UTC from google.cloud.bigtable.client import Client from google.cloud.environment_vars import BIGTABLE_EMULATOR @@ -30,6 +28,18 @@ def in_emulator(): return os.getenv(BIGTABLE_EMULATOR) is not None +@pytest.fixture(scope="session") +def kms_key_name(): + return os.getenv("KMS_KEY_NAME") + + +@pytest.fixture(scope="session") +def with_kms_key_name(kms_key_name): + if kms_key_name is None: + pytest.skip("Test requires KMS_KEY_NAME environment variable") + return kms_key_name + + @pytest.fixture(scope="session") def not_in_emulator(in_emulator): if in_emulator: @@ -52,14 +62,13 @@ def serve_nodes(): @pytest.fixture(scope="session") -def instance_labels(): - label_key = "python-system" - label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") - ) - return {label_key: str(label_stamp)} +def label_key(): + return "python-system" + + +@pytest.fixture(scope="session") +def instance_labels(label_key): + return {label_key: _helpers.label_stamp()} @pytest.fixture(scope="session") @@ -78,7 +87,7 @@ def admin_cluster_id(admin_instance_id): @pytest.fixture(scope="session") -def admin_instance(admin_client, admin_instance_id, instance_labels, in_emulator): +def admin_instance(admin_client, admin_instance_id, instance_labels): return admin_client.instance(admin_instance_id, labels=instance_labels) diff --git a/tests/system/test_instance_admin.py b/tests/system/test_instance_admin.py new file mode 100644 index 000000000..2a3cf2fa3 --- /dev/null +++ b/tests/system/test_instance_admin.py @@ -0,0 +1,625 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud.bigtable import enums +from google.cloud.bigtable.table import ClusterState + +import pytest + +from . 
import _helpers
+
+
+@pytest.fixture(scope="function")
+def instances_to_delete():
+    instances_to_delete = []
+
+    yield instances_to_delete
+
+    for instance in instances_to_delete:
+        _helpers.retry_429(instance.delete)()
+
+
+def _create_app_profile_helper(
+    app_profile_id,
+    instance,
+    routing_policy_type,
+    description=None,
+    cluster_id=None,
+    allow_transactional_writes=None,
+    ignore_warnings=None,
+):
+
+    app_profile = instance.app_profile(
+        app_profile_id=app_profile_id,
+        routing_policy_type=routing_policy_type,
+        description=description,
+        cluster_id=cluster_id,
+        allow_transactional_writes=allow_transactional_writes,
+    )
+    assert app_profile.allow_transactional_writes == allow_transactional_writes
+
+    app_profile.create(ignore_warnings=ignore_warnings)
+
+    # Load a different app_profile object from the server and
+    # verify that it is the same
+    alt_app_profile = instance.app_profile(app_profile_id)
+    alt_app_profile.reload()
+
+    assert app_profile.app_profile_id == alt_app_profile.app_profile_id
+    assert app_profile.routing_policy_type == routing_policy_type
+    assert alt_app_profile.routing_policy_type == routing_policy_type
+    assert app_profile.description == alt_app_profile.description
+    assert not app_profile.allow_transactional_writes
+    assert not alt_app_profile.allow_transactional_writes
+
+    return app_profile
+
+
+def _list_app_profiles_helper(instance, expected_app_profile_ids):
+    app_profiles = instance.list_app_profiles()
+    found = [app_prof.app_profile_id for app_prof in app_profiles]
+    for expected in expected_app_profile_ids:
+        assert expected in found
+
+
+def _modify_app_profile_helper(
+    app_profile_id,
+    instance,
+    routing_policy_type,
+    description=None,
+    cluster_id=None,
+    allow_transactional_writes=None,
+    ignore_warnings=None,
+):
+    app_profile = instance.app_profile(
+        app_profile_id=app_profile_id,
+        routing_policy_type=routing_policy_type,
+        description=description,
+        cluster_id=cluster_id,
+        allow_transactional_writes=allow_transactional_writes,
+    )
+
+    operation = app_profile.update(ignore_warnings=ignore_warnings)
+    operation.result(timeout=30)
+
+    alt_profile = instance.app_profile(app_profile_id)
+    alt_profile.reload()
+
+    assert alt_profile.description == description
+    assert alt_profile.routing_policy_type == routing_policy_type
+    assert alt_profile.cluster_id == cluster_id
+    assert alt_profile.allow_transactional_writes == allow_transactional_writes
+
+
+def _delete_app_profile_helper(app_profile):
+    assert app_profile.exists()
+    app_profile.delete(ignore_warnings=True)
+    assert not app_profile.exists()
+
+
+def test_client_list_instances(admin_client, admin_instance_populated, not_in_emulator):
+    instances, failed_locations = admin_client.list_instances()
+
+    assert failed_locations == []
+
+    found = set([instance.name for instance in instances])
+    assert admin_instance_populated.name in found
+
+
+def test_instance_exists_hit(admin_instance_populated):
+    assert admin_instance_populated.exists()
+
+
+def test_instance_exists_miss(admin_client):
+    alt_instance = admin_client.instance("nonesuch-instance")
+    assert not alt_instance.exists()
+
+
+def test_instance_reload(
+    admin_client, admin_instance_id, admin_instance_populated, not_in_emulator
+):
+    # Use same arguments as 'admin_instance_populated'
+    # so we can use reload() on a fresh instance.
+    alt_instance = admin_client.instance(admin_instance_id)
+    # Make sure metadata unset before reloading.
+ alt_instance.display_name = None + + alt_instance.reload() + + assert alt_instance.display_name == admin_instance_populated.display_name + assert alt_instance.labels == admin_instance_populated.labels + assert alt_instance.type_ == enums.Instance.Type.PRODUCTION + + +def test_instance_create_prod( + admin_client, + unique_suffix, + location_id, + instance_labels, + instances_to_delete, + not_in_emulator, +): + from google.cloud.bigtable import enums + + alt_instance_id = f"ndef{unique_suffix}" + instance = admin_client.instance(alt_instance_id, labels=instance_labels) + alt_cluster_id = f"{alt_instance_id}-cluster" + serve_nodes = 1 + cluster = instance.cluster( + alt_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + ) + + operation = instance.create(clusters=[cluster]) + instances_to_delete.append(instance) + operation.result(timeout=30) # Ensure the operation completes. + assert instance.type_ is None + + # Create a new instance instance and make sure it is the same. + instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance_alt.type_ == enums.Instance.Type.PRODUCTION + + +def test_instance_create_development( + admin_client, + unique_suffix, + location_id, + instance_labels, + instances_to_delete, + not_in_emulator, +): + alt_instance_id = f"new{unique_suffix}" + instance = admin_client.instance( + alt_instance_id, + instance_type=enums.Instance.Type.DEVELOPMENT, + labels=instance_labels, + ) + alt_cluster_id = f"{alt_instance_id}-cluster" + cluster = instance.cluster(alt_cluster_id, location_id=location_id) + + operation = instance.create(clusters=[cluster]) + instances_to_delete.append(instance) + operation.result(timeout=30) # Ensure the operation completes. + + # Create a new instance instance and make sure it is the same. + instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance.type_ == instance_alt.type_ + assert instance_alt.labels == instance_labels + assert instance_alt.state == enums.Instance.State.READY + + +def test_instance_create_w_two_clusters( + admin_client, + unique_suffix, + admin_instance_populated, + admin_cluster, + location_id, + instance_labels, + instances_to_delete, + not_in_emulator, +): + alt_instance_id = f"dif{unique_suffix}" + instance = admin_client.instance( + alt_instance_id, + instance_type=enums.Instance.Type.PRODUCTION, + labels=instance_labels, + ) + + serve_nodes = 1 + + alt_cluster_id_1 = f"{alt_instance_id}-c1" + cluster_1 = instance.cluster( + alt_cluster_id_1, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.HDD, + ) + + alt_cluster_id_2 = f"{alt_instance_id}-c2" + location_id_2 = "us-central1-f" + cluster_2 = instance.cluster( + alt_cluster_id_2, + location_id=location_id_2, + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.HDD, + ) + operation = instance.create(clusters=[cluster_1, cluster_2]) + instances_to_delete.append(instance) + operation.result(timeout=120) # Ensure the operation completes. + + # Create a new instance instance and make sure it is the same. 
+ instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance.type_ == instance_alt.type_ + + clusters, failed_locations = instance_alt.list_clusters() + assert failed_locations == [] + + alt_cluster_1, alt_cluster_2 = sorted(clusters, key=lambda x: x.name) + + assert cluster_1.location_id == alt_cluster_1.location_id + assert alt_cluster_1.state == enums.Cluster.State.READY + assert cluster_1.serve_nodes == alt_cluster_1.serve_nodes + assert cluster_1.default_storage_type == alt_cluster_1.default_storage_type + assert cluster_2.location_id == alt_cluster_2.location_id + assert alt_cluster_2.state == enums.Cluster.State.READY + assert cluster_2.serve_nodes == alt_cluster_2.serve_nodes + assert cluster_2.default_storage_type == alt_cluster_2.default_storage_type + + # Test list clusters in project via 'client.list_clusters' + clusters, failed_locations = admin_client.list_clusters() + assert not failed_locations + found = set([cluster.name for cluster in clusters]) + expected = {alt_cluster_1.name, alt_cluster_2.name, admin_cluster.name} + assert expected.issubset(found) + + temp_table_id = "test-get-cluster-states" + temp_table = instance.table(temp_table_id) + temp_table.create() + + EncryptionType = enums.EncryptionInfo.EncryptionType + encryption_info = temp_table.get_encryption_info() + assert ( + encryption_info[alt_cluster_id_1][0].encryption_type + == EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) + assert ( + encryption_info[alt_cluster_id_2][0].encryption_type + == EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) + + c_states = temp_table.get_cluster_states() + cluster_ids = set(c_states.keys()) + assert cluster_ids == {alt_cluster_id_1, alt_cluster_id_2} + + ReplicationState = enums.Table.ReplicationState + expected_results = [ + ClusterState(ReplicationState.STATE_NOT_KNOWN), + ClusterState(ReplicationState.INITIALIZING), + ClusterState(ReplicationState.PLANNED_MAINTENANCE), + ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), + ClusterState(ReplicationState.READY), + ] + + for clusterstate in c_states.values(): + assert clusterstate in expected_results + + # Test create app profile with multi_cluster_routing policy + app_profiles_to_delete = [] + description = "routing policy-multy" + app_profile_id_1 = "app_profile_id_1" + routing = enums.RoutingPolicyType.ANY + app_profile_1 = _create_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + ignore_warnings=True, + ) + app_profiles_to_delete.append(app_profile_1) + + # Test list app profiles + _list_app_profiles_helper(instance, [app_profile_id_1]) + + # Test modify app profile app_profile_id_1 + # routing policy to single cluster policy, + # cluster -> alt_cluster_id_1, + # allow_transactional_writes -> disallowed + # modify description + description = "to routing policy-single" + routing = enums.RoutingPolicyType.SINGLE + _modify_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + cluster_id=alt_cluster_id_1, + allow_transactional_writes=False, + ) + + # Test modify app profile app_profile_id_1 + # cluster -> alt_cluster_id_2, + # allow_transactional_writes -> allowed + _modify_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + cluster_id=alt_cluster_id_2, + allow_transactional_writes=True, + ignore_warnings=True, + ) + + # 
Test create app profile with single cluster routing policy
+    description = "routing policy-single"
+    app_profile_id_2 = "app_profile_id_2"
+    routing = enums.RoutingPolicyType.SINGLE
+    app_profile_2 = _create_app_profile_helper(
+        app_profile_id_2,
+        instance,
+        routing_policy_type=routing,
+        description=description,
+        cluster_id=alt_cluster_id_2,
+        allow_transactional_writes=False,
+    )
+    app_profiles_to_delete.append(app_profile_2)
+
+    # Test list app profiles
+    _list_app_profiles_helper(instance, [app_profile_id_1, app_profile_id_2])
+
+    # Test modify app profile app_profile_id_2 to
+    # allow transactional writes
+    # Note: no need to set ``ignore_warnings`` to True
+    # since we are not restricting anything with this modification.
+    _modify_app_profile_helper(
+        app_profile_id_2,
+        instance,
+        routing_policy_type=routing,
+        description=description,
+        cluster_id=alt_cluster_id_2,
+        allow_transactional_writes=True,
+    )
+
+    # Test modify app profile app_profile_id_2 routing policy
+    # to multi_cluster_routing policy
+    # modify description
+    description = "to routing policy-multy"
+    routing = enums.RoutingPolicyType.ANY
+    _modify_app_profile_helper(
+        app_profile_id_2,
+        instance,
+        routing_policy_type=routing,
+        description=description,
+        allow_transactional_writes=False,
+        ignore_warnings=True,
+    )
+
+    # Test delete app profiles
+    for app_profile in app_profiles_to_delete:
+        _delete_app_profile_helper(app_profile)
+
+
+def test_instance_create_w_two_clusters_cmek(
+    admin_client,
+    unique_suffix,
+    admin_instance_populated,
+    admin_cluster,
+    location_id,
+    instance_labels,
+    instances_to_delete,
+    with_kms_key_name,
+    not_in_emulator,
+):
+    alt_instance_id = f"dif-cmek{unique_suffix}"
+    instance = admin_client.instance(
+        alt_instance_id,
+        instance_type=enums.Instance.Type.PRODUCTION,
+        labels=instance_labels,
+    )
+
+    serve_nodes = 1
+
+    alt_cluster_id_1 = f"{alt_instance_id}-c1"
+    cluster_1 = instance.cluster(
+        alt_cluster_id_1,
+        location_id=location_id,
+        serve_nodes=serve_nodes,
+        default_storage_type=enums.StorageType.HDD,
+        kms_key_name=with_kms_key_name,
+    )
+
+    alt_cluster_id_2 = f"{alt_instance_id}-c2"
+    location_id_2 = "us-central1-f"
+    cluster_2 = instance.cluster(
+        alt_cluster_id_2,
+        location_id=location_id_2,
+        serve_nodes=serve_nodes,
+        default_storage_type=enums.StorageType.HDD,
+        kms_key_name=with_kms_key_name,
+    )
+    operation = instance.create(clusters=[cluster_1, cluster_2])
+    instances_to_delete.append(instance)
+    operation.result(timeout=120)  # Ensure the operation completes.
+
+    # Create a new instance instance and make sure it is the same.
+ instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance.type_ == instance_alt.type_ + + clusters, failed_locations = instance_alt.list_clusters() + assert failed_locations == [] + + alt_cluster_1, alt_cluster_2 = sorted(clusters, key=lambda x: x.name) + + assert cluster_1.location_id == alt_cluster_1.location_id + assert alt_cluster_1.state == enums.Cluster.State.READY + assert cluster_1.serve_nodes == alt_cluster_1.serve_nodes + assert cluster_1.default_storage_type == alt_cluster_1.default_storage_type + assert cluster_2.location_id == alt_cluster_2.location_id + assert alt_cluster_2.state == enums.Cluster.State.READY + assert cluster_2.serve_nodes == alt_cluster_2.serve_nodes + assert cluster_2.default_storage_type == alt_cluster_2.default_storage_type + + # Test list clusters in project via 'client.list_clusters' + clusters, failed_locations = admin_client.list_clusters() + assert not failed_locations + found = set([cluster.name for cluster in clusters]) + expected = {alt_cluster_1.name, alt_cluster_2.name, admin_cluster.name} + assert expected.issubset(found) + + temp_table_id = "test-get-cluster-states" + temp_table = instance.table(temp_table_id) + temp_table.create() + + EncryptionType = enums.EncryptionInfo.EncryptionType + encryption_info = temp_table.get_encryption_info() + assert ( + encryption_info[alt_cluster_id_1][0].encryption_type + == EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + ) + assert ( + encryption_info[alt_cluster_id_2][0].encryption_type + == EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + ) + + +def test_instance_update_display_name_and_labels( + admin_client, + admin_instance_id, + admin_instance_populated, + label_key, + instance_labels, + not_in_emulator, +): + old_display_name = admin_instance_populated.display_name + new_display_name = "Foo Bar Baz" + + new_labels = {label_key: _helpers.label_stamp()} + admin_instance_populated.display_name = new_display_name + admin_instance_populated.labels = new_labels + + operation = admin_instance_populated.update() + operation.result(timeout=30) # ensure the operation completes. + + # Create a new instance instance and reload it. + instance_alt = admin_client.instance(admin_instance_id, labels={}) + assert instance_alt.display_name == old_display_name + assert instance_alt.labels == {} + + instance_alt.reload() + + assert instance_alt.display_name == new_display_name + assert instance_alt.labels == new_labels + + # Make sure to put the instance back the way it was for the + # other test cases. + admin_instance_populated.display_name = old_display_name + admin_instance_populated.labels = instance_labels + operation = admin_instance_populated.update() + operation.result(timeout=30) # ensure the operation completes. + + +def test_instance_update_w_type( + admin_client, + unique_suffix, + admin_instance_populated, + location_id, + instance_labels, + instances_to_delete, + not_in_emulator, +): + alt_instance_id = f"ndif{unique_suffix}" + instance = admin_client.instance( + alt_instance_id, + instance_type=enums.Instance.Type.DEVELOPMENT, + labels=instance_labels, + ) + alt_cluster_id = f"{alt_instance_id}-cluster" + cluster = instance.cluster( + alt_cluster_id, location_id=location_id, + ) + + operation = instance.create(clusters=[cluster]) + instances_to_delete.append(instance) + operation.result(timeout=30) # Ensure the operation completes. 
+ + instance.display_name = None + instance.type_ = enums.Instance.Type.PRODUCTION + operation = instance.update() + operation.result(timeout=30) # ensure the operation completes. + + # Create a new instance instance and reload it. + instance_alt = admin_client.instance(alt_instance_id) + assert instance_alt.type_ is None + instance_alt.reload() + assert instance_alt.type_ == enums.Instance.Type.PRODUCTION + + +def test_cluster_exists_hit(admin_cluster, not_in_emulator): + assert admin_cluster.exists() + + +def test_cluster_exists_miss(admin_instance_populated, not_in_emulator): + alt_cluster = admin_instance_populated.cluster("nonesuch-cluster") + assert not alt_cluster.exists() + + +def test_cluster_create( + admin_instance_populated, admin_instance_id, +): + alt_cluster_id = f"{admin_instance_id}-c2" + alt_location_id = "us-central1-f" + serve_nodes = 2 + + cluster_2 = admin_instance_populated.cluster( + alt_cluster_id, + location_id=alt_location_id, + serve_nodes=serve_nodes, + default_storage_type=(enums.StorageType.SSD), + ) + operation = cluster_2.create() + operation.result(timeout=30) # Ensure the operation completes. + + # Create a new object instance, reload and make sure it is the same. + alt_cluster = admin_instance_populated.cluster(alt_cluster_id) + alt_cluster.reload() + + assert cluster_2 == alt_cluster + assert cluster_2.location_id == alt_cluster.location_id + assert alt_cluster.state == enums.Cluster.State.READY + assert cluster_2.serve_nodes == alt_cluster.serve_nodes + assert cluster_2.default_storage_type == alt_cluster.default_storage_type + + # Delete the newly created cluster and confirm + assert cluster_2.exists() + cluster_2.delete() + assert not cluster_2.exists() + + +def test_cluster_update( + admin_instance_populated, + admin_cluster_id, + admin_cluster, + serve_nodes, + not_in_emulator, +): + new_serve_nodes = 4 + + admin_cluster.serve_nodes = new_serve_nodes + + operation = admin_cluster.update() + operation.result(timeout=30) # Ensure the operation completes. + + # Create a new cluster instance and reload it. + alt_cluster = admin_instance_populated.cluster(admin_cluster_id) + alt_cluster.reload() + assert alt_cluster.serve_nodes == new_serve_nodes + + # Put the cluster back the way it was for the other test cases. + admin_cluster.serve_nodes = serve_nodes + operation = admin_cluster.update() + operation.result(timeout=30) # Ensure the operation completes. 
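The tests above rely entirely on the session-scoped fixtures defined in tests/system/conftest.py. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch (illustrative only, not part of the patch) of the skip-via-fixture gating that replaces the monolith's runtime Config.IN_EMULATOR checks; the literal "BIGTABLE_EMULATOR_HOST" assumes that is what google.cloud.environment_vars.BIGTABLE_EMULATOR resolves to:

    # sketch: emulator gating via session-scoped fixtures (hypothetical file)
    import os

    import pytest


    @pytest.fixture(scope="session")
    def in_emulator():
        # True when the test run targets the Bigtable emulator
        return os.getenv("BIGTABLE_EMULATOR_HOST") is not None


    @pytest.fixture(scope="session")
    def not_in_emulator(in_emulator):
        # Any test requesting this fixture is skipped under the emulator
        if in_emulator:
            pytest.skip("Emulator does not support this feature")


    def test_requires_real_backend(not_in_emulator):
        # Skipped automatically when BIGTABLE_EMULATOR_HOST is set
        assert True

Because the skip happens inside a fixture, each test declares the dependency in its signature instead of repeating an imperative check in its body.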
diff --git a/tests/system.py b/tests/system/test_monolith.py similarity index 52% rename from tests/system.py rename to tests/system/test_monolith.py index aa3c1cac6..d3935614d 100644 --- a/tests/system.py +++ b/tests/system/test_monolith.py @@ -18,8 +18,6 @@ import time import unittest -import pytest - from google.api_core.datetime_helpers import DatetimeWithNanoseconds from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import TooManyRequests @@ -155,713 +153,6 @@ def tearDownModule(): retry_429(Config.INSTANCE_DATA.delete)() -class TestInstanceAdminAPI(unittest.TestCase): - def setUp(self): - if Config.IN_EMULATOR: - self.skipTest("Instance Admin API not supported in emulator") - self.instances_to_delete = [] - - def tearDown(self): - for instance in self.instances_to_delete: - retry_429(instance.delete)() - - def test_list_instances(self): - instances, failed_locations = Config.CLIENT.list_instances() - - self.assertEqual(failed_locations, []) - - found = set([instance.name for instance in instances]) - self.assertTrue(Config.INSTANCE.name in found) - - def test_reload(self): - from google.cloud.bigtable import enums - - # Use same arguments as Config.INSTANCE (created in `setUpModule`) - # so we can use reload() on a fresh instance. - alt_instance = Config.CLIENT.instance(INSTANCE_ID) - # Make sure metadata unset before reloading. - alt_instance.display_name = None - - alt_instance.reload() - self.assertEqual(alt_instance.display_name, Config.INSTANCE.display_name) - self.assertEqual(alt_instance.labels, Config.INSTANCE.labels) - self.assertEqual(alt_instance.type_, enums.Instance.Type.PRODUCTION) - - def test_create_instance_defaults(self): - from google.cloud.bigtable import enums - - ALT_INSTANCE_ID = "ndef" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) - ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" - serve_nodes = 1 - cluster = instance.cluster( - ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=serve_nodes - ) - operation = instance.create(clusters=[cluster]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - # Make sure that by default a PRODUCTION type instance is created - self.assertIsNone(instance.type_) - self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION) - - def test_create_instance(self): - from google.cloud.bigtable import enums - - _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT - - ALT_INSTANCE_ID = "new" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS - ) - ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" - cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID) - operation = instance.create(clusters=[cluster]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - self.assertEqual(instance_alt.labels, LABELS) - self.assertEqual(instance_alt.state, enums.Instance.State.READY) - - def test_cluster_exists(self): - NONEXISTING_CLUSTER_ID = "cluster-id" - - cluster = Config.INSTANCE.cluster(CLUSTER_ID) - alt_cluster = Config.INSTANCE.cluster(NONEXISTING_CLUSTER_ID) - self.assertTrue(cluster.exists()) - self.assertFalse(alt_cluster.exists()) - - def test_instance_exists(self): - NONEXISTING_INSTANCE_ID = "instancer-id" - - alt_instance = Config.CLIENT.instance(NONEXISTING_INSTANCE_ID) - self.assertTrue(Config.INSTANCE.exists()) - self.assertFalse(alt_instance.exists()) - - def test_create_instance_w_two_clusters(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable.table import ClusterState - - _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "dif" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS - ) - - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" - LOCATION_ID_2 = "us-central1-f" - STORAGE_TYPE = enums.StorageType.HDD - serve_nodes = 1 - cluster_1 = instance.cluster( - ALT_CLUSTER_ID_1, - location_id=LOCATION_ID, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - ) - cluster_2 = instance.cluster( - ALT_CLUSTER_ID_2, - location_id=LOCATION_ID_2, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - ) - operation = instance.create(clusters=[cluster_1, cluster_2]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=120) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - - clusters, failed_locations = instance_alt.list_clusters() - self.assertEqual(failed_locations, []) - - clusters.sort(key=lambda x: x.name) - alt_cluster_1, alt_cluster_2 = clusters - - self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) - self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) - self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) - self.assertEqual( - cluster_1.default_storage_type, alt_cluster_1.default_storage_type - ) - self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) - self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster_2.default_storage_type - ) - - # Test list clusters in project via 'client.list_clusters' - clusters, failed_locations = Config.CLIENT.list_clusters() - self.assertFalse(failed_locations) - found = set([cluster.name for cluster in clusters]) - self.assertTrue( - {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( - found - ) - ) - - temp_table_id = "test-get-cluster-states" - temp_table = instance.table(temp_table_id) - temp_table.create() - - encryption_info = temp_table.get_encryption_info() - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, - enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, - ) - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, - enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, - ) - - result = temp_table.get_cluster_states() - ReplicationState = enums.Table.ReplicationState - expected_results = [ - ClusterState(ReplicationState.STATE_NOT_KNOWN), - ClusterState(ReplicationState.INITIALIZING), - ClusterState(ReplicationState.PLANNED_MAINTENANCE), - ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), - ClusterState(ReplicationState.READY), - ] - cluster_id_list = result.keys() - self.assertEqual(len(cluster_id_list), 2) - self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) - self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) - for clusterstate in result.values(): - self.assertIn(clusterstate, expected_results) - - # Test create app profile with multi_cluster_routing policy - app_profiles_to_delete = [] - description = "routing policy-multy" - app_profile_id_1 = "app_profile_id_1" - routing = enums.RoutingPolicyType.ANY - self._test_create_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - ignore_warnings=True, - ) - app_profiles_to_delete.append(app_profile_id_1) - - # Test list app profiles - self._test_list_app_profiles_helper(instance, [app_profile_id_1]) - - # Test modify app profile app_profile_id_1 - # routing policy to single cluster policy, - # cluster -> ALT_CLUSTER_ID_1, - # allow_transactional_writes -> disallowed - # modify description - description = "to routing policy-single" - routing = enums.RoutingPolicyType.SINGLE - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_1, - allow_transactional_writes=False, - ) - - # Test modify app profile app_profile_id_1 - # cluster -> ALT_CLUSTER_ID_2, - # 
allow_transactional_writes -> allowed - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ignore_warnings=True, - ) - - # Test create app profile with single cluster routing policy - description = "routing policy-single" - app_profile_id_2 = "app_profile_id_2" - routing = enums.RoutingPolicyType.SINGLE - self._test_create_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=False, - ) - app_profiles_to_delete.append(app_profile_id_2) - - # Test list app profiles - self._test_list_app_profiles_helper( - instance, [app_profile_id_1, app_profile_id_2] - ) - - # Test modify app profile app_profile_id_2 to - # allow transactional writes - # Note: no need to set ``ignore_warnings`` to True - # since we are not restrictings anything with this modification. - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ) - - # Test modify app profile app_profile_id_2 routing policy - # to multi_cluster_routing policy - # modify description - description = "to routing policy-multy" - routing = enums.RoutingPolicyType.ANY - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - allow_transactional_writes=False, - ignore_warnings=True, - ) - - # Test delete app profiles - for app_profile_id in app_profiles_to_delete: - self._test_delete_app_profile_helper(app_profile_id, instance) - - @pytest.mark.skipif( - not KMS_KEY_NAME, reason="requires KMS_KEY_NAME environment variable" - ) - def test_create_instance_w_two_clusters_cmek(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable.table import ClusterState - - _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "dif-cmek" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS - ) - - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" - LOCATION_ID_2 = "us-central1-f" - STORAGE_TYPE = enums.StorageType.HDD - serve_nodes = 1 - cluster_1 = instance.cluster( - ALT_CLUSTER_ID_1, - location_id=LOCATION_ID, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - kms_key_name=KMS_KEY_NAME, - ) - cluster_2 = instance.cluster( - ALT_CLUSTER_ID_2, - location_id=LOCATION_ID_2, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - kms_key_name=KMS_KEY_NAME, - ) - operation = instance.create(clusters=[cluster_1, cluster_2]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=120) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - - clusters, failed_locations = instance_alt.list_clusters() - self.assertEqual(failed_locations, []) - - clusters.sort(key=lambda x: x.name) - alt_cluster_1, alt_cluster_2 = clusters - - self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) - self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) - self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) - self.assertEqual( - cluster_1.default_storage_type, alt_cluster_1.default_storage_type - ) - self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) - self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster_2.default_storage_type - ) - - # Test list clusters in project via 'client.list_clusters' - clusters, failed_locations = Config.CLIENT.list_clusters() - self.assertFalse(failed_locations) - found = set([cluster.name for cluster in clusters]) - self.assertTrue( - {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( - found - ) - ) - - temp_table_id = "test-get-cluster-states" - temp_table = instance.table(temp_table_id) - temp_table.create() - - encryption_info = temp_table.get_encryption_info() - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, - enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, - enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - - result = temp_table.get_cluster_states() - ReplicationState = enums.Table.ReplicationState - expected_results = [ - ClusterState(ReplicationState.STATE_NOT_KNOWN), - ClusterState(ReplicationState.INITIALIZING), - ClusterState(ReplicationState.PLANNED_MAINTENANCE), - ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), - ClusterState(ReplicationState.READY), - ] - cluster_id_list = result.keys() - self.assertEqual(len(cluster_id_list), 2) - self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) - self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) - for clusterstate in result.values(): - self.assertIn(clusterstate, expected_results) - - # Test create app profile with multi_cluster_routing policy - app_profiles_to_delete = [] - description = "routing policy-multy" - app_profile_id_1 = "app_profile_id_1" - routing = enums.RoutingPolicyType.ANY - self._test_create_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - ignore_warnings=True, - ) - app_profiles_to_delete.append(app_profile_id_1) - - # Test list app profiles - self._test_list_app_profiles_helper(instance, [app_profile_id_1]) - - # Test modify app profile app_profile_id_1 - # routing policy to single cluster policy, - # cluster -> ALT_CLUSTER_ID_1, - # allow_transactional_writes -> disallowed - # modify description - description = "to routing policy-single" - routing = enums.RoutingPolicyType.SINGLE - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_1, - allow_transactional_writes=False, - ) - - # Test modify app profile app_profile_id_1 - # cluster -> ALT_CLUSTER_ID_2, - # 
allow_transactional_writes -> allowed - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ignore_warnings=True, - ) - - # Test create app profile with single cluster routing policy - description = "routing policy-single" - app_profile_id_2 = "app_profile_id_2" - routing = enums.RoutingPolicyType.SINGLE - self._test_create_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=False, - ) - app_profiles_to_delete.append(app_profile_id_2) - - # Test list app profiles - self._test_list_app_profiles_helper( - instance, [app_profile_id_1, app_profile_id_2] - ) - - # Test modify app profile app_profile_id_2 to - # allow transactional writes - # Note: no need to set ``ignore_warnings`` to True - # since we are not restrictings anything with this modification. - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ) - - # Test modify app profile app_profile_id_2 routing policy - # to multi_cluster_routing policy - # modify description - description = "to routing policy-multy" - routing = enums.RoutingPolicyType.ANY - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - allow_transactional_writes=False, - ignore_warnings=True, - ) - - # Test delete app profiles - for app_profile_id in app_profiles_to_delete: - self._test_delete_app_profile_helper(app_profile_id, instance) - - def test_update_display_name_and_labels(self): - OLD_DISPLAY_NAME = Config.INSTANCE.display_name - NEW_DISPLAY_NAME = "Foo Bar Baz" - n_label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") - ) - - NEW_LABELS = {LABEL_KEY: str(n_label_stamp)} - Config.INSTANCE.display_name = NEW_DISPLAY_NAME - Config.INSTANCE.labels = NEW_LABELS - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and reload it. - instance_alt = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) - self.assertEqual(instance_alt.display_name, OLD_DISPLAY_NAME) - self.assertEqual(instance_alt.labels, LABELS) - instance_alt.reload() - self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) - self.assertEqual(instance_alt.labels, NEW_LABELS) - - # Make sure to put the instance back the way it was for the - # other test cases. - Config.INSTANCE.display_name = OLD_DISPLAY_NAME - Config.INSTANCE.labels = LABELS - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - def test_update_type(self): - from google.cloud.bigtable.enums import Instance - - _DEVELOPMENT = Instance.Type.DEVELOPMENT - _PRODUCTION = Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "ndif" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS - ) - operation = instance.create(location_id=LOCATION_ID) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. 
- operation.result(timeout=30) - - # Unset the display_name - instance.display_name = None - - instance.type_ = _PRODUCTION - operation = instance.update() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and reload it. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - self.assertIsNone(instance_alt.type_) - instance_alt.reload() - self.assertEqual(instance_alt.type_, _PRODUCTION) - - def test_update_cluster(self): - NEW_SERVE_NODES = 4 - - Config.CLUSTER.serve_nodes = NEW_SERVE_NODES - - operation = Config.CLUSTER.update() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new cluster instance and reload it. - alt_cluster = Config.INSTANCE.cluster(CLUSTER_ID) - alt_cluster.reload() - self.assertEqual(alt_cluster.serve_nodes, NEW_SERVE_NODES) - - # Make sure to put the cluster back the way it was for the - # other test cases. - Config.CLUSTER.serve_nodes = SERVE_NODES - operation = Config.CLUSTER.update() - operation.result(timeout=30) - - def test_create_cluster(self): - from google.cloud.bigtable.enums import StorageType - from google.cloud.bigtable.enums import Cluster - - ALT_CLUSTER_ID = INSTANCE_ID + "-c2" - ALT_LOCATION_ID = "us-central1-f" - ALT_SERVE_NODES = 2 - - cluster_2 = Config.INSTANCE.cluster( - ALT_CLUSTER_ID, - location_id=ALT_LOCATION_ID, - serve_nodes=ALT_SERVE_NODES, - default_storage_type=(StorageType.SSD), - ) - operation = cluster_2.create() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new object instance, reload and make sure it is the same. - alt_cluster = Config.INSTANCE.cluster(ALT_CLUSTER_ID) - alt_cluster.reload() - - self.assertEqual(cluster_2, alt_cluster) - self.assertEqual(cluster_2.location_id, alt_cluster.location_id) - self.assertEqual(alt_cluster.state, Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster.default_storage_type - ) - - # Delete the newly created cluster and confirm - self.assertTrue(cluster_2.exists()) - cluster_2.delete() - self.assertFalse(cluster_2.exists()) - - def _test_create_app_profile_helper( - self, - app_profile_id, - instance, - routing_policy_type, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None, - ): - - app_profile = instance.app_profile( - app_profile_id=app_profile_id, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - self.assertEqual( - app_profile.allow_transactional_writes, allow_transactional_writes - ) - - app_profile = app_profile.create(ignore_warnings=ignore_warnings) - - # Load a different app_profile objec form the server and - # verrify that it is the same - alt_app_profile = instance.app_profile(app_profile_id) - alt_app_profile.reload() - - self.assertEqual(app_profile.app_profile_id, alt_app_profile.app_profile_id) - self.assertEqual(app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(app_profile.description, alt_app_profile.description) - self.assertFalse(app_profile.allow_transactional_writes) - self.assertFalse(alt_app_profile.allow_transactional_writes) - - def _test_list_app_profiles_helper(self, instance, app_profile_ids): - app_profiles = instance.list_app_profiles() - found 
= [app_prof.app_profile_id for app_prof in app_profiles] - for app_profile_id in app_profile_ids: - self.assertTrue(app_profile_id in found) - - def _test_modify_app_profile_helper( - self, - app_profile_id, - instance, - routing_policy_type, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None, - ): - app_profile = instance.app_profile( - app_profile_id=app_profile_id, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - - operation = app_profile.update(ignore_warnings) - operation.result(timeout=30) - - alt_app_profile = instance.app_profile(app_profile_id) - alt_app_profile.reload() - self.assertEqual(alt_app_profile.description, description) - self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(alt_app_profile.cluster_id, cluster_id) - self.assertEqual( - alt_app_profile.allow_transactional_writes, allow_transactional_writes - ) - - def _test_delete_app_profile_helper(self, app_profile_id, instance): - app_profile = instance.app_profile(app_profile_id) - self.assertTrue(app_profile.exists()) - app_profile.delete(ignore_warnings=True) - self.assertFalse(app_profile.exists()) - - class TestTableAdminAPI(unittest.TestCase): @classmethod def setUpClass(cls): From 7e20e879f380f701eaba50da2b9b634f3d90df05 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 21 Jul 2021 18:20:55 -0400 Subject: [PATCH 03/10] tests: move table admin systests into a separate module --- tests/system/_helpers.py | 3 + tests/system/conftest.py | 58 +++++ tests/system/test_instance_admin.py | 12 - tests/system/test_monolith.py | 295 ----------------------- tests/system/test_table_admin.py | 351 ++++++++++++++++++++++++++++ 5 files changed, 412 insertions(+), 307 deletions(-) create mode 100644 tests/system/test_table_admin.py diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py index 3c88f6aae..3a5d18edc 100644 --- a/tests/system/_helpers.py +++ b/tests/system/_helpers.py @@ -20,6 +20,9 @@ retry_429 = retry.RetryErrors(exceptions.TooManyRequests, max_tries=9) +retry_504 = retry.RetryErrors(exceptions.DeadlineExceeded) +retry_until_true = retry.RetryResult(lambda result: result) +retry_until_false = retry.RetryResult(lambda result: not result) def label_stamp(): diff --git a/tests/system/conftest.py b/tests/system/conftest.py index 0aa7b0d6b..9c3b3fcd5 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -76,6 +76,15 @@ def admin_client(): return Client(admin=True) +@pytest.fixture(scope="session") +def service_account(admin_client): + from google.oauth2.service_account import Credentials + + if not isinstance(admin_client._credentials, Credentials): + pytest.skip("These tests require a service account credential") + return admin_client._credentials + + @pytest.fixture(scope="session") def admin_instance_id(unique_suffix): return f"g-c-p{unique_suffix}" @@ -108,3 +117,52 @@ def admin_instance_populated(admin_instance, admin_cluster, in_emulator): if not in_emulator: _helpers.retry_429(admin_instance.delete)() + + +@pytest.fixture(scope="session") +def data_client(): + return Client(admin=False) + + +@pytest.fixture(scope="session") +def data_instance_id(unique_suffix): + return f"g-c-p-d{unique_suffix}" + + +@pytest.fixture(scope="session") +def data_cluster_id(data_instance_id): + return f"{data_instance_id}-cluster" + + +@pytest.fixture(scope="session") +def data_instance_populated( + 
admin_client, + data_instance_id, + instance_labels, + data_cluster_id, + location_id, + serve_nodes, + in_emulator, +): + if not in_emulator: + instance = admin_client.instance(data_instance_id, labels=instance_labels) + cluster = instance.cluster( + data_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + ) + operation = instance.create(clusters=[cluster]) + operation.result(timeout=30) + + yield instance + + if not in_emulator: + _helpers.retry_429(instance.delete)() + + +@pytest.fixture(scope="function") +def instances_to_delete(): + instances_to_delete = [] + + yield instances_to_delete + + for instance in instances_to_delete: + _helpers.retry_429(instance.delete)() diff --git a/tests/system/test_instance_admin.py b/tests/system/test_instance_admin.py index 2a3cf2fa3..9c08aaec1 100644 --- a/tests/system/test_instance_admin.py +++ b/tests/system/test_instance_admin.py @@ -15,21 +15,9 @@ from google.cloud.bigtable import enums from google.cloud.bigtable.table import ClusterState -import pytest - from . import _helpers -@pytest.fixture(scope="function") -def instances_to_delete(): - instances_to_delete = [] - - yield instances_to_delete - - for instance in instances_to_delete: - _helpers.retry_429(instance.delete)() - - def _create_app_profile_helper( app_profile_id, instance, diff --git a/tests/system/test_monolith.py b/tests/system/test_monolith.py index d3935614d..d9ef4d5be 100644 --- a/tests/system/test_monolith.py +++ b/tests/system/test_monolith.py @@ -15,15 +15,11 @@ import datetime import operator import os -import time import unittest -from google.api_core.datetime_helpers import DatetimeWithNanoseconds -from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import TooManyRequests from google.cloud.environment_vars import BIGTABLE_EMULATOR from test_utils.retry import RetryErrors -from test_utils.retry import RetryResult # from test_utils.system import EmulatorCreds from test_utils.system import unique_resource_id @@ -32,9 +28,6 @@ from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import UTC from google.cloud.bigtable.client import Client -from google.cloud.bigtable.column_family import MaxVersionsGCRule -from google.cloud.bigtable.policy import Policy -from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE from google.cloud.bigtable.row_filters import ApplyLabelFilter from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter from google.cloud.bigtable.row_filters import RowFilterChain @@ -153,294 +146,6 @@ def tearDownModule(): retry_429(Config.INSTANCE_DATA.delete)() -class TestTableAdminAPI(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls._table = Config.INSTANCE_DATA.table(TABLE_ID) - cls._table.create() - - @classmethod - def tearDownClass(cls): - cls._table.delete() - - def setUp(self): - self.tables_to_delete = [] - self.backups_to_delete = [] - - def tearDown(self): - for table in self.tables_to_delete: - table.delete() - for backup in self.backups_to_delete: - backup.delete() - - def _skip_if_emulated(self, message): - # NOTE: This method is necessary because ``Config.IN_EMULATOR`` - # is set at runtime rather than import time, which means we - # can't use the @unittest.skipIf decorator. - if Config.IN_EMULATOR: - self.skipTest(message) - - def test_list_tables(self): - # Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the - # table created in `setUpClass` here will be the only one. 
- tables = Config.INSTANCE_DATA.list_tables() - self.assertEqual(tables, [self._table]) - - def test_exists(self): - retry_until_true = RetryResult(lambda result: result) - retry_until_false = RetryResult(lambda result: not result) - temp_table_id = "test-table_existence" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - self.assertFalse(temp_table.exists()) - temp_table.create() - self.assertTrue(retry_until_true(temp_table.exists)()) - temp_table.delete() - self.assertFalse(retry_until_false(temp_table.exists)()) - - def test_create_table(self): - temp_table_id = "test-create-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - # First, create a sorted version of our expected result. - name_attr = operator.attrgetter("name") - expected_tables = sorted([temp_table, self._table], key=name_attr) - - # Then query for the tables in the instance and sort them by - # name as well. - tables = Config.INSTANCE_DATA.list_tables() - sorted_tables = sorted(tables, key=name_attr) - self.assertEqual(sorted_tables, expected_tables) - - def test_test_iam_permissions(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-test-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] - permissions_allowed = temp_table.test_iam_permissions(permissions) - self.assertEqual(permissions, permissions_allowed) - - def test_get_iam_policy(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-get-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - policy = temp_table.get_iam_policy().to_api_repr() - self.assertEqual(policy["etag"], "ACAB") - self.assertEqual(policy["version"], 0) - - def test_set_iam_policy(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-set-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - new_policy = Policy() - service_account_email = Config.CLIENT._credentials.service_account_email - new_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.service_account(service_account_email) - ] - policy_latest = temp_table.set_iam_policy(new_policy).to_api_repr() - - self.assertEqual(policy_latest["bindings"][0]["role"], "roles/bigtable.admin") - self.assertIn(service_account_email, policy_latest["bindings"][0]["members"][0]) - - def test_create_table_with_families(self): - temp_table_id = "test-create-table-with-failies" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - gc_rule = MaxVersionsGCRule(1) - temp_table.create(column_families={COLUMN_FAMILY_ID1: gc_rule}) - self.tables_to_delete.append(temp_table) - - col_fams = temp_table.list_column_families() - - self.assertEqual(len(col_fams), 1) - retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] - self.assertIs(retrieved_col_fam._table, temp_table) - self.assertEqual(retrieved_col_fam.column_family_id, COLUMN_FAMILY_ID1) - self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - - def test_create_table_with_split_keys(self): - self._skip_if_emulated("Split keys are not supported by Bigtable emulator") - temp_table_id = "foo-bar-baz-split-table" - initial_split_keys = [b"split_key_1", 
b"split_key_10", b"split_key_20"] - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create(initial_split_keys=initial_split_keys) - self.tables_to_delete.append(temp_table) - - # Read Sample Row Keys for created splits - sample_row_keys = temp_table.sample_row_keys() - actual_keys = [srk.row_key for srk in sample_row_keys] - - expected_keys = initial_split_keys - expected_keys.append(b"") - - self.assertEqual(actual_keys, expected_keys) - - def test_create_column_family(self): - temp_table_id = "test-create-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - self.assertEqual(temp_table.list_column_families(), {}) - gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) - column_family.create() - - col_fams = temp_table.list_column_families() - - self.assertEqual(len(col_fams), 1) - retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] - self.assertIs(retrieved_col_fam._table, column_family._table) - self.assertEqual( - retrieved_col_fam.column_family_id, column_family.column_family_id - ) - self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - - def test_update_column_family(self): - temp_table_id = "test-update-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) - column_family.create() - - # Check that our created table is as expected. - col_fams = temp_table.list_column_families() - self.assertEqual(col_fams, {COLUMN_FAMILY_ID1: column_family}) - - # Update the column family's GC rule and then try to update. - column_family.gc_rule = None - column_family.update() - - # Check that the update has propagated. - col_fams = temp_table.list_column_families() - self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) - - def test_delete_column_family(self): - temp_table_id = "test-delete-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - self.assertEqual(temp_table.list_column_families(), {}) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1) - column_family.create() - - # Make sure the family is there before deleting it. - col_fams = temp_table.list_column_families() - self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1]) - - retry_504 = RetryErrors(DeadlineExceeded) - retry_504(column_family.delete)() - # Make sure we have successfully deleted it. - self.assertEqual(temp_table.list_column_families(), {}) - - def test_backup(self): - if Config.IN_EMULATOR: - self.skipTest("backups are not supported in the emulator") - - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable import enums - - temp_table_id = "test-backup-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - temp_backup_id = "test-backup" - - # TODO: consider using `datetime.datetime.now().timestamp()` - # when support for Python 2 is fully dropped - expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 - - # Testing `Table.backup()` factory - temp_backup = temp_table.backup( - temp_backup_id, - cluster_id=CLUSTER_ID_DATA, - expire_time=datetime.datetime.utcfromtimestamp(expire), - ) - - # Reinitialize the admin client. 
This is to test `_table_admin_client` returns a client object (and not NoneType) - temp_backup._instance._client = Client(admin=True) - - # Sanity check for `Backup.exists()` method - self.assertFalse(temp_backup.exists()) - - # Testing `Backup.create()` method - temp_backup.create().result(timeout=30) - - # Implicit testing of `Backup.delete()` method - self.backups_to_delete.append(temp_backup) - - # Testing `Backup.exists()` method - self.assertTrue(temp_backup.exists()) - - # Testing `Table.list_backups()` method - temp_table_backup = temp_table.list_backups()[0] - self.assertEqual(temp_backup_id, temp_table_backup.backup_id) - self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) - self.assertEqual(expire, temp_table_backup.expire_time.seconds) - self.assertEqual( - temp_table_backup.encryption_info.encryption_type, - enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, - ) - - # Testing `Backup.update_expire_time()` method - expire += 3600 # A one-hour change in the `expire_time` parameter - updated_time = datetime.datetime.utcfromtimestamp(expire) - temp_backup.update_expire_time(updated_time) - test = _datetime_to_pb_timestamp(updated_time) - - # Testing `Backup.get()` method - temp_table_backup = temp_backup.get() - self.assertEqual( - test.seconds, - DatetimeWithNanoseconds.timestamp(temp_table_backup.expire_time), - ) - - # Testing `Table.restore()` and `Backup.retore()` methods - restored_table_id = "test-backup-table-restored" - restored_table = Config.INSTANCE_DATA.table(restored_table_id) - temp_table.restore( - restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id, - ).result(timeout=30) - tables = Config.INSTANCE_DATA.list_tables() - self.assertIn(restored_table, tables) - restored_table.delete() - - # Testing `Backup.restore()` into a different instance: - # Setting up another instance... - alt_instance_id = "gcp-alt-" + UNIQUE_SUFFIX - alt_cluster_id = alt_instance_id + "-cluster" - alt_instance = Config.CLIENT.instance(alt_instance_id, labels=LABELS) - alt_cluster = alt_instance.cluster( - cluster_id=alt_cluster_id, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, - ) - if not Config.IN_EMULATOR: - alt_instance.create(clusters=[alt_cluster]).result(timeout=30) - - # Testing `restore()`... - temp_backup.restore(restored_table_id, alt_instance_id).result(timeout=30) - restored_table = alt_instance.table(restored_table_id) - self.assertIn(restored_table, alt_instance.list_tables()) - restored_table.delete() - - # Tearing down the resources... - if not Config.IN_EMULATOR: - retry_429(alt_instance.delete)() - - class TestDataAPI(unittest.TestCase): @classmethod def setUpClass(cls): diff --git a/tests/system/test_table_admin.py b/tests/system/test_table_admin.py new file mode 100644 index 000000000..232c6d0fc --- /dev/null +++ b/tests/system/test_table_admin.py @@ -0,0 +1,351 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import operator +import time + +import pytest +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +from . import _helpers + + +@pytest.fixture(scope="module") +def shared_table_id(): + return "google-cloud-python-test-table" + + +@pytest.fixture(scope="module") +def shared_table(data_instance_populated, shared_table_id): + table = data_instance_populated.table(shared_table_id) + table.create() + + yield table + + table.delete() + + +@pytest.fixture(scope="function") +def tables_to_delete(): + tables_to_delete = [] + + yield tables_to_delete + + for table in tables_to_delete: + table.delete() + + +@pytest.fixture(scope="function") +def backups_to_delete(): + backups_to_delete = [] + + yield backups_to_delete + + for backup in backups_to_delete: + backup.delete() + + +def test_instance_list_tables(data_instance_populated, shared_table): + # Since `data_instance_populated` is newly created, the + # table created in `shared_table` here will be the only one. + tables = data_instance_populated.list_tables() + assert tables == [shared_table] + + +def test_table_exists(data_instance_populated): + temp_table_id = "test-table_exists" + temp_table = data_instance_populated.table(temp_table_id) + assert not temp_table.exists() + + temp_table.create() + assert _helpers.retry_until_true(temp_table.exists)() + + temp_table.delete() + assert not _helpers.retry_until_false(temp_table.exists)() + + +def test_table_create(data_instance_populated, shared_table, tables_to_delete): + temp_table_id = "test-table-create" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + # First, create a sorted version of our expected result. + name_attr = operator.attrgetter("name") + expected_tables = sorted([temp_table, shared_table], key=name_attr) + + # Then query for the tables in the instance and sort them by + # name as well. 
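+    # (The service does not guarantee the ordering of list_tables() results,
+    # which is why both sides are sorted before comparing.)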
+    tables = data_instance_populated.list_tables()
+    sorted_tables = sorted(tables, key=name_attr)
+    assert sorted_tables == expected_tables
+
+
+def test_table_create_w_families(
+    data_instance_populated, tables_to_delete,
+):
+    from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+    temp_table_id = "test-create-table-with-families"
+    column_family_id = "col-fam-id1"
+    temp_table = data_instance_populated.table(temp_table_id)
+    gc_rule = MaxVersionsGCRule(1)
+    temp_table.create(column_families={column_family_id: gc_rule})
+    tables_to_delete.append(temp_table)
+
+    col_fams = temp_table.list_column_families()
+    assert len(col_fams) == 1
+
+    retrieved_col_fam = col_fams[column_family_id]
+    assert retrieved_col_fam._table is temp_table
+    assert retrieved_col_fam.column_family_id == column_family_id
+    assert retrieved_col_fam.gc_rule == gc_rule
+
+
+def test_table_create_w_split_keys(
+    data_instance_populated, tables_to_delete, not_in_emulator,
+):
+    temp_table_id = "foo-bar-baz-split-table"
+    initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"]
+    temp_table = data_instance_populated.table(temp_table_id)
+    temp_table.create(initial_split_keys=initial_split_keys)
+    tables_to_delete.append(temp_table)
+
+    # Read sample row keys for the created splits.
+    sample_row_keys = temp_table.sample_row_keys()
+    actual_keys = [srk.row_key for srk in sample_row_keys]
+
+    # The final sample key is empty, marking the end of the table.
+    expected_keys = initial_split_keys + [b""]
+    assert actual_keys == expected_keys
+
+
+def test_column_family_create(data_instance_populated, tables_to_delete):
+    from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+    temp_table_id = "test-create-column-family"
+    temp_table = data_instance_populated.table(temp_table_id)
+    temp_table.create()
+    tables_to_delete.append(temp_table)
+
+    assert temp_table.list_column_families() == {}
+
+    column_family_id = "col-fam-id1"
+    gc_rule = MaxVersionsGCRule(1)
+    column_family = temp_table.column_family(column_family_id, gc_rule=gc_rule)
+    column_family.create()
+
+    col_fams = temp_table.list_column_families()
+    assert len(col_fams) == 1
+
+    retrieved_col_fam = col_fams[column_family_id]
+    assert retrieved_col_fam._table is column_family._table
+    assert retrieved_col_fam.column_family_id == column_family.column_family_id
+    assert retrieved_col_fam.gc_rule == gc_rule
+
+
+def test_column_family_update(data_instance_populated, tables_to_delete):
+    from google.cloud.bigtable.column_family import MaxVersionsGCRule
+
+    temp_table_id = "test-update-column-family"
+    temp_table = data_instance_populated.table(temp_table_id)
+    temp_table.create()
+    tables_to_delete.append(temp_table)
+
+    column_family_id = "col-fam-id1"
+    gc_rule = MaxVersionsGCRule(1)
+    column_family = temp_table.column_family(column_family_id, gc_rule=gc_rule)
+    column_family.create()
+
+    # Check that our created table is as expected.
+    col_fams = temp_table.list_column_families()
+    assert col_fams == {column_family_id: column_family}
+
+    # Clear the column family's GC rule, then push the update.
+    column_family.gc_rule = None
+    column_family.update()
+
+    # Check that the update has propagated.
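+    # (list_column_families() re-reads the table's metadata from the service,
+    # so this verifies server-side state rather than the local object.)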
+ col_fams = temp_table.list_column_families() + assert col_fams[column_family_id].gc_rule is None + + +def test_column_family_delete(data_instance_populated, tables_to_delete): + temp_table_id = "test-delete-column-family" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + column_family_id = "col-fam-id1" + assert temp_table.list_column_families() == {} + column_family = temp_table.column_family(column_family_id) + column_family.create() + + # Make sure the family is there before deleting it. + col_fams = temp_table.list_column_families() + assert list(col_fams.keys()) == [column_family_id] + + _helpers.retry_504(column_family.delete)() + # Make sure we have successfully deleted it. + assert temp_table.list_column_families() == {} + + +def test_table_get_iam_policy( + data_instance_populated, tables_to_delete, not_in_emulator, +): + temp_table_id = "test-get-iam-policy-table" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + policy = temp_table.get_iam_policy().to_api_repr() + assert policy["etag"] == "ACAB" + assert policy["version"] == 0 + + +def test_table_set_iam_policy( + service_account, data_instance_populated, tables_to_delete, not_in_emulator, +): + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy + + temp_table_id = "test-set-iam-policy-table" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + new_policy = Policy() + service_account_email = service_account.service_account_email + new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] + policy_latest = temp_table.set_iam_policy(new_policy).to_api_repr() + + assert policy_latest["bindings"][0]["role"] == BIGTABLE_ADMIN_ROLE + assert service_account_email in policy_latest["bindings"][0]["members"][0] + + +def test_table_test_iam_permissions( + data_instance_populated, tables_to_delete, not_in_emulator, +): + temp_table_id = "test-test-iam-policy-table" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] + permissions_allowed = temp_table.test_iam_permissions(permissions) + assert permissions == permissions_allowed + + +def test_table_backup( + admin_client, + unique_suffix, + instance_labels, + location_id, + data_instance_populated, + data_cluster_id, + instances_to_delete, + tables_to_delete, + backups_to_delete, + not_in_emulator, +): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable import enums + + temp_table_id = "test-backup-table" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + temp_backup_id = "test-backup" + + # TODO: consider using `datetime.datetime.now().timestamp()` + # when support for Python 2 is fully dropped + expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 + + # Testing `Table.backup()` factory + temp_backup = temp_table.backup( + temp_backup_id, + cluster_id=data_cluster_id, + expire_time=datetime.datetime.utcfromtimestamp(expire), + ) + + # Reinitialize the admin client. 
This is to test `_table_admin_client`
+    # returns a client object (and not NoneType)
+    temp_backup._instance._client = admin_client
+
+    # Sanity check for `Backup.exists()` method
+    assert not temp_backup.exists()
+
+    # Testing `Backup.create()` method
+    backup_op = temp_backup.create()
+    backup_op.result(timeout=30)
+
+    # Implicit testing of `Backup.delete()` method
+    backups_to_delete.append(temp_backup)
+
+    # Testing `Backup.exists()` method
+    assert temp_backup.exists()
+
+    # Testing `Table.list_backups()` method
+    temp_table_backup = temp_table.list_backups()[0]
+    assert temp_backup_id == temp_table_backup.backup_id
+    assert data_cluster_id == temp_table_backup.cluster
+    assert expire == temp_table_backup.expire_time.seconds
+    assert (
+        temp_table_backup.encryption_info.encryption_type
+        == enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
+    )
+
+    # Testing `Backup.update_expire_time()` method
+    expire += 3600  # A one-hour change in the `expire_time` parameter
+    updated_time = datetime.datetime.utcfromtimestamp(expire)
+    temp_backup.update_expire_time(updated_time)
+    updated_time_pb = _datetime_to_pb_timestamp(updated_time)
+
+    # Testing `Backup.get()` method
+    temp_table_backup = temp_backup.get()
+    assert updated_time_pb.seconds == DatetimeWithNanoseconds.timestamp(
+        temp_table_backup.expire_time
+    )
+
+    # Testing `Table.restore()` and `Backup.restore()` methods
+    restored_table_id = "test-backup-table-restored"
+    restored_table = data_instance_populated.table(restored_table_id)
+    local_restore_op = temp_table.restore(
+        restored_table_id, cluster_id=data_cluster_id, backup_id=temp_backup_id
+    )
+    local_restore_op.result(timeout=30)
+    tables = data_instance_populated.list_tables()
+    assert restored_table in tables
+    restored_table.delete()
+
+    # Testing `Backup.restore()` into a different instance:
+    # Setting up another instance...
+    alt_instance_id = f"gcp-alt-{unique_suffix}"
+    alt_cluster_id = f"{alt_instance_id}-cluster"
+    alt_instance = admin_client.instance(alt_instance_id, labels=instance_labels)
+    alt_cluster = alt_instance.cluster(
+        cluster_id=alt_cluster_id, location_id=location_id, serve_nodes=1,
+    )
+    create_op = alt_instance.create(clusters=[alt_cluster])
+    instances_to_delete.append(alt_instance)
+    create_op.result(timeout=30)
+
+    # Testing `restore()`...
+    restore_op = temp_backup.restore(restored_table_id, alt_instance_id)
+    restore_op.result(timeout=30)
+    restored_table = alt_instance.table(restored_table_id)
+    assert restored_table in alt_instance.list_tables()
+    restored_table.delete()
From ed54fcd505e15625d3931571b007df6f94ce688b Mon Sep 17 00:00:00 2001
From: Tres Seaver
Date: Thu, 22 Jul 2021 10:36:15 -0400
Subject: [PATCH 04/10] tests: move data API systests into a separate module

---
 tests/system/test_data_api.py | 401 ++++++++++++++++++++++++++++++++++
 tests/system/test_monolith.py | 302 ------------------------
 2 files changed, 401 insertions(+), 302 deletions(-)
 create mode 100644 tests/system/test_data_api.py

diff --git a/tests/system/test_data_api.py b/tests/system/test_data_api.py
new file mode 100644
index 000000000..7b04f5c83
--- /dev/null
+++ b/tests/system/test_data_api.py
@@ -0,0 +1,401 @@
+# Copyright 2011 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import operator + +import pytest + +COLUMN_FAMILY_ID1 = "col-fam-id1" +COLUMN_FAMILY_ID2 = "col-fam-id2" +COL_NAME1 = b"col-name1" +COL_NAME2 = b"col-name2" +COL_NAME3 = b"col-name3-but-other-fam" +CELL_VAL1 = b"cell-val" +CELL_VAL2 = b"cell-val-newer" +CELL_VAL3 = b"altcol-cell-val" +CELL_VAL4 = b"foo" +ROW_KEY = b"row-key" +ROW_KEY_ALT = b"row-key-alt" + + +@pytest.fixture(scope="module") +def data_table_id(): + return "test-data-api" + + +@pytest.fixture(scope="module") +def data_table(data_instance_populated, data_table_id): + table = data_instance_populated.table(data_table_id) + table.create() + table.column_family(COLUMN_FAMILY_ID1).create() + table.column_family(COLUMN_FAMILY_ID2).create() + + yield table + + table.delete() + + +@pytest.fixture(scope="function") +def rows_to_delete(): + rows_to_delete = [] + + yield rows_to_delete + + for row in rows_to_delete: + row.clear() + row.delete() + row.commit() + + +def test_table_read_rows_filter_millis(data_table): + from google.cloud.bigtable import row_filters + + end = datetime.datetime.now() + start = end - datetime.timedelta(minutes=60) + timestamp_range = row_filters.TimestampRange(start=start, end=end) + timefilter = row_filters.TimestampRangeFilter(timestamp_range) + row_data = data_table.read_rows(filter_=timefilter) + row_data.consume_all() + + +def test_table_mutate_rows(data_table, rows_to_delete): + row1 = data_table.direct_row(ROW_KEY) + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row1.commit() + rows_to_delete.append(row1) + + row2 = data_table.direct_row(ROW_KEY_ALT) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2) + row2.commit() + rows_to_delete.append(row2) + + # Change the contents + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL3) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4) + rows = [row1, row2] + + statuses = data_table.mutate_rows(rows) + assert len(statuses) == len(rows) + for status in statuses: + assert status.code == 0 + + # Check the contents + row1_data = data_table.read_row(ROW_KEY) + assert row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value == CELL_VAL3 + + row2_data = data_table.read_row(ROW_KEY_ALT) + assert row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value == CELL_VAL4 + + +def test_table_truncate(data_table, rows_to_delete): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_pr_1", + b"row_key_pr_2", + b"row_key_pr_3", + b"row_key_pr_4", + b"row_key_pr_5", + ] + + for row_key in row_keys: + row = data_table.direct_row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row.commit() + rows_to_delete.append(row) + + data_table.truncate(timeout=200) + + for row in data_table.read_rows(): + assert row.row_key.decode("utf-8") not in row_keys + + +def test_table_drop_by_prefix(data_table, rows_to_delete): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_pr_1", + b"row_key_pr_2", + b"row_key_pr_3", + b"row_key_pr_4", + b"row_key_pr_5", + ] + + for row_key in row_keys: + row = data_table.direct_row(row_key) 
+ row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row.commit() + rows_to_delete.append(row) + + data_table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) + + remaining_row_keys = [ + row_key for row_key in row_keys if not row_key.startswith(b"row_key_pr") + ] + expected_rows_count = len(remaining_row_keys) + found_rows_count = 0 + + for row in data_table.read_rows(): + if row.row_key in row_keys: + found_rows_count += 1 + + assert expected_rows_count == found_rows_count + + +def test_table_read_rows_w_row_set(data_table, rows_to_delete): + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_6", + b"row_key_7", + b"row_key_8", + b"row_key_9", + ] + + rows = [] + for row_key in row_keys: + row = data_table.direct_row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + rows.append(row) + rows_to_delete.append(row) + + data_table.mutate_rows(rows) + + row_range = RowRange(start_key=b"row_key_3", end_key=b"row_key_7") + row_set = RowSet() + row_set.add_row_range(row_range) + row_set.add_row_key(b"row_key_1") + + found_rows = data_table.read_rows(row_set=row_set) + + found_row_keys = [row.row_key for row in found_rows] + expected_row_keys = [ + row_key for row_key in row_keys[:6] if not row_key.endswith(b"_2") + ] + assert found_row_keys == expected_row_keys + + +def test_rowset_add_row_range_w_pfx(data_table, rows_to_delete): + from google.cloud.bigtable.row_set import RowSet + + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"sample_row_key_1", + b"sample_row_key_2", + ] + + rows = [] + for row_key in row_keys: + row = data_table.direct_row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + rows.append(row) + rows_to_delete.append(row) + data_table.mutate_rows(rows) + + row_set = RowSet() + row_set.add_row_range_with_prefix("row") + + expected_row_keys = [row_key for row_key in row_keys if row_key.startswith(b"row")] + found_rows = data_table.read_rows(row_set=row_set) + found_row_keys = [row.row_key for row in found_rows] + assert found_row_keys == expected_row_keys + + +def test_table_read_row_large_cell(data_table, rows_to_delete, not_in_emulator): + # Maximum gRPC received message size for emulator is 4194304 bytes. + row = data_table.direct_row(ROW_KEY) + rows_to_delete.append(row) + + number_of_bytes = 10 * 1024 * 1024 + data = b"1" * number_of_bytes # 10MB of 1's. + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) + row.commit() + + # Read back the contents of the row. + row_data = data_table.read_row(ROW_KEY) + assert row_data.row_key == ROW_KEY + + cell = row_data.cells[COLUMN_FAMILY_ID1] + column = cell[COL_NAME1] + assert len(column) == 1 + assert column[0].value == data + + +def _write_to_row(row1, row2, row3, row4): + from google.cloud._helpers import _datetime_from_microseconds + from google.cloud._helpers import _microseconds_from_datetime + from google.cloud._helpers import UTC + from google.cloud.bigtable.row_data import Cell + + timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) + timestamp1_micros = _microseconds_from_datetime(timestamp1) + # Truncate to millisecond granularity. 
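+    # (Bigtable stores cell timestamps at millisecond granularity, so any
+    # finer-grained component would not survive a write/read round trip.)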
+ timestamp1_micros -= timestamp1_micros % 1000 + timestamp1 = _datetime_from_microseconds(timestamp1_micros) + # 1000 microseconds is a millisecond + timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) + timestamp2_micros = _microseconds_from_datetime(timestamp2) + timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) + timestamp3_micros = _microseconds_from_datetime(timestamp3) + timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) + timestamp4_micros = _microseconds_from_datetime(timestamp4) + + if row1 is not None: + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) + if row2 is not None: + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, timestamp=timestamp2) + if row3 is not None: + row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, timestamp=timestamp3) + if row4 is not None: + row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, timestamp=timestamp4) + + # Create the cells we will check. + cell1 = Cell(CELL_VAL1, timestamp1_micros) + cell2 = Cell(CELL_VAL2, timestamp2_micros) + cell3 = Cell(CELL_VAL3, timestamp3_micros) + cell4 = Cell(CELL_VAL4, timestamp4_micros) + + return cell1, cell2, cell3, cell4 + + +def test_table_read_row(data_table, rows_to_delete): + row = data_table.direct_row(ROW_KEY) + rows_to_delete.append(row) + cell1, cell2, cell3, cell4 = _write_to_row(row, row, row, row) + row.commit() + + partial_row_data = data_table.read_row(ROW_KEY) + + assert partial_row_data.row_key == ROW_KEY + + # Check the cells match. + ts_attr = operator.attrgetter("timestamp") + expected_row_contents = { + COLUMN_FAMILY_ID1: { + COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), + COL_NAME2: [cell3], + }, + COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, + } + assert partial_row_data.cells == expected_row_contents + + +def test_table_read_rows(data_table, rows_to_delete): + from google.cloud.bigtable.row_data import PartialRowData + + row = data_table.direct_row(ROW_KEY) + rows_to_delete.append(row) + row_alt = data_table.direct_row(ROW_KEY_ALT) + rows_to_delete.append(row_alt) + + cell1, cell2, cell3, cell4 = _write_to_row(row, row_alt, row, row_alt) + row.commit() + row_alt.commit() + + rows_data = data_table.read_rows() + assert rows_data.rows == {} + rows_data.consume_all() + + # NOTE: We should refrain from editing protected data on instances. + # Instead we should make the values public or provide factories + # for constructing objects with them. + row_data = PartialRowData(ROW_KEY) + row_data._chunks_encountered = True + row_data._committed = True + row_data._cells = {COLUMN_FAMILY_ID1: {COL_NAME1: [cell1], COL_NAME2: [cell3]}} + + row_alt_data = PartialRowData(ROW_KEY_ALT) + row_alt_data._chunks_encountered = True + row_alt_data._committed = True + row_alt_data._cells = { + COLUMN_FAMILY_ID1: {COL_NAME1: [cell2]}, + COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, + } + + expected_rows = {ROW_KEY: row_data, ROW_KEY_ALT: row_alt_data} + assert rows_data.rows == expected_rows + + +def test_read_with_label_applied(data_table, rows_to_delete, not_in_emulator): + from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable.row_filters import RowFilterChain + from google.cloud.bigtable.row_filters import RowFilterUnion + + row = data_table.direct_row(ROW_KEY) + rows_to_delete.append(row) + + cell1, _, cell3, _ = _write_to_row(row, None, row, None) + row.commit() + + # Combine a label with column 1. 
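+    # (ApplyLabelFilter tags each cell that passes through its chain with the
+    # given label, letting the assertions below tell which filter matched.)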
+ label1 = "label-red" + label1_filter = ApplyLabelFilter(label1) + col1_filter = ColumnQualifierRegexFilter(COL_NAME1) + chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) + + # Combine a label with column 2. + label2 = "label-blue" + label2_filter = ApplyLabelFilter(label2) + col2_filter = ColumnQualifierRegexFilter(COL_NAME2) + chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) + + # Bring our two labeled columns together. + row_filter = RowFilterUnion(filters=[chain1, chain2]) + partial_row_data = data_table.read_row(ROW_KEY, filter_=row_filter) + assert partial_row_data.row_key == ROW_KEY + + cells_returned = partial_row_data.cells + col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1) + # Make sure COLUMN_FAMILY_ID1 was the only key. + assert len(cells_returned) == 0 + + (cell1_new,) = col_fam1.pop(COL_NAME1) + (cell3_new,) = col_fam1.pop(COL_NAME2) + # Make sure COL_NAME1 and COL_NAME2 were the only keys. + assert len(col_fam1) == 0 + + # Check that cell1 has matching values and gained a label. + assert cell1_new.value == cell1.value + assert cell1_new.timestamp == cell1.timestamp + assert cell1.labels == [] + assert cell1_new.labels == [label1] + + # Check that cell3 has matching values and gained a label. + assert cell3_new.value == cell3.value + assert cell3_new.timestamp == cell3.timestamp + assert cell3.labels == [] + assert cell3_new.labels == [label2] + + +def test_access_with_non_admin_client(data_client, data_instance_id, data_table_id): + instance = data_client.instance(data_instance_id) + table = instance.table(data_table_id) + assert table.read_row("nonesuch") is None # no raise diff --git a/tests/system/test_monolith.py b/tests/system/test_monolith.py index d9ef4d5be..3b7831f8e 100644 --- a/tests/system/test_monolith.py +++ b/tests/system/test_monolith.py @@ -13,7 +13,6 @@ # limitations under the License. 
import datetime -import operator import os import unittest @@ -28,14 +27,7 @@ from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import UTC from google.cloud.bigtable.client import Client -from google.cloud.bigtable.row_filters import ApplyLabelFilter -from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter -from google.cloud.bigtable.row_filters import RowFilterChain -from google.cloud.bigtable.row_filters import RowFilterUnion from google.cloud.bigtable.row_data import Cell -from google.cloud.bigtable.row_data import PartialRowData -from google.cloud.bigtable.row_set import RowSet -from google.cloud.bigtable.row_set import RowRange # from google.cloud.bigtable_admin_v2.gapic import ( # bigtable_table_admin_client_config as table_admin_config, @@ -204,297 +196,3 @@ def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): cell3 = Cell(CELL_VAL3, timestamp3_micros) cell4 = Cell(CELL_VAL4, timestamp4_micros) return cell1, cell2, cell3, cell4 - - def test_timestamp_filter_millisecond_granularity(self): - from google.cloud.bigtable import row_filters - - end = datetime.datetime.now() - start = end - datetime.timedelta(minutes=60) - timestamp_range = row_filters.TimestampRange(start=start, end=end) - timefilter = row_filters.TimestampRangeFilter(timestamp_range) - row_data = self._table.read_rows(filter_=timefilter) - row_data.consume_all() - - def test_mutate_rows(self): - row1 = self._table.row(ROW_KEY) - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row1.commit() - self.rows_to_delete.append(row1) - row2 = self._table.row(ROW_KEY_ALT) - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2) - row2.commit() - self.rows_to_delete.append(row2) - - # Change the contents - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL3) - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4) - rows = [row1, row2] - statuses = self._table.mutate_rows(rows) - result = [status.code for status in statuses] - expected_result = [0, 0] - self.assertEqual(result, expected_result) - - # Check the contents - row1_data = self._table.read_row(ROW_KEY) - self.assertEqual( - row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3 - ) - row2_data = self._table.read_row(ROW_KEY_ALT) - self.assertEqual( - row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4 - ) - - def test_truncate_table(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_pr_1", - b"row_key_pr_2", - b"row_key_pr_3", - b"row_key_pr_4", - b"row_key_pr_5", - ] - - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - self.rows_to_delete.append(row) - - self._table.truncate(timeout=200) - - read_rows = self._table.yield_rows() - - for row in read_rows: - self.assertNotIn(row.row_key.decode("utf-8"), row_keys) - - def test_drop_by_prefix_table(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_pr_1", - b"row_key_pr_2", - b"row_key_pr_3", - b"row_key_pr_4", - b"row_key_pr_5", - ] - - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - self.rows_to_delete.append(row) - - self._table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) - - read_rows = self._table.yield_rows() - expected_rows_count = 5 - read_rows_count = 0 - - for row in read_rows: - if row.row_key in row_keys: - 
read_rows_count += 1 - - self.assertEqual(expected_rows_count, read_rows_count) - - def test_yield_rows_with_row_set(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - b"row_key_7", - b"row_key_8", - b"row_key_9", - ] - - rows = [] - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - self.rows_to_delete.append(row) - self._table.mutate_rows(rows) - - row_set = RowSet() - row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) - row_set.add_row_key(b"row_key_1") - - read_rows = self._table.yield_rows(row_set=row_set) - - expected_row_keys = [ - b"row_key_1", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - ] - found_row_keys = [row.row_key for row in read_rows] - self.assertEqual(found_row_keys, expected_row_keys) - - def test_add_row_range_by_prefix_from_keys(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"sample_row_key_1", - b"sample_row_key_2", - ] - - rows = [] - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - self.rows_to_delete.append(row) - self._table.mutate_rows(rows) - - row_set = RowSet() - row_set.add_row_range_with_prefix("row") - - read_rows = self._table.yield_rows(row_set=row_set) - - expected_row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - ] - found_row_keys = [row.row_key for row in read_rows] - self.assertEqual(found_row_keys, expected_row_keys) - - def test_read_large_cell_limit(self): - self._maybe_emulator_skip( - "Maximum gRPC received message size for emulator is 4194304 bytes." - ) - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - number_of_bytes = 10 * 1024 * 1024 - data = b"1" * number_of_bytes # 10MB of 1's. - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) - row.commit() - - # Read back the contents of the row. - partial_row_data = self._table.read_row(ROW_KEY) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - cell = partial_row_data.cells[COLUMN_FAMILY_ID1] - column = cell[COL_NAME1] - self.assertEqual(len(column), 1) - self.assertEqual(column[0].value, data) - - def test_read_row(self): - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - cell1, cell2, cell3, cell4 = self._write_to_row(row, row, row, row) - row.commit() - - # Read back the contents of the row. - partial_row_data = self._table.read_row(ROW_KEY) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - - # Check the cells match. - ts_attr = operator.attrgetter("timestamp") - expected_row_contents = { - COLUMN_FAMILY_ID1: { - COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), - COL_NAME2: [cell3], - }, - COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, - } - self.assertEqual(partial_row_data.cells, expected_row_contents) - - def test_read_rows(self): - row = self._table.row(ROW_KEY) - row_alt = self._table.row(ROW_KEY_ALT) - self.rows_to_delete.extend([row, row_alt]) - - cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt, row, row_alt) - row.commit() - row_alt.commit() - - rows_data = self._table.read_rows() - self.assertEqual(rows_data.rows, {}) - rows_data.consume_all() - - # NOTE: We should refrain from editing protected data on instances. - # Instead we should make the values public or provide factories - # for constructing objects with them. 
- row_data = PartialRowData(ROW_KEY) - row_data._chunks_encountered = True - row_data._committed = True - row_data._cells = {COLUMN_FAMILY_ID1: {COL_NAME1: [cell1], COL_NAME2: [cell3]}} - - row_alt_data = PartialRowData(ROW_KEY_ALT) - row_alt_data._chunks_encountered = True - row_alt_data._committed = True - row_alt_data._cells = { - COLUMN_FAMILY_ID1: {COL_NAME1: [cell2]}, - COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, - } - - expected_rows = {ROW_KEY: row_data, ROW_KEY_ALT: row_alt_data} - self.assertEqual(rows_data.rows, expected_rows) - - def test_read_with_label_applied(self): - self._maybe_emulator_skip("Labels not supported by Bigtable emulator") - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - cell1, _, cell3, _ = self._write_to_row(row, None, row) - row.commit() - - # Combine a label with column 1. - label1 = "label-red" - label1_filter = ApplyLabelFilter(label1) - col1_filter = ColumnQualifierRegexFilter(COL_NAME1) - chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) - - # Combine a label with column 2. - label2 = "label-blue" - label2_filter = ApplyLabelFilter(label2) - col2_filter = ColumnQualifierRegexFilter(COL_NAME2) - chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) - - # Bring our two labeled columns together. - row_filter = RowFilterUnion(filters=[chain1, chain2]) - partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - - cells_returned = partial_row_data.cells - col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1) - # Make sure COLUMN_FAMILY_ID1 was the only key. - self.assertEqual(len(cells_returned), 0) - - (cell1_new,) = col_fam1.pop(COL_NAME1) - (cell3_new,) = col_fam1.pop(COL_NAME2) - # Make sure COL_NAME1 and COL_NAME2 were the only keys. - self.assertEqual(len(col_fam1), 0) - - # Check that cell1 has matching values and gained a label. - self.assertEqual(cell1_new.value, cell1.value) - self.assertEqual(cell1_new.timestamp, cell1.timestamp) - self.assertEqual(cell1.labels, []) - self.assertEqual(cell1_new.labels, [label1]) - - # Check that cell3 has matching values and gained a label. - self.assertEqual(cell3_new.value, cell3.value) - self.assertEqual(cell3_new.timestamp, cell3.timestamp) - self.assertEqual(cell3.labels, []) - self.assertEqual(cell3_new.labels, [label2]) - - def test_access_with_non_admin_client(self): - client = Client(admin=False) - instance = client.instance(INSTANCE_ID_DATA) - table = instance.table(self._table.table_id) - self.assertIsNone(table.read_row("nonesuch")) From 00da0aef696f1c3351f4f7a04cd559fbfee4836b Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 22 Jul 2021 11:47:59 -0400 Subject: [PATCH 05/10] tests: remove remaining monolith fixtures Closes #391. 
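
Tests that previously reached into the mutable `Config` global now
receive their resources as pytest fixture parameters.  An illustrative
sketch of the consuming pattern (hypothetical test, not part of this
change; `data_instance_populated`, `tables_to_delete`, and the
`_helpers` retries are the fixtures and helpers introduced above):

    from . import _helpers

    def test_example(data_instance_populated, tables_to_delete):
        # The session-scoped fixture hands over a ready-made instance.
        table = data_instance_populated.table("example-table")
        table.create()
        # The function-scoped fixture deletes the table during teardown.
        tables_to_delete.append(table)
        assert _helpers.retry_until_true(table.exists)()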
--- tests/system/test_instance_admin.py | 4 +- tests/system/test_monolith.py | 198 ---------------------------- 2 files changed, 1 insertion(+), 201 deletions(-) delete mode 100644 tests/system/test_monolith.py diff --git a/tests/system/test_instance_admin.py b/tests/system/test_instance_admin.py index 9c08aaec1..3a60facb8 100644 --- a/tests/system/test_instance_admin.py +++ b/tests/system/test_instance_admin.py @@ -527,9 +527,7 @@ def test_instance_update_w_type( labels=instance_labels, ) alt_cluster_id = f"{alt_instance_id}-cluster" - cluster = instance.cluster( - alt_cluster_id, location_id=location_id, - ) + cluster = instance.cluster(alt_cluster_id, location_id=location_id,) operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) diff --git a/tests/system/test_monolith.py b/tests/system/test_monolith.py deleted file mode 100644 index 3b7831f8e..000000000 --- a/tests/system/test_monolith.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import os -import unittest - -from google.api_core.exceptions import TooManyRequests -from google.cloud.environment_vars import BIGTABLE_EMULATOR -from test_utils.retry import RetryErrors - -# from test_utils.system import EmulatorCreds -from test_utils.system import unique_resource_id - -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud._helpers import _microseconds_from_datetime -from google.cloud._helpers import UTC -from google.cloud.bigtable.client import Client -from google.cloud.bigtable.row_data import Cell - -# from google.cloud.bigtable_admin_v2.gapic import ( -# bigtable_table_admin_client_config as table_admin_config, -# ) - -UNIQUE_SUFFIX = unique_resource_id("-") -LOCATION_ID = "us-central1-c" -INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX -INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX -TABLE_ID = "google-cloud-python-test-table" -CLUSTER_ID = INSTANCE_ID + "-cluster" -CLUSTER_ID_DATA = INSTANCE_ID_DATA + "-cluster" -SERVE_NODES = 3 -COLUMN_FAMILY_ID1 = "col-fam-id1" -COLUMN_FAMILY_ID2 = "col-fam-id2" -COL_NAME1 = b"col-name1" -COL_NAME2 = b"col-name2" -COL_NAME3 = b"col-name3-but-other-fam" -CELL_VAL1 = b"cell-val" -CELL_VAL2 = b"cell-val-newer" -CELL_VAL3 = b"altcol-cell-val" -CELL_VAL4 = b"foo" -ROW_KEY = b"row-key" -ROW_KEY_ALT = b"row-key-alt" -EXISTING_INSTANCES = [] -LABEL_KEY = "python-system" -label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") -) -LABELS = {LABEL_KEY: str(label_stamp)} -KMS_KEY_NAME = os.environ.get("KMS_KEY_NAME", None) - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. 
- """ - - CLIENT = None - INSTANCE = None - INSTANCE_DATA = None - CLUSTER = None - CLUSTER_DATA = None - IN_EMULATOR = False - - -def _retry_on_unavailable(exc): - """Retry only errors whose status code is 'UNAVAILABLE'.""" - from grpc import StatusCode - - return exc.code() == StatusCode.UNAVAILABLE - - -retry_429 = RetryErrors(TooManyRequests, max_tries=9) - - -def setUpModule(): - from google.cloud.exceptions import GrpcRendezvous - from google.cloud.bigtable.enums import Instance - - # See: https://github.com/googleapis/google-cloud-python/issues/5928 - # interfaces = table_admin_config.config["interfaces"] - # iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] - # methods = iface_config["methods"] - # create_table = methods["CreateTable"] - # create_table["timeout_millis"] = 90000 - - Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None - - # Previously we created clients using a mock EmulatorCreds when targeting - # an emulator. - Config.CLIENT = Client(admin=True) - - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) - Config.CLUSTER = Config.INSTANCE.cluster( - CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, - ) - Config.INSTANCE_DATA = Config.CLIENT.instance( - INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS - ) - Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( - CLUSTER_ID_DATA, location_id=LOCATION_ID, - ) - - if not Config.IN_EMULATOR: - retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) - instances, failed_locations = retry(Config.CLIENT.list_instances)() - - if len(failed_locations) != 0: - raise ValueError("List instances failed in module set up.") - - EXISTING_INSTANCES[:] = instances - - # After listing, create the test instances. - admin_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) - admin_op.result(timeout=30) - data_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) - data_op.result(timeout=30) - - -def tearDownModule(): - if not Config.IN_EMULATOR: - retry_429(Config.INSTANCE.delete)() - retry_429(Config.INSTANCE_DATA.delete)() - - -class TestDataAPI(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls._table = table = Config.INSTANCE_DATA.table("test-data-api") - table.create() - table.column_family(COLUMN_FAMILY_ID1).create() - table.column_family(COLUMN_FAMILY_ID2).create() - - @classmethod - def tearDownClass(cls): - # Will also delete any data contained in the table. - cls._table.delete() - - def _maybe_emulator_skip(self, message): - # NOTE: This method is necessary because ``Config.IN_EMULATOR`` - # is set at runtime rather than import time, which means we - # can't use the @unittest.skipIf decorator. - if Config.IN_EMULATOR: - self.skipTest(message) - - def setUp(self): - self.rows_to_delete = [] - - def tearDown(self): - for row in self.rows_to_delete: - row.clear() - row.delete() - row.commit() - - def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): - timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) - timestamp1_micros = _microseconds_from_datetime(timestamp1) - # Truncate to millisecond granularity. 
- timestamp1_micros -= timestamp1_micros % 1000 - timestamp1 = _datetime_from_microseconds(timestamp1_micros) - # 1000 microseconds is a millisecond - timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) - timestamp2_micros = _microseconds_from_datetime(timestamp2) - timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) - timestamp3_micros = _microseconds_from_datetime(timestamp3) - timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) - timestamp4_micros = _microseconds_from_datetime(timestamp4) - - if row1 is not None: - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) - if row2 is not None: - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, timestamp=timestamp2) - if row3 is not None: - row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, timestamp=timestamp3) - if row4 is not None: - row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, timestamp=timestamp4) - - # Create the cells we will check. - cell1 = Cell(CELL_VAL1, timestamp1_micros) - cell2 = Cell(CELL_VAL2, timestamp2_micros) - cell3 = Cell(CELL_VAL3, timestamp3_micros) - cell4 = Cell(CELL_VAL4, timestamp4_micros) - return cell1, cell2, cell3, cell4 From de1f8a04fb4d85310658c2f3dfc286d69708d583 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 11 Aug 2021 15:25:27 -0400 Subject: [PATCH 06/10] test: harden flaky 'w_two_clusters' systest against GRPC Unavailable Ported from PR #385. Fixes #381. --- tests/system/_helpers.py | 12 ++++++++++++ tests/system/test_instance_admin.py | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py index 3a5d18edc..f6895a51f 100644 --- a/tests/system/_helpers.py +++ b/tests/system/_helpers.py @@ -14,7 +14,9 @@ import datetime +import grpc from google.api_core import exceptions +from google.cloud import exceptions as core_exceptions from google.cloud._helpers import UTC from test_utils import retry @@ -25,6 +27,16 @@ retry_until_false = retry.RetryResult(lambda result: not result) +def _retry_on_unavailable(exc): + """Retry only errors whose status code is 'UNAVAILABLE'.""" + return exc.code() == grpc.StatusCode.UNAVAILABLE + + +retry_grpc_unavailable = retry.RetryErrors( + core_exceptions.GrpcRendezvous, error_predicate=_retry_on_unavailable, +) + + def label_stamp(): return ( datetime.datetime.utcnow() diff --git a/tests/system/test_instance_admin.py b/tests/system/test_instance_admin.py index 3a60facb8..78928457c 100644 --- a/tests/system/test_instance_admin.py +++ b/tests/system/test_instance_admin.py @@ -264,7 +264,7 @@ def test_instance_create_w_two_clusters( temp_table_id = "test-get-cluster-states" temp_table = instance.table(temp_table_id) - temp_table.create() + _helpers.retry_grpc_unavailable(temp_table.create)() EncryptionType = enums.EncryptionInfo.EncryptionType encryption_info = temp_table.get_encryption_info() From 54bf4c1ae0800297f433a5303496243aa41f4bf5 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 15 Sep 2021 13:52:38 -0400 Subject: [PATCH 07/10] chore: comment on non-instance-admin support of emulator --- tests/system/conftest.py | 4 ++++ tests/system/test_instance_admin.py | 3 +++ 2 files changed, 7 insertions(+) diff --git a/tests/system/conftest.py b/tests/system/conftest.py index 9c3b3fcd5..778cf8c94 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -109,6 +109,8 @@ def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): @pytest.fixture(scope="session") def 
admin_instance_populated(admin_instance, admin_cluster, in_emulator): + # Emulator does not support instance admin operations (create / delete). + # See: https://cloud.google.com/bigtable/docs/emulator if not in_emulator: operation = admin_instance.create(clusters=[admin_cluster]) operation.result(timeout=30) @@ -144,6 +146,8 @@ def data_instance_populated( serve_nodes, in_emulator, ): + # Emulator does not support instance admin operations (create / delete). + # See: https://cloud.google.com/bigtable/docs/emulator if not in_emulator: instance = admin_client.instance(data_instance_id, labels=instance_labels) cluster = instance.cluster( diff --git a/tests/system/test_instance_admin.py b/tests/system/test_instance_admin.py index 78928457c..c5f7b525e 100644 --- a/tests/system/test_instance_admin.py +++ b/tests/system/test_instance_admin.py @@ -106,6 +106,9 @@ def test_client_list_instances(admin_client, admin_instance_populated, not_in_em def test_instance_exists_hit(admin_instance_populated): + # Emulator does not support instance admin operations (create / delete). + # It allows connecting with *any* project / instance name. + # See: https://cloud.google.com/bigtable/docs/emulator assert admin_instance_populated.exists() From 4b11ce896c7501a915349060ae8fa9172050351b Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 15 Sep 2021 14:13:07 -0400 Subject: [PATCH 08/10] tests: assert semantics of 'Table.truncate' --- tests/system/test_data_api.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/system/test_data_api.py b/tests/system/test_data_api.py index 7b04f5c83..82768928e 100644 --- a/tests/system/test_data_api.py +++ b/tests/system/test_data_api.py @@ -121,8 +121,7 @@ def test_table_truncate(data_table, rows_to_delete): data_table.truncate(timeout=200) - for row in data_table.read_rows(): - assert row.row_key.decode("utf-8") not in row_keys + assert list(data_table.read_rows()) == [] def test_table_drop_by_prefix(data_table, rows_to_delete): From bec068ccc69d7dee89b55ed6b1c31eab07f7a088 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 15 Sep 2021 14:20:59 -0400 Subject: [PATCH 09/10] tests: factor out table population helper --- tests/system/test_data_api.py | 40 ++++++++++------------------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/tests/system/test_data_api.py b/tests/system/test_data_api.py index 82768928e..0162ecdca 100644 --- a/tests/system/test_data_api.py +++ b/tests/system/test_data_api.py @@ -99,6 +99,13 @@ def test_table_mutate_rows(data_table, rows_to_delete): assert row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value == CELL_VAL4 +def _populate_table(data_table, rows_to_delete, row_keys): + for row_key in row_keys: + row = data_table.direct_row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row.commit() + rows_to_delete.append(row) + def test_table_truncate(data_table, rows_to_delete): row_keys = [ b"row_key_1", @@ -112,12 +119,7 @@ def test_table_truncate(data_table, rows_to_delete): b"row_key_pr_4", b"row_key_pr_5", ] - - for row_key in row_keys: - row = data_table.direct_row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - rows_to_delete.append(row) + _populate_table(data_table, rows_to_delete, row_keys) data_table.truncate(timeout=200) @@ -137,12 +139,7 @@ def test_table_drop_by_prefix(data_table, rows_to_delete): b"row_key_pr_4", b"row_key_pr_5", ] - - for row_key in row_keys: - row = data_table.direct_row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, 
CELL_VAL1) - row.commit() - rows_to_delete.append(row) + _populate_table(data_table, rows_to_delete, row_keys) data_table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) @@ -174,15 +171,7 @@ def test_table_read_rows_w_row_set(data_table, rows_to_delete): b"row_key_8", b"row_key_9", ] - - rows = [] - for row_key in row_keys: - row = data_table.direct_row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - rows_to_delete.append(row) - - data_table.mutate_rows(rows) + _populate_table(data_table, rows_to_delete, row_keys) row_range = RowRange(start_key=b"row_key_3", end_key=b"row_key_7") row_set = RowSet() @@ -209,14 +198,7 @@ def test_rowset_add_row_range_w_pfx(data_table, rows_to_delete): b"sample_row_key_1", b"sample_row_key_2", ] - - rows = [] - for row_key in row_keys: - row = data_table.direct_row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - rows_to_delete.append(row) - data_table.mutate_rows(rows) + _populate_table(data_table, rows_to_delete, row_keys) row_set = RowSet() row_set.add_row_range_with_prefix("row") From 31caa882ccc6592e6464920876de962449b3842a Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Wed, 15 Sep 2021 18:23:15 +0000 Subject: [PATCH 10/10] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- tests/system/test_data_api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/system/test_data_api.py b/tests/system/test_data_api.py index 0162ecdca..2137aa2e4 100644 --- a/tests/system/test_data_api.py +++ b/tests/system/test_data_api.py @@ -106,6 +106,7 @@ def _populate_table(data_table, rows_to_delete, row_keys): row.commit() rows_to_delete.append(row) + def test_table_truncate(data_table, rows_to_delete): row_keys = [ b"row_key_1",