From e55ca39eecc71495ec7fb2fe0fd4ecd7b873e3f8 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 14 Dec 2023 11:06:26 +0200 Subject: [PATCH] feature(pre-commit): replace pylint with ruff #5799 Ruff is a Rust-based linter for Python that works incredibly fast. Since pylint's speed is causing us issues on SCT CI, we should start using ruff. For now it uses the set of pylint rules that ruff has implemented, and we can slowly extend its configuration to use more of the available rule sets. Ref: https://github.com/charliermarsh/ruff Ref: https://www.youtube.com/watch?v=jeoL4qsSLbE --- .pre-commit-config.yaml | 7 +- docker/alternator-dns/dns_server.py | 4 +- docker/env/version | 2 +- functional_tests/scylla_operator/conftest.py | 2 +- longevity_test.py | 2 +- mgmt_cli_test.py | 1 + mgmt_upgrade_test.py | 5 +- ...rmance_regression_row_level_repair_test.py | 2 +- pyproject.toml | 16 + requirements.in | 2 +- requirements.txt | 441 +++++++++--------- sct.py | 10 +- sdcm/audit.py | 2 +- sdcm/cluster.py | 25 +- sdcm/cluster_aws.py | 8 +- sdcm/cluster_azure.py | 2 +- sdcm/cluster_gce.py | 10 +- sdcm/cluster_k8s/__init__.py | 2 +- sdcm/cluster_k8s/eks.py | 4 +- sdcm/cluster_k8s/gke.py | 4 +- sdcm/cluster_k8s/mini_k8s.py | 5 +- sdcm/coredump.py | 4 +- sdcm/cql_stress_cassandra_stress_thread.py | 2 +- sdcm/db_log_reader.py | 6 +- sdcm/db_stats.py | 19 +- sdcm/ec2_client.py | 2 +- sdcm/fill_db_data.py | 6 +- sdcm/gemini_thread.py | 2 +- sdcm/mgmt/cli.py | 2 +- sdcm/mgmt/operator.py | 4 +- sdcm/monitorstack/__init__.py | 2 +- sdcm/nemesis.py | 31 +- sdcm/prometheus.py | 4 +- sdcm/provision/azure/ip_provider.py | 2 +- .../azure/network_interface_provider.py | 2 +- .../azure/virtual_machine_provider.py | 4 +- .../azure/virtual_network_provider.py | 4 +- sdcm/provision/scylla_yaml/scylla_yaml.py | 2 +- sdcm/remote/libssh2_client/exceptions.py | 7 +- sdcm/results_analyze/__init__.py | 6 +- sdcm/results_analyze/test.py | 10 +- sdcm/sct_config.py | 14 +- sdcm/sct_events/events_processes.py | 2 +- sdcm/sct_runner.py | 7 +- sdcm/send_email.py | 2 +- sdcm/stress/latte_thread.py | 6 +- sdcm/stress_thread.py | 6 +- sdcm/tester.py | 25 +- sdcm/utils/cdc/options.py | 9 +- sdcm/utils/common.py | 17 +- sdcm/utils/data_validator.py | 48 +- sdcm/utils/docker_utils.py | 4 +- sdcm/utils/gce_utils.py | 2 +- sdcm/utils/get_username.py | 2 +- sdcm/utils/k8s/__init__.py | 8 +- sdcm/utils/k8s/chaos_mesh.py | 2 +- sdcm/utils/latency.py | 2 +- sdcm/utils/ldap.py | 2 +- sdcm/utils/log.py | 6 +- .../perf_simple_query_reporter.py | 5 +- sdcm/utils/properties.py | 8 +- sdcm/utils/remote_logger.py | 2 +- sdcm/utils/sstable/sstable_utils.py | 1 + sdcm/utils/toppartition_util.py | 4 +- sdcm/utils/version_utils.py | 10 +- sdcm/ycsb_thread.py | 6 +- sla_per_user_system_test.py | 2 +- unit_tests/lib/fake_provisioner.py | 2 +- unit_tests/provisioner/fake_azure_service.py | 40 +- unit_tests/test_sct_events_base.py | 8 +- unit_tests/test_version_utils.py | 13 +- upgrade_schema_test.py | 4 +- upgrade_test.py | 6 +- .../build_system/create_test_release_jobs.py | 2 +- utils/cloud_cleanup/azure/clean_azure.py | 9 +- 75 files changed, 476 insertions(+), 487 deletions(-) create mode 100644 pyproject.toml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 27f69fbee3e..b32798cb435 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,11 +39,10 @@ repos: types: [python] exclude: '\.sh$' - - id: pylint - name: pylint - entry: pylint -j 2 -d consider-using-f-string + - id: ruff + name: ruff + entry: ruff check --fix 
language: system - exclude: ^docker/alternator-dns/.*$ types: [python] - repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook diff --git a/docker/alternator-dns/dns_server.py b/docker/alternator-dns/dns_server.py index e1537738f4a..e90d3b11a0b 100644 --- a/docker/alternator-dns/dns_server.py +++ b/docker/alternator-dns/dns_server.py @@ -20,8 +20,8 @@ def livenodes_update(): - global alternator_port - global livenodes + global alternator_port # noqa: PLW0602 + global livenodes # noqa: PLW0603 while True: # Contact one of the already known nodes by random, to fetch a new # list of known nodes. diff --git a/docker/env/version b/docker/env/version index 029831272fc..4de3bf6a734 100644 --- a/docker/env/version +++ b/docker/env/version @@ -1 +1 @@ -1.69-update-scylla-driver-3.26.8 +1.70-introduce-ruff diff --git a/functional_tests/scylla_operator/conftest.py b/functional_tests/scylla_operator/conftest.py index 67e543ec637..170b18756af 100644 --- a/functional_tests/scylla_operator/conftest.py +++ b/functional_tests/scylla_operator/conftest.py @@ -70,7 +70,7 @@ def publish_test_result(): @pytest.fixture(autouse=True, scope='package', name="tester") def fixture_tester() -> ScyllaOperatorFunctionalClusterTester: - global TESTER # pylint: disable=global-statement + global TESTER # pylint: disable=global-statement # noqa: PLW0603 os.chdir(sct_abs_path()) tester_inst = ScyllaOperatorFunctionalClusterTester() TESTER = tester_inst # putting tester global, so we can report skipped test (one with mark.skip) diff --git a/longevity_test.py b/longevity_test.py index 71e0e06f1e7..29e7fe2dcfa 100644 --- a/longevity_test.py +++ b/longevity_test.py @@ -329,7 +329,7 @@ def chunks(_list, chunk_size): self._pre_create_templated_user_schema(batch_start=extra_tables_idx, batch_end=extra_tables_idx+num_of_newly_created_tables) for i in range(num_of_newly_created_tables): - batch += self.create_templated_user_stress_params(extra_tables_idx + i, cs_profile=cs_profile) + batch.append(self.create_templated_user_stress_params(extra_tables_idx + i, cs_profile=cs_profile)) nodes_ips = self.all_node_ips_for_stress_command for params in batch: diff --git a/mgmt_cli_test.py b/mgmt_cli_test.py index 7cfcf4cb4c7..e4d6afc88e7 100644 --- a/mgmt_cli_test.py +++ b/mgmt_cli_test.py @@ -848,6 +848,7 @@ def test_repair_multiple_keyspace_types(self): # pylint: disable=invalid-name keyspace_repair_percentage = per_keyspace_progress.get(keyspace_name, None) assert keyspace_repair_percentage is not None, \ "The keyspace {} was not included in the repair!".format(keyspace_name) + assert keyspace_repair_percentage == 100, \ "The repair of the keyspace {} stopped at {}%".format( keyspace_name, keyspace_repair_percentage) diff --git a/mgmt_upgrade_test.py b/mgmt_upgrade_test.py index a456d55e88e..b05a254e167 100644 --- a/mgmt_upgrade_test.py +++ b/mgmt_upgrade_test.py @@ -277,9 +277,8 @@ def validate_previous_task_details(task, previous_task_details): # and as a result it could be a BIT imprecise if abs(delta.total_seconds()) > 60: mismatched_details_name_list.append(detail_name) - else: - if current_value != previous_task_details[detail_name]: - mismatched_details_name_list.append(detail_name) + elif current_value != previous_task_details[detail_name]: + mismatched_details_name_list.append(detail_name) complete_error_description = _create_mismatched_details_error_message(previous_task_details, current_task_details, mismatched_details_name_list) diff --git a/performance_regression_row_level_repair_test.py 
b/performance_regression_row_level_repair_test.py index 3b5bfc7ee71..00be4fcf354 100644 --- a/performance_regression_row_level_repair_test.py +++ b/performance_regression_row_level_repair_test.py @@ -80,7 +80,7 @@ def preload_data(self, consistency_level=None): for stress_cmd in prepare_write_cmd: if consistency_level: - stress_cmd = self._update_cl_in_stress_cmd( + stress_cmd = self._update_cl_in_stress_cmd( # noqa: PLW2901 str_stress_cmd=stress_cmd, consistency_level=consistency_level) params.update({'stress_cmd': stress_cmd}) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000000..03f711db2fe --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,16 @@ +[tool.ruff] +lint.select = ["PL"] + +lint.ignore = ["E501", "PLR2004"] + +target-version = "py310" + +force-exclude = true +line-length = 240 +respect-gitignore = true + + +[tool.ruff.lint.pylint] +max-args = 12 +max-statements = 100 +max-branches = 24 diff --git a/requirements.in b/requirements.in index dc036ffc1f3..421892b36d6 100644 --- a/requirements.in +++ b/requirements.in @@ -30,7 +30,7 @@ python-jenkins==1.7.0 ssh2-python==1.0.0 argus-alm==0.12.3 parameterized==0.8.1 -pylint==2.11.1 # Needed for pre-commit hooks +ruff==0.4.7 # Needed for pre-commit hooks autopep8==1.5.7 # Needed for pre-commit hooks kubernetes==24.2.0 packaging==21.3 diff --git a/requirements.txt b/requirements.txt index 415947a268b..a9c6e2444d5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -103,10 +103,6 @@ argus-alm==0.12.3 \ --hash=sha256:635f4aac99f6da0e04ed064cda49cac5a45741c7f848c10fbd717e1809278eaf \ --hash=sha256:9264a554c7f7b2f8cd364424ad043d0a48a70416f5a5f41fc2bb62030cec6f1b # via -r requirements.in -astroid==2.8.6 \ - --hash=sha256:5f6f75e45f15290e73b56f9dfde95b4bf96382284cde406ef4203e928335a495 \ - --hash=sha256:cd8326b424c971e7d87678609cf6275d22028afd37d6ac59c16d47f1245882f6 - # via pylint async-timeout==4.0.3 \ --hash=sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f \ --hash=sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028 @@ -134,9 +130,9 @@ azure-common==1.1.28 \ # azure-mgmt-resource # azure-mgmt-resourcegraph # azure-mgmt-subscription -azure-core==1.30.1 \ - --hash=sha256:26273a254131f84269e8ea4464f3560c731f29c0c1f69ac99010845f239c1a8f \ - --hash=sha256:7c5ee397e48f281ec4dd773d67a0a47a0962ed6fa833036057f9ea067f688e74 +azure-core==1.30.2 \ + --hash=sha256:a14dc210efcd608821aa472d9fb8e8d035d29b68993819147bc290a8ac224472 \ + --hash=sha256:cf019c1ca832e96274ae85abd3d9f752397194d9fea3b41487290562ac8abe4a # via # azure-identity # azure-mgmt-core @@ -235,9 +231,9 @@ botocore==1.31.4 \ # awscli # boto3 # s3transfer -botocore-stubs==1.34.94 \ - --hash=sha256:64d80a3467e3b19939e9c2750af33328b3087f8f524998dbdf7ed168227f507d \ - --hash=sha256:b0345f55babd8b901c53804fc5c326a4a0bd2e23e3b71f9ea5d9f7663466e6ba +botocore-stubs==1.34.127 \ + --hash=sha256:8aabb7b22e2b19df94dd72bed6b849f2146452a2aa501554d619ef76287bb2d6 \ + --hash=sha256:f78543fe93c21634458090d85cce68edd0c994f87a615fc6e91af4d26e96717f # via boto3-stubs build==1.2.1 \ --hash=sha256:526263f4870c26f26c433545579475377b2b7588b6f1eac76a001e873ae3e19d \ @@ -423,39 +419,39 @@ colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 # via awscli -cryptography==42.0.7 \ - --hash=sha256:02c0eee2d7133bdbbc5e24441258d5d2244beb31da5ed19fbb80315f4bbbff55 \ - 
--hash=sha256:0d563795db98b4cd57742a78a288cdbdc9daedac29f2239793071fe114f13785 \ - --hash=sha256:16268d46086bb8ad5bf0a2b5544d8a9ed87a0e33f5e77dd3c3301e63d941a83b \ - --hash=sha256:1a58839984d9cb34c855197043eaae2c187d930ca6d644612843b4fe8513c886 \ - --hash=sha256:2954fccea107026512b15afb4aa664a5640cd0af630e2ee3962f2602693f0c82 \ - --hash=sha256:2e47577f9b18723fa294b0ea9a17d5e53a227867a0a4904a1a076d1646d45ca1 \ - --hash=sha256:31adb7d06fe4383226c3e963471f6837742889b3c4caa55aac20ad951bc8ffda \ - --hash=sha256:3577d029bc3f4827dd5bf8bf7710cac13527b470bbf1820a3f394adb38ed7d5f \ - --hash=sha256:36017400817987670037fbb0324d71489b6ead6231c9604f8fc1f7d008087c68 \ - --hash=sha256:362e7197754c231797ec45ee081f3088a27a47c6c01eff2ac83f60f85a50fe60 \ - --hash=sha256:3de9a45d3b2b7d8088c3fbf1ed4395dfeff79d07842217b38df14ef09ce1d8d7 \ - --hash=sha256:4f698edacf9c9e0371112792558d2f705b5645076cc0aaae02f816a0171770fd \ - --hash=sha256:5482e789294854c28237bba77c4c83be698be740e31a3ae5e879ee5444166582 \ - --hash=sha256:5e44507bf8d14b36b8389b226665d597bc0f18ea035d75b4e53c7b1ea84583cc \ - --hash=sha256:779245e13b9a6638df14641d029add5dc17edbef6ec915688f3acb9e720a5858 \ - --hash=sha256:789caea816c6704f63f6241a519bfa347f72fbd67ba28d04636b7c6b7da94b0b \ - --hash=sha256:7f8b25fa616d8b846aef64b15c606bb0828dbc35faf90566eb139aa9cff67af2 \ - --hash=sha256:8cb8ce7c3347fcf9446f201dc30e2d5a3c898d009126010cbd1f443f28b52678 \ - --hash=sha256:93a3209f6bb2b33e725ed08ee0991b92976dfdcf4e8b38646540674fc7508e13 \ - --hash=sha256:a3a5ac8b56fe37f3125e5b72b61dcde43283e5370827f5233893d461b7360cd4 \ - --hash=sha256:a47787a5e3649008a1102d3df55424e86606c9bae6fb77ac59afe06d234605f8 \ - --hash=sha256:a79165431551042cc9d1d90e6145d5d0d3ab0f2d66326c201d9b0e7f5bf43604 \ - --hash=sha256:a987f840718078212fdf4504d0fd4c6effe34a7e4740378e59d47696e8dfb477 \ - --hash=sha256:a9bc127cdc4ecf87a5ea22a2556cab6c7eda2923f84e4f3cc588e8470ce4e42e \ - --hash=sha256:bd13b5e9b543532453de08bcdc3cc7cebec6f9883e886fd20a92f26940fd3e7a \ - --hash=sha256:c65f96dad14f8528a447414125e1fc8feb2ad5a272b8f68477abbcc1ea7d94b9 \ - --hash=sha256:d8e3098721b84392ee45af2dd554c947c32cc52f862b6a3ae982dbb90f577f14 \ - --hash=sha256:e6b79d0adb01aae87e8a44c2b64bc3f3fe59515280e00fb6d57a7267a2583cda \ - --hash=sha256:e6b8f1881dac458c34778d0a424ae5769de30544fc678eac51c1c8bb2183e9da \ - --hash=sha256:e9b2a6309f14c0497f348d08a065d52f3020656f675819fc405fb63bbcd26562 \ - --hash=sha256:ecbfbc00bf55888edda9868a4cf927205de8499e7fabe6c050322298382953f2 \ - --hash=sha256:efd0bf5205240182e0f13bcaea41be4fdf5c22c5129fc7ced4a0282ac86998c9 +cryptography==42.0.8 \ + --hash=sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad \ + --hash=sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583 \ + --hash=sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b \ + --hash=sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c \ + --hash=sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1 \ + --hash=sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648 \ + --hash=sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949 \ + --hash=sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba \ + --hash=sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c \ + --hash=sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9 \ + --hash=sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d \ + 
--hash=sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c \ + --hash=sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e \ + --hash=sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2 \ + --hash=sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d \ + --hash=sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7 \ + --hash=sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70 \ + --hash=sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2 \ + --hash=sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7 \ + --hash=sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14 \ + --hash=sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe \ + --hash=sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e \ + --hash=sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71 \ + --hash=sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961 \ + --hash=sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7 \ + --hash=sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c \ + --hash=sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28 \ + --hash=sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842 \ + --hash=sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902 \ + --hash=sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801 \ + --hash=sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a \ + --hash=sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e # via # azure-identity # azure-storage-blob @@ -516,9 +512,9 @@ fido2==0.9.3 \ # via # ctap-keyring-device # gimme-aws-creds -filelock==3.14.0 \ - --hash=sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f \ - --hash=sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a +filelock==3.15.1 \ + --hash=sha256:58a2549afdf9e02e10720eaa4d4470f56386d7a6f72edd7d0596337af8ed7ad8 \ + --hash=sha256:71b3102950e91dfc1bb4209b64be4dc8854f40e5f534428d8684f953ac847fac # via virtualenv flatdict==4.0.1 \ --hash=sha256:cd32f08fd31ed21eb09ebc76f06b6bd12046a24f77beb1fd0281917e47f26742 @@ -632,9 +628,9 @@ google-api-python-client==2.93.0 \ --hash=sha256:62ee28e96031a10a1c341f226a75ac6a4f16bdb1d888dc8222b2cdca133d0031 \ --hash=sha256:f34abb671afd488bd19d30721ea20fb30d3796ddd825d6f91f26d8c718a9f07d # via -r requirements.in -google-auth==2.29.0 \ - --hash=sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360 \ - --hash=sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415 +google-auth==2.30.0 \ + --hash=sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5 \ + --hash=sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688 # via # google-api-core # google-api-python-client @@ -728,9 +724,9 @@ google-crc32c==1.5.0 \ --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 # via google-resumable-media -google-resumable-media==2.7.0 \ - --hash=sha256:5f18f5fa9836f4b083162064a1c2c98c17239bfda9ca50ad970ccf905f3e625b \ - --hash=sha256:79543cfe433b63fd81c0844b7803aba1bb8950b47bedf7d980c38fa123937e08 +google-resumable-media==2.7.1 \ + --hash=sha256:103ebc4ba331ab1bfdac0250f8033627a2cd7cde09e7ccff9181e31ba4315b2c 
\ + --hash=sha256:eae451a7b2e2cdbaaa0fd2eb00cc8a1ee5e95e16b55597359cbc3d27d7d90e33 # via google-cloud-storage googleapis-common-protos==1.63.1 \ --hash=sha256:0e1c2cdfcbc354b76e4a211a35ea35d6926a835cba1377073c4861db904a1877 \ @@ -842,10 +838,6 @@ isodate==0.6.1 \ # azure-mgmt-compute # azure-storage-blob # msrest -isort==5.13.2 \ - --hash=sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109 \ - --hash=sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6 - # via pylint jaraco-classes==3.4.0 \ --hash=sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd \ --hash=sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790 @@ -892,45 +884,6 @@ kubernetes==24.2.0 \ --hash=sha256:9900f12ae92007533247167d14cdee949cd8c7721f88b4a7da5f5351da3834cd \ --hash=sha256:da19d58865cf903a8c7b9c3691a2e6315d583a98f0659964656dfdf645bf7e49 # via -r requirements.in -lazy-object-proxy==1.10.0 \ - --hash=sha256:009e6bb1f1935a62889ddc8541514b6a9e1fcf302667dcb049a0be5c8f613e56 \ - --hash=sha256:02c83f957782cbbe8136bee26416686a6ae998c7b6191711a04da776dc9e47d4 \ - --hash=sha256:0aefc7591920bbd360d57ea03c995cebc204b424524a5bd78406f6e1b8b2a5d8 \ - --hash=sha256:127a789c75151db6af398b8972178afe6bda7d6f68730c057fbbc2e96b08d282 \ - --hash=sha256:18dd842b49456aaa9a7cf535b04ca4571a302ff72ed8740d06b5adcd41fe0757 \ - --hash=sha256:217138197c170a2a74ca0e05bddcd5f1796c735c37d0eee33e43259b192aa424 \ - --hash=sha256:2297f08f08a2bb0d32a4265e98a006643cd7233fb7983032bd61ac7a02956b3b \ - --hash=sha256:2fc0a92c02fa1ca1e84fc60fa258458e5bf89d90a1ddaeb8ed9cc3147f417255 \ - --hash=sha256:30b339b2a743c5288405aa79a69e706a06e02958eab31859f7f3c04980853b70 \ - --hash=sha256:366c32fe5355ef5fc8a232c5436f4cc66e9d3e8967c01fb2e6302fd6627e3d94 \ - --hash=sha256:3ad54b9ddbe20ae9f7c1b29e52f123120772b06dbb18ec6be9101369d63a4074 \ - --hash=sha256:5ad9e6ed739285919aa9661a5bbed0aaf410aa60231373c5579c6b4801bd883c \ - --hash=sha256:5faf03a7d8942bb4476e3b62fd0f4cf94eaf4618e304a19865abf89a35c0bbee \ - --hash=sha256:75fc59fc450050b1b3c203c35020bc41bd2695ed692a392924c6ce180c6f1dc9 \ - --hash=sha256:76a095cfe6045c7d0ca77db9934e8f7b71b14645f0094ffcd842349ada5c5fb9 \ - --hash=sha256:78247b6d45f43a52ef35c25b5581459e85117225408a4128a3daf8bf9648ac69 \ - --hash=sha256:782e2c9b2aab1708ffb07d4bf377d12901d7a1d99e5e410d648d892f8967ab1f \ - --hash=sha256:7ab7004cf2e59f7c2e4345604a3e6ea0d92ac44e1c2375527d56492014e690c3 \ - --hash=sha256:80b39d3a151309efc8cc48675918891b865bdf742a8616a337cb0090791a0de9 \ - --hash=sha256:80fa48bd89c8f2f456fc0765c11c23bf5af827febacd2f523ca5bc1893fcc09d \ - --hash=sha256:855e068b0358ab916454464a884779c7ffa312b8925c6f7401e952dcf3b89977 \ - --hash=sha256:92f09ff65ecff3108e56526f9e2481b8116c0b9e1425325e13245abfd79bdb1b \ - --hash=sha256:952c81d415b9b80ea261d2372d2a4a2332a3890c2b83e0535f263ddfe43f0d43 \ - --hash=sha256:9a3a87cf1e133e5b1994144c12ca4aa3d9698517fe1e2ca82977781b16955658 \ - --hash=sha256:9e4ed0518a14dd26092614412936920ad081a424bdcb54cc13349a8e2c6d106a \ - --hash=sha256:a899b10e17743683b293a729d3a11f2f399e8a90c73b089e29f5d0fe3509f0dd \ - --hash=sha256:b1f711e2c6dcd4edd372cf5dec5c5a30d23bba06ee012093267b3376c079ec83 \ - --hash=sha256:b4f87d4ed9064b2628da63830986c3d2dca7501e6018347798313fcf028e2fd4 \ - --hash=sha256:cb73507defd385b7705c599a94474b1d5222a508e502553ef94114a143ec6696 \ - --hash=sha256:dc0d2fc424e54c70c4bc06787e4072c4f3b1aa2f897dfdc34ce1013cf3ceef05 \ - --hash=sha256:e221060b701e2aa2ea991542900dd13907a5c90fa80e199dbf5a03359019e7a3 \ - 
--hash=sha256:e271058822765ad5e3bca7f05f2ace0de58a3f4e62045a8c90a0dfd2f8ad8cc6 \ - --hash=sha256:e2adb09778797da09d2b5ebdbceebf7dd32e2c96f79da9052b2e87b6ea495895 \ - --hash=sha256:e333e2324307a7b5d86adfa835bb500ee70bfcd1447384a822e96495796b0ca4 \ - --hash=sha256:e98c8af98d5707dcdecc9ab0863c0ea6e88545d42ca7c3feffb6b4d1e370c7ba \ - --hash=sha256:edb45bb8278574710e68a6b021599a10ce730d156e5b254941754a9cc0b17d03 \ - --hash=sha256:fec03caabbc6b59ea4a638bee5fce7117be8e99a4103d9d5ad77f15d6f81020c - # via astroid ldap3==2.9.1 \ --hash=sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70 \ --hash=sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f @@ -1010,19 +963,15 @@ mbstrdecoder==1.1.3 \ # sqliteschema # subprocrunner # typepy -mccabe==0.6.1 \ - --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ - --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f - # via pylint -more-itertools==10.2.0 \ - --hash=sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684 \ - --hash=sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1 +more-itertools==10.3.0 \ + --hash=sha256:e5d93ef411224fbcef366a6e8ddc4c5781bc6359d43412a65dd5964e46111463 \ + --hash=sha256:ea6a02e24a9161e51faad17a8782b92a0df82c12c1c8886fec7f0c3fa1a1b320 # via # jaraco-classes # jaraco-functools -msal==1.28.0 \ - --hash=sha256:3064f80221a21cd535ad8c3fafbb3a3582cd9c7e9af0bb789ae14f726a0ca99b \ - --hash=sha256:80bbabe34567cb734efd2ec1869b2d98195c927455369d8077b3c542088c5c9d +msal==1.28.1 \ + --hash=sha256:563c2d70de77a2ca9786aab84cb4e133a38a6897e6676774edc23d610bfc9e7b \ + --hash=sha256:d72bbfe2d5c2f2555f4bc6205be4450ddfd12976610dd9a16a9ab0f05c68b64d # via # azure-identity # msal-extensions @@ -1195,53 +1144,53 @@ orderedmultidict==1.0.1 \ --hash=sha256:04070bbb5e87291cc9bfa51df413677faf2141c73c61d2a5f7b26bea3cd882ad \ --hash=sha256:43c839a17ee3cdd62234c47deca1a8508a3f2ca1d0678a3bf791c87cf84adbf3 # via furl -orjson==3.10.3 \ - --hash=sha256:0943a96b3fa09bee1afdfccc2cb236c9c64715afa375b2af296c73d91c23eab2 \ - --hash=sha256:0a62f9968bab8a676a164263e485f30a0b748255ee2f4ae49a0224be95f4532b \ - --hash=sha256:16bda83b5c61586f6f788333d3cf3ed19015e3b9019188c56983b5a299210eb5 \ - --hash=sha256:1770e2a0eae728b050705206d84eda8b074b65ee835e7f85c919f5705b006c9b \ - --hash=sha256:17e0713fc159abc261eea0f4feda611d32eabc35708b74bef6ad44f6c78d5ea0 \ - --hash=sha256:18566beb5acd76f3769c1d1a7ec06cdb81edc4d55d2765fb677e3eaa10fa99e0 \ - --hash=sha256:1952c03439e4dce23482ac846e7961f9d4ec62086eb98ae76d97bd41d72644d7 \ - --hash=sha256:1bd2218d5a3aa43060efe649ec564ebedec8ce6ae0a43654b81376216d5ebd42 \ - --hash=sha256:1c23dfa91481de880890d17aa7b91d586a4746a4c2aa9a145bebdbaf233768d5 \ - --hash=sha256:252124b198662eee80428f1af8c63f7ff077c88723fe206a25df8dc57a57b1fa \ - --hash=sha256:2b166507acae7ba2f7c315dcf185a9111ad5e992ac81f2d507aac39193c2c818 \ - --hash=sha256:2e5e176c994ce4bd434d7aafb9ecc893c15f347d3d2bbd8e7ce0b63071c52e25 \ - --hash=sha256:3582b34b70543a1ed6944aca75e219e1192661a63da4d039d088a09c67543b08 \ - --hash=sha256:382e52aa4270a037d41f325e7d1dfa395b7de0c367800b6f337d8157367bf3a7 \ - --hash=sha256:416b195f78ae461601893f482287cee1e3059ec49b4f99479aedf22a20b1098b \ - --hash=sha256:4ad1f26bea425041e0a1adad34630c4825a9e3adec49079b1fb6ac8d36f8b754 \ - --hash=sha256:4c895383b1ec42b017dd2c75ae8a5b862fc489006afde06f14afbdd0309b2af0 \ - --hash=sha256:5102f50c5fc46d94f2033fe00d392588564378260d64377aec702f21a7a22912 \ - 
--hash=sha256:520de5e2ef0b4ae546bea25129d6c7c74edb43fc6cf5213f511a927f2b28148b \ - --hash=sha256:544a12eee96e3ab828dbfcb4d5a0023aa971b27143a1d35dc214c176fdfb29b3 \ - --hash=sha256:73100d9abbbe730331f2242c1fc0bcb46a3ea3b4ae3348847e5a141265479700 \ - --hash=sha256:831c6ef73f9aa53c5f40ae8f949ff7681b38eaddb6904aab89dca4d85099cb78 \ - --hash=sha256:8bc7a4df90da5d535e18157220d7915780d07198b54f4de0110eca6b6c11e290 \ - --hash=sha256:8d0b84403d287d4bfa9bf7d1dc298d5c1c5d9f444f3737929a66f2fe4fb8f134 \ - --hash=sha256:8d40c7f7938c9c2b934b297412c067936d0b54e4b8ab916fd1a9eb8f54c02294 \ - --hash=sha256:9059d15c30e675a58fdcd6f95465c1522b8426e092de9fff20edebfdc15e1cb0 \ - --hash=sha256:93433b3c1f852660eb5abdc1f4dd0ced2be031ba30900433223b28ee0140cde5 \ - --hash=sha256:978be58a68ade24f1af7758626806e13cff7748a677faf95fbb298359aa1e20d \ - --hash=sha256:99b880d7e34542db89f48d14ddecbd26f06838b12427d5a25d71baceb5ba119d \ - --hash=sha256:9a7bc9e8bc11bac40f905640acd41cbeaa87209e7e1f57ade386da658092dc16 \ - --hash=sha256:9e253498bee561fe85d6325ba55ff2ff08fb5e7184cd6a4d7754133bd19c9195 \ - --hash=sha256:9f3e87733823089a338ef9bbf363ef4de45e5c599a9bf50a7a9b82e86d0228da \ - --hash=sha256:9fb6c3f9f5490a3eb4ddd46fc1b6eadb0d6fc16fb3f07320149c3286a1409dd8 \ - --hash=sha256:a39aa73e53bec8d410875683bfa3a8edf61e5a1c7bb4014f65f81d36467ea098 \ - --hash=sha256:b69a58a37dab856491bf2d3bbf259775fdce262b727f96aafbda359cb1d114d8 \ - --hash=sha256:b8d4d1a6868cde356f1402c8faeb50d62cee765a1f7ffcfd6de732ab0581e063 \ - --hash=sha256:ba7f67aa7f983c4345eeda16054a4677289011a478ca947cd69c0a86ea45e534 \ - --hash=sha256:be2719e5041e9fb76c8c2c06b9600fe8e8584e6980061ff88dcbc2691a16d20d \ - --hash=sha256:be2aab54313752c04f2cbaab4515291ef5af8c2256ce22abc007f89f42f49109 \ - --hash=sha256:c0403ed9c706dcd2809f1600ed18f4aae50be263bd7112e54b50e2c2bc3ebd6d \ - --hash=sha256:c8334c0d87103bb9fbbe59b78129f1f40d1d1e8355bbed2ca71853af15fa4ed3 \ - --hash=sha256:cb0175a5798bdc878956099f5c54b9837cb62cfbf5d0b86ba6d77e43861bcec2 \ - --hash=sha256:ccaa0a401fc02e8828a5bedfd80f8cd389d24f65e5ca3954d72c6582495b4bcf \ - --hash=sha256:cf20465e74c6e17a104ecf01bf8cd3b7b252565b4ccee4548f18b012ff2f8069 \ - --hash=sha256:d4a654ec1de8fdaae1d80d55cee65893cb06494e124681ab335218be6a0691e7 \ - --hash=sha256:e852baafceff8da3c9defae29414cc8513a1586ad93e45f27b89a639c68e8176 +orjson==3.10.5 \ + --hash=sha256:03b565c3b93f5d6e001db48b747d31ea3819b89abf041ee10ac6988886d18e01 \ + --hash=sha256:099e81a5975237fda3100f918839af95f42f981447ba8f47adb7b6a3cdb078fa \ + --hash=sha256:10c0eb7e0c75e1e486c7563fe231b40fdd658a035ae125c6ba651ca3b07936f5 \ + --hash=sha256:1146bf85ea37ac421594107195db8bc77104f74bc83e8ee21a2e58596bfb2f04 \ + --hash=sha256:1670fe88b116c2745a3a30b0f099b699a02bb3482c2591514baf5433819e4f4d \ + --hash=sha256:185c394ef45b18b9a7d8e8f333606e2e8194a50c6e3c664215aae8cf42c5385e \ + --hash=sha256:1ad1de7fef79736dde8c3554e75361ec351158a906d747bd901a52a5c9c8d24b \ + --hash=sha256:235dadefb793ad12f7fa11e98a480db1f7c6469ff9e3da5e73c7809c700d746b \ + --hash=sha256:28afa96f496474ce60d3340fe8d9a263aa93ea01201cd2bad844c45cd21f5268 \ + --hash=sha256:2d97531cdfe9bdd76d492e69800afd97e5930cb0da6a825646667b2c6c6c0211 \ + --hash=sha256:338fd4f071b242f26e9ca802f443edc588fa4ab60bfa81f38beaedf42eda226c \ + --hash=sha256:36a10f43c5f3a55c2f680efe07aa93ef4a342d2960dd2b1b7ea2dd764fe4a37c \ + --hash=sha256:3d21b9983da032505f7050795e98b5d9eee0df903258951566ecc358f6696969 \ + --hash=sha256:51bbcdea96cdefa4a9b4461e690c75ad4e33796530d182bdd5c38980202c134a \ + 
--hash=sha256:53ed1c879b10de56f35daf06dbc4a0d9a5db98f6ee853c2dbd3ee9d13e6f302f \ + --hash=sha256:545d493c1f560d5ccfc134803ceb8955a14c3fcb47bbb4b2fee0232646d0b932 \ + --hash=sha256:584c902ec19ab7928fd5add1783c909094cc53f31ac7acfada817b0847975f26 \ + --hash=sha256:5a35455cc0b0b3a1eaf67224035f5388591ec72b9b6136d66b49a553ce9eb1e6 \ + --hash=sha256:5df58d206e78c40da118a8c14fc189207fffdcb1f21b3b4c9c0c18e839b5a214 \ + --hash=sha256:64c9cc089f127e5875901ac05e5c25aa13cfa5dbbbd9602bda51e5c611d6e3e2 \ + --hash=sha256:68f85ecae7af14a585a563ac741b0547a3f291de81cd1e20903e79f25170458f \ + --hash=sha256:6970ed7a3126cfed873c5d21ece1cd5d6f83ca6c9afb71bbae21a0b034588d96 \ + --hash=sha256:6b68742c469745d0e6ca5724506858f75e2f1e5b59a4315861f9e2b1df77775a \ + --hash=sha256:7a5baef8a4284405d96c90c7c62b755e9ef1ada84c2406c24a9ebec86b89f46d \ + --hash=sha256:7d10cc1b594951522e35a3463da19e899abe6ca95f3c84c69e9e901e0bd93d38 \ + --hash=sha256:85c89131d7b3218db1b24c4abecea92fd6c7f9fab87441cfc342d3acc725d807 \ + --hash=sha256:8a11d459338f96a9aa7f232ba95679fc0c7cedbd1b990d736467894210205c09 \ + --hash=sha256:8c13ca5e2ddded0ce6a927ea5a9f27cae77eee4c75547b4297252cb20c4d30e6 \ + --hash=sha256:9cd684927af3e11b6e754df80b9ffafd9fb6adcaa9d3e8fdd5891be5a5cad51e \ + --hash=sha256:b2efbd67feff8c1f7728937c0d7f6ca8c25ec81373dc8db4ef394c1d93d13dc5 \ + --hash=sha256:b39e006b00c57125ab974362e740c14a0c6a66ff695bff44615dcf4a70ce2b86 \ + --hash=sha256:b6c8e30adfa52c025f042a87f450a6b9ea29649d828e0fec4858ed5e6caecf63 \ + --hash=sha256:be79e2393679eda6a590638abda16d167754393f5d0850dcbca2d0c3735cebe2 \ + --hash=sha256:c05f16701ab2a4ca146d0bca950af254cb7c02f3c01fca8efbbad82d23b3d9d4 \ + --hash=sha256:c4057c3b511bb8aef605616bd3f1f002a697c7e4da6adf095ca5b84c0fd43595 \ + --hash=sha256:c4a65310ccb5c9910c47b078ba78e2787cb3878cdded1702ac3d0da71ddc5228 \ + --hash=sha256:ca0b3a94ac8d3886c9581b9f9de3ce858263865fdaa383fbc31c310b9eac07c9 \ + --hash=sha256:cc28e90a7cae7fcba2493953cff61da5a52950e78dc2dacfe931a317ee3d8de7 \ + --hash=sha256:cdf7365063e80899ae3a697def1277c17a7df7ccfc979990a403dfe77bb54d40 \ + --hash=sha256:d69858c32f09c3e1ce44b617b3ebba1aba030e777000ebdf72b0d8e365d0b2b3 \ + --hash=sha256:dbead71dbe65f959b7bd8cf91e0e11d5338033eba34c114f69078d59827ee139 \ + --hash=sha256:dcbe82b35d1ac43b0d84072408330fd3295c2896973112d495e7234f7e3da2e1 \ + --hash=sha256:dfc91d4720d48e2a709e9c368d5125b4b5899dced34b5400c3837dadc7d6271b \ + --hash=sha256:eded5138cc565a9d618e111c6d5c2547bbdd951114eb822f7f6309e04db0fb47 \ + --hash=sha256:f4324929c2dd917598212bfd554757feca3e5e0fa60da08be11b4aa8b90013c1 \ + --hash=sha256:fb66215277a230c456f9038d5e2d84778141643207f85336ef8d2a9da26bd7ca # via deepdiff packaging==21.3 \ --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ @@ -1287,9 +1236,7 @@ pip-tools==6.13.0 \ platformdirs==4.2.2 \ --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 - # via - # pylint - # virtualenv + # via virtualenv pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 @@ -1306,9 +1253,9 @@ prometheus-client==0.11.0 \ --hash=sha256:3a8baade6cb80bcfe43297e33e7623f3118d660d41387593758e2fb1ea173a86 \ --hash=sha256:b014bc76815eb1399da8ce5fc84b7717a3e63652b0c0f8804092c9363acab1b2 # via -r requirements.in -prompt-toolkit==3.0.46 \ - 
--hash=sha256:45abe60a8300f3c618b23c16c4bb98c6fc80af8ce8b17c7ae92db48db3ee63c1 \ - --hash=sha256:869c50d682152336e23c4db7f74667639b5047494202ffe7670817053fd57795 +prompt-toolkit==3.0.47 \ + --hash=sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10 \ + --hash=sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360 # via questionary proto-plus==1.23.0 \ --hash=sha256:89075171ef11988b3fa157f5dbd8b9cf09d65fffee97e29ce403cd8defba19d2 \ @@ -1349,9 +1296,9 @@ pyasn1-modules==0.4.0 \ --hash=sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6 \ --hash=sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b # via google-auth -pycodestyle==2.11.1 \ - --hash=sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f \ - --hash=sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67 +pycodestyle==2.12.0 \ + --hash=sha256:442f950141b4f43df752dd303511ffded3a04c2b6fb7f65980574f0c31e6e79c \ + --hash=sha256:949a39f6b86c3e1515ba1787c2022131d165a8ad271b11370a8819aa070269e4 # via autopep8 pycparser==2.22 \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ @@ -1480,10 +1427,6 @@ pyjwt[crypto]==2.8.0 \ # okta # pygithub # pyjwt -pylint==2.11.1 \ - --hash=sha256:0f358e221c45cbd4dad2a1e4b883e75d28acdcccd29d40c76eb72b307269b126 \ - --hash=sha256:2c9843fff1a88ca0ad98a256806c82c5a8f86086e7ccbdb93297d86c3f90c436 - # via -r requirements.in pynacl==1.5.0 \ --hash=sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 \ --hash=sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d \ @@ -1683,6 +1626,25 @@ rsa==4.7.2 \ # via # awscli # google-auth +ruff==0.4.7 \ + --hash=sha256:07fc80bbb61e42b3b23b10fda6a2a0f5a067f810180a3760c5ef1b456c21b9db \ + --hash=sha256:10f2204b9a613988e3484194c2c9e96a22079206b22b787605c255f130db5ed7 \ + --hash=sha256:10f973d521d910e5f9c72ab27e409e839089f955be8a4c8826601a6323a89753 \ + --hash=sha256:13a1768b0691619822ae6d446132dbdfd568b700ecd3652b20d4e8bc1e498f78 \ + --hash=sha256:2331d2b051dc77a289a653fcc6a42cce357087c5975738157cd966590b18b5e1 \ + --hash=sha256:50e9651578b629baec3d1513b2534de0ac7ed7753e1382272b8d609997e27e83 \ + --hash=sha256:59c3d110970001dfa494bcd95478e62286c751126dfb15c3c46e7915fc49694f \ + --hash=sha256:769e5a51df61e07e887b81e6f039e7ed3573316ab7dd9f635c5afaa310e4030e \ + --hash=sha256:8874a9df7766cb956b218a0a239e0a5d23d9e843e4da1e113ae1d27ee420877a \ + --hash=sha256:9e3ab684ad403a9ed1226894c32c3ab9c2e0718440f6f50c7c5829932bc9e054 \ + --hash=sha256:a7c0083febdec17571455903b184a10026603a1de078428ba155e7ce9358c5f6 \ + --hash=sha256:ad1b20e66a44057c326168437d680a2166c177c939346b19c0d6b08a62a37589 \ + --hash=sha256:b9de9a6e49f7d529decd09381c0860c3f82fa0b0ea00ea78409b785d2308a567 \ + --hash=sha256:cbf5d818553add7511c38b05532d94a407f499d1a76ebb0cad0374e32bc67202 \ + --hash=sha256:e089371c67892a73b6bb1525608e89a2aca1b77b5440acf7a71dda5dac958f9e \ + --hash=sha256:fa4dafe3fe66d90e2e2b63fa1591dd6e3f090ca2128daa0be33db894e6c18648 \ + --hash=sha256:fa9773c6c00f4958f73b317bc0fd125295110c3776089f6ef318f4b775f0abe4 + # via -r requirements.in s3transfer==0.6.2 \ --hash=sha256:b014be3a8a2aab98cfe1abc7229cc5a9a0cf05eb9c1f2b86b230fd8df3f78084 \ --hash=sha256:cab66d3380cca3e70939ef2255d01cd8aece6a4907a9528740f668c4b0611861 @@ -1692,12 +1654,14 @@ s3transfer==0.6.2 \ scylla-driver==3.26.8 \ --hash=sha256:062e77352dc694cd6465a3e09436eebfb0ee2254806e75db5ebf65140355c26d \ 
--hash=sha256:0797cb40f4a93ae14132fef524cf3d7cd2b6644085a2de4ed740bf067e4bce45 \ + --hash=sha256:0a5ffd21e93af7f6f9620e7a8cf487d21bd0b638e41251e8563fd851d6f71a48 \ --hash=sha256:0b47bc799e8d89b874f999fac4c5f60421d569d512cf3604ac97a35022af3a79 \ --hash=sha256:0faf834985af19a80e4fbb8503e91351c6419651821e705ec98a2bc2849a9548 \ --hash=sha256:13fdff491c191f992da006382749f216949f3565eaea3854410cae8ee5ff6e57 \ --hash=sha256:146b080e79ea11e3ae0753ae9b06c8005bf53e958697caeec9eb3f77666a570d \ --hash=sha256:15380a4c5038d79456e34e7fe6db2e4a7de9f32aea1b2156fee8753324d979fd \ --hash=sha256:20a4d18a69e710bf0ce41db77249097761806356f58a2a6792fe55418a753ad5 \ + --hash=sha256:2193ba6ef01cd713c81ce722a38601a12f4b7dd9dca16e15609127d93cab3167 \ --hash=sha256:2433beb87519599830c4471de61c6429d7f1a1b32be515aea8dc83dd8b3d1d0c \ --hash=sha256:2db6847d75bd6f5d26ec211f092e65c22d83012a2fe8c9b344ed4247e68d29de \ --hash=sha256:3ee479330cc79f1f73036e4d32fd9838f6934e9bfa0f4f4cb2f93c6c44aedfdb \ @@ -1709,12 +1673,15 @@ scylla-driver==3.26.8 \ --hash=sha256:51b37667176d8ec775baac219e0e1d9344d9bc493e4b3d2887efee5661279368 \ --hash=sha256:5524dc80bd47a6f8395478dee9fb1a6ee092fe8b915e9b1dc3c089ebb61caef0 \ --hash=sha256:57c388298e5520004f4e46794578ccdf655b1873c313a2240b24747daac7edec \ + --hash=sha256:593527ba7a4b684ea9f28a50f205642aac6f3c3b913a54f39c41583b0a2f893b \ --hash=sha256:5ccf0a7c2b880093083adfe5d4e2ac66a45bd5008396da00e6e8ccaa0a4e9103 \ --hash=sha256:74565836561208c5106165fec6248c1b3a0e13a1fecbb8fbdea77ed4db2d20d0 \ --hash=sha256:756db4d57b076aaa67f9c457922b10376e74d2bb8ad16f0aae79d6b5b22d6b4b \ --hash=sha256:761fbf0f2c51de8f8327e5cd6f2a4c49b9e2a401907905332c14f3491089674a \ --hash=sha256:7677a217a70352c0c53eef96b4be9fdfd6395c29f6c0940dd143a9c4749011dc \ + --hash=sha256:76e5370fac089421f13621471b8ebcf381e6b8fa230d8ffce0aa36ad344bf3de \ --hash=sha256:7ddd3e8df44609bf7f6dd2d403f7cb6845f3b90180d9d1fc8eff17d91b8d47e2 \ + --hash=sha256:851039bb9421120efd97c08631355d4421508599324e8c15b5a0b0c6365a1ea4 \ --hash=sha256:870a0fd86644b50a0688496a2c6b0d00b72152a108011c80381f95a448485c13 \ --hash=sha256:8817aee4de971c0e1c4a09201a9ca1476fa1112bc4e466e4a07e5a033db8be1a \ --hash=sha256:8bb0f741bf7d8ad9cba1ee1bb52511654ea771c7aa67661482204c185201eff4 \ @@ -1725,11 +1692,14 @@ scylla-driver==3.26.8 \ --hash=sha256:94525445588d6e1982bb624460d1e53f32ea854f288dd15696ba655e30df0cb8 \ --hash=sha256:b1bef1b12a8d77600ff10e5cd39a8cd66f742b1f9b9a852a18fe9fd245ad1919 \ --hash=sha256:bb10c43e04ba9a2f8dc510d5133cf6bd299d6349dd4e4c2927fb968a6b10ee4a \ + --hash=sha256:be99bc3b4c33ee26c84d756d9c2e5e4f4ae1d6b0f2c9a1e49a38836129b13afd \ --hash=sha256:c25298a52c115f2cb345cd9ca0f71e2a4c9584f386fbb57373cb8de46f1b7e0d \ + --hash=sha256:c7f71d6db04da42b15fb5c602c3410560df992c046df486938345fcb996bd151 \ --hash=sha256:cd4a384d3fdf0f1b110d7a249bdd3c2a68b16c2ed63c3dec3c85b32ee9fa2273 \ --hash=sha256:d3bc4cd9e40ba74f38d607161aa39ba6ed8d0af28bc989d373c2bce5ca8f6175 \ --hash=sha256:d93ebfe445173e9675e52906cc55bab6bd051cc6442cf9594e7e1578e23e8d8b \ --hash=sha256:d9c03f7ac60badf63cfeac3fa02e07d2b2bf9b76f426ab13b3dae5cc6891a453 \ + --hash=sha256:e33f410f7ba1034acd2559cfbceca9c51722ff665e344c0bd04d4c95e99ff3d9 \ --hash=sha256:e75d16a710b512707da96a74b3250749468b5067dd1f9ad852df57875a63df0a \ --hash=sha256:f196bc61feebd5bde0707f876241c9e96a9c70a2bcae91cdae3a28599147662c \ --hash=sha256:f3af534619ecadfce4fdd9ee3ba34f4764a431c976c2a79cac09e4527d557007 @@ -1831,7 +1801,6 @@ toml==0.10.2 \ # via # autopep8 # pre-commit - # pylint tomli==2.0.1 \ 
--hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f @@ -1848,17 +1817,17 @@ typepy[datetime]==1.3.2 \ # sqliteschema # tabledata # tcconfig -types-awscrt==0.20.9 \ - --hash=sha256:3ae374b553e7228ba41a528cf42bd0b2ad7303d806c73eff4aaaac1515e3ea4e \ - --hash=sha256:64898a2f4a2468f66233cb8c29c5f66de907cf80ba1ef5bb1359aef2f81bb521 +types-awscrt==0.20.12 \ + --hash=sha256:0beabdde0205dc1da679ea464fd3f98b570ef4f0fc825b155a974fb51b21e8d9 \ + --hash=sha256:521ce54cc4dad9fe6480556bb0f8315a508106938ba1f2a0baccfcea7d4a4dee # via botocore-stubs types-s3transfer==0.10.1 \ --hash=sha256:02154cce46528287ad76ad1a0153840e0492239a0887e8833466eccf84b98da0 \ --hash=sha256:49a7c81fa609ac1532f8de3756e64b58afcecad8767933310228002ec7adff74 # via boto3-stubs -typing-extensions==4.12.1 \ - --hash=sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a \ - --hash=sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1 +typing-extensions==4.12.2 \ + --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ + --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 # via # azure-core # azure-storage-blob @@ -1909,61 +1878,78 @@ wheel==0.43.0 \ --hash=sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85 \ --hash=sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81 # via pip-tools -wrapt==1.13.3 \ - --hash=sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179 \ - --hash=sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096 \ - --hash=sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374 \ - --hash=sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df \ - --hash=sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185 \ - --hash=sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785 \ - --hash=sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7 \ - --hash=sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909 \ - --hash=sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918 \ - --hash=sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33 \ - --hash=sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068 \ - --hash=sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829 \ - --hash=sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af \ - --hash=sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79 \ - --hash=sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce \ - --hash=sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc \ - --hash=sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36 \ - --hash=sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade \ - --hash=sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca \ - --hash=sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32 \ - --hash=sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125 \ - --hash=sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e \ - --hash=sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709 \ - --hash=sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f \ - 
--hash=sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b \ - --hash=sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb \ - --hash=sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb \ - --hash=sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489 \ - --hash=sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640 \ - --hash=sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb \ - --hash=sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851 \ - --hash=sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d \ - --hash=sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44 \ - --hash=sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13 \ - --hash=sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2 \ - --hash=sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb \ - --hash=sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b \ - --hash=sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9 \ - --hash=sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755 \ - --hash=sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c \ - --hash=sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a \ - --hash=sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf \ - --hash=sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3 \ - --hash=sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229 \ - --hash=sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e \ - --hash=sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de \ - --hash=sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554 \ - --hash=sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10 \ - --hash=sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80 \ - --hash=sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056 \ - --hash=sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea - # via - # astroid - # deprecated +wrapt==1.16.0 \ + --hash=sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc \ + --hash=sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81 \ + --hash=sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09 \ + --hash=sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e \ + --hash=sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca \ + --hash=sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0 \ + --hash=sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb \ + --hash=sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487 \ + --hash=sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40 \ + --hash=sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c \ + --hash=sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060 \ + --hash=sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202 \ + --hash=sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41 \ + --hash=sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9 \ + --hash=sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b \ + 
--hash=sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664 \ + --hash=sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d \ + --hash=sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362 \ + --hash=sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00 \ + --hash=sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc \ + --hash=sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1 \ + --hash=sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267 \ + --hash=sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956 \ + --hash=sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966 \ + --hash=sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1 \ + --hash=sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228 \ + --hash=sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72 \ + --hash=sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d \ + --hash=sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292 \ + --hash=sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0 \ + --hash=sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0 \ + --hash=sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36 \ + --hash=sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c \ + --hash=sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5 \ + --hash=sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f \ + --hash=sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73 \ + --hash=sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b \ + --hash=sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2 \ + --hash=sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593 \ + --hash=sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39 \ + --hash=sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389 \ + --hash=sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf \ + --hash=sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf \ + --hash=sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89 \ + --hash=sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c \ + --hash=sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c \ + --hash=sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f \ + --hash=sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440 \ + --hash=sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465 \ + --hash=sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136 \ + --hash=sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b \ + --hash=sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8 \ + --hash=sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3 \ + --hash=sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8 \ + --hash=sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6 \ + --hash=sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e \ + --hash=sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f \ + 
--hash=sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c \ + --hash=sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e \ + --hash=sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8 \ + --hash=sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2 \ + --hash=sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020 \ + --hash=sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35 \ + --hash=sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d \ + --hash=sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3 \ + --hash=sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537 \ + --hash=sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809 \ + --hash=sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d \ + --hash=sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a \ + --hash=sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4 + # via deprecated xmltodict==0.13.0 \ --hash=sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56 \ --hash=sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852 @@ -2077,6 +2063,5 @@ setuptools==70.0.0 \ --hash=sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0 # via # anyconfig - # astroid # kubernetes # pip-tools diff --git a/sct.py b/sct.py index 549372c333e..dba5060decb 100755 --- a/sct.py +++ b/sct.py @@ -321,7 +321,7 @@ def clean_resources(ctx, post_behavior, user, test_id, logdir, dry_run, backend) @sct_option('--test-id', 'test_id', help='test id to filter by') @click.option('--verbose', is_flag=True, default=False, help='if enable, will log progress') @click.pass_context -def list_resources(ctx, user, test_id, get_all, get_all_running, verbose): +def list_resources(ctx, user, test_id, get_all, get_all_running, verbose): # noqa: PLR0912, PLR0915 # pylint: disable=too-many-locals,too-many-arguments,too-many-branches,too-many-statements add_file_logger() @@ -1270,7 +1270,7 @@ def get_test_results_for_failed_test(test_status, start_time): @click.option('--runner-ip', type=str, required=False, help="Sct runner ip for the running test") @click.option('--email-recipients', help="Send email to next recipients") @click.option('--logdir', help='Directory where to find testrun folder') -def send_email(test_id=None, test_status=None, start_time=None, started_by=None, runner_ip=None, +def send_email(test_id=None, test_status=None, start_time=None, started_by=None, runner_ip=None, # noqa: PLR0912 email_recipients=None, logdir=None): if started_by is None: started_by = get_username() @@ -1475,11 +1475,11 @@ def prepare_regions(cloud_provider, regions): for region in regions: if cloud_provider == "aws": - region = AwsRegion(region_name=region) + region = AwsRegion(region_name=region) # noqa: PLW2901 elif cloud_provider == "azure": - region = AzureRegion(region_name=region) + region = AzureRegion(region_name=region) # noqa: PLW2901 elif cloud_provider == "gce": - region = GceRegion(region_name=region) + region = GceRegion(region_name=region) # noqa: PLW2901 else: raise Exception(f'Unsupported Cloud provider: `{cloud_provider}') region.configure() diff --git a/sdcm/audit.py b/sdcm/audit.py index 177d708f8de..47cb3df7292 100644 --- a/sdcm/audit.py +++ b/sdcm/audit.py @@ -121,7 +121,7 @@ def get_audit_log_rows(node, # pylint: disable=too-many-locals if '!NOTICE' in line[:120] and 
'scylla-audit' in line[:120]: while line[-2] != '"': # read multiline audit log (must end with ") - line += log_file.readline() + line += log_file.readline() # noqa: PLW2901 audit_data = line.split(': "', maxsplit=1)[-1] try: node, cat, consistency, table, keyspace_name, opr, source, username, error = audit_data.split( diff --git a/sdcm/cluster.py b/sdcm/cluster.py index 812b6a0a76e..46c04e369b7 100644 --- a/sdcm/cluster.py +++ b/sdcm/cluster.py @@ -956,15 +956,14 @@ def start_journal_thread(self): if self._journal_thread: self.log.debug("Use %s as logging daemon", type(self._journal_thread).__name__) self._journal_thread.start() + elif logs_transport == 'syslog-ng': + self.log.debug("Use no logging daemon since log transport is syslog-ng") else: - if logs_transport == 'syslog-ng': - self.log.debug("Use no logging daemon since log transport is syslog-ng") - else: - TestFrameworkEvent( - source=self.__class__.__name__, - source_method='start_journal_thread', - message="Got no logging daemon by unknown reason" - ).publish_or_dump() + TestFrameworkEvent( + source=self.__class__.__name__, + source_method='start_journal_thread', + message="Got no logging daemon by unknown reason" + ).publish_or_dump() def start_coredump_thread(self): self._coredump_thread = CoredumpExportSystemdThread(self, self._maximum_number_of_cores_to_publish) @@ -1223,7 +1222,7 @@ def is_port_used(self, port: int, service_name: str) -> bool: # this is the case output is empty return False else: - self.log.error("Error checking for '%s' on port %s: rc:", service_name, port, result) + self.log.error("Error checking for '%s' on port %s: rc: %s", service_name, port, result) return False except Exception as details: # pylint: disable=broad-except self.log.error("Error checking for '%s' on port %s: %s", service_name, port, details) @@ -2699,7 +2698,7 @@ def get_nodes_status(self) -> dict[BaseNode, dict]: if node := node_ip_map.get(node_ip): nodes_status[node] = {'status': node_properties['state'], 'dc': dc, 'rack': node_properties['rack']} - else: + else: # noqa: PLR5501 if node_ip: LOGGER.error("Get nodes statuses. Failed to find a node in cluster by IP: %s", node_ip) @@ -3461,7 +3460,7 @@ def create_ssl_context(keyfile: str, certfile: str, truststore: str): ssl_context.load_verify_locations(cafile=truststore) return ssl_context - def _create_session(self, node, keyspace, user, password, compression, protocol_version, load_balancing_policy=None, port=None, + def _create_session(self, node, keyspace, user, password, compression, protocol_version, load_balancing_policy=None, port=None, # noqa: PLR0913 ssl_context=None, node_ips=None, connect_timeout=None, verbose=True, connection_bundle_file=None): if not port: port = node.CQL_PORT @@ -5119,7 +5118,7 @@ def kill_stress_thread(self): if self.nodes and self.nodes[0].is_kubernetes(): for node in self.nodes: node.remoter.stop() - else: + else: # noqa: PLR5501 if self.params.get("use_prepared_loaders"): self.kill_cassandra_stress_thread() else: @@ -5171,7 +5170,7 @@ def _parse_cs_summary(lines): enable_parse = False for line in lines: - line = line.strip() + line = line.strip() # noqa: PLW2901 if not line: continue # Parse loader & cpu info diff --git a/sdcm/cluster_aws.py b/sdcm/cluster_aws.py index 0f33f2c38e9..a2f6db278fe 100644 --- a/sdcm/cluster_aws.py +++ b/sdcm/cluster_aws.py @@ -80,7 +80,7 @@ class AWSCluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attr Cluster of Node objects, started on Amazon EC2. 
""" - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, cluster_uuid=None, ec2_instance_type='c6i.xlarge', ec2_ami_username='root', ec2_user_data='', ec2_block_device_mappings=None, @@ -356,7 +356,7 @@ def update_bootstrap(ec2_user_data, enable_auto_bootstrap): ec2_user_data.replace('--bootstrap false', '--bootstrap true') else: ec2_user_data += ' --bootstrap true ' - else: + else: # noqa: PLR5501 if '--bootstrap ' in ec2_user_data: ec2_user_data.replace('--bootstrap true', '--bootstrap false') else: @@ -844,7 +844,7 @@ def ena_support(self) -> bool: class ScyllaAWSCluster(cluster.BaseScyllaCluster, AWSCluster): - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, ec2_instance_type='c6i.xlarge', ec2_ami_username='centos', ec2_block_device_mappings=None, @@ -1050,7 +1050,7 @@ def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: class MonitorSetAWS(cluster.BaseMonitorSet, AWSCluster): - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, ec2_instance_type='c6i.xlarge', ec2_block_device_mappings=None, ec2_ami_username='centos', diff --git a/sdcm/cluster_azure.py b/sdcm/cluster_azure.py index ef9623a4c14..758efe68142 100644 --- a/sdcm/cluster_azure.py +++ b/sdcm/cluster_azure.py @@ -174,7 +174,7 @@ def configure_remote_logging(self) -> None: class AzureCluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attributes - def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-arguments, too-many-locals + def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-arguments, too-many-locals # noqa: PLR0913 provisioners: List[AzureProvisioner], credentials, cluster_uuid=None, instance_type='Standard_L8s_v3', region_names=None, user_name='root', cluster_prefix='cluster', diff --git a/sdcm/cluster_gce.py b/sdcm/cluster_gce.py index 881b58bdeb0..44bef1e328d 100644 --- a/sdcm/cluster_gce.py +++ b/sdcm/cluster_gce.py @@ -248,7 +248,7 @@ class GCECluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attr """ _gce_service: compute_v1.InstancesClient - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 cluster_uuid=None, gce_instance_type='n1-standard-1', gce_region_names=None, gce_n_local_ssd=1, gce_image_username='root', cluster_prefix='cluster', node_prefix='node', n_nodes=3, add_disks=None, params=None, node_type=None, @@ -336,7 +336,7 @@ def _get_disks_struct(self, name, dc_idx): gce_disk_struct.append(self._get_local_ssd_disk_struct(name=name, index=i, dc_idx=dc_idx)) if self._add_disks: for disk_type, disk_size in self._add_disks.items(): - disk_size = int(disk_size) + disk_size = int(disk_size) # noqa: PLW2901 if disk_size: 
                    gce_disk_struct.append(self._get_persistent_disk_struct(name=name, disk_size=disk_size,
                                                                            disk_type=disk_type, dc_idx=dc_idx))
@@ -542,7 +542,7 @@ def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_boots

 class ScyllaGCECluster(cluster.BaseScyllaCluster, GCECluster):

-    def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials,  # pylint: disable=too-many-arguments
+    def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials,  # pylint: disable=too-many-arguments  # noqa: PLR0913
                  gce_instance_type='n1-standard-1', gce_n_local_ssd=1,
                  gce_image_username='centos',
                  user_prefix=None, n_nodes=3, add_disks=None, params=None, gce_datacenter=None,
                  service_accounts=None):
@@ -578,7 +578,7 @@ def _wait_for_preinstalled_scylla(node):

 class LoaderSetGCE(cluster.BaseLoaderSet, GCECluster):

-    def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials,  # pylint: disable=too-many-arguments
+    def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials,  # pylint: disable=too-many-arguments  # noqa: PLR0913
                  gce_instance_type='n1-standard-1', gce_n_local_ssd=1,
                  gce_image_username='centos',
                  user_prefix=None, n_nodes=10, add_disks=None, params=None, gce_datacenter=None):
@@ -609,7 +609,7 @@ def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_s

 class MonitorSetGCE(cluster.BaseMonitorSet, GCECluster):

-    def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials,  # pylint: disable=too-many-arguments
+    def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials,  # pylint: disable=too-many-arguments  # noqa: PLR0913
                  gce_instance_type='n1-standard-1', gce_n_local_ssd=1,
                  gce_image_username='centos', user_prefix=None, n_nodes=1,
                  targets=None, add_disks=None, params=None, gce_datacenter=None,
diff --git a/sdcm/cluster_k8s/__init__.py b/sdcm/cluster_k8s/__init__.py
index a5a516b4aa0..48e1f6bb46d 100644
--- a/sdcm/cluster_k8s/__init__.py
+++ b/sdcm/cluster_k8s/__init__.py
@@ -2783,7 +2783,7 @@ def _get_rack_nodes(self, rack: int, dc_idx: int) -> list:
         return sorted(
             [node for node in self.nodes if node.rack == rack and node.dc_idx == dc_idx], key=lambda n: n.name)

-    def add_nodes(self,  # pylint: disable=too-many-locals,too-many-branches
+    def add_nodes(self,  # pylint: disable=too-many-locals,too-many-branches  # noqa: PLR0913
                   count: int,
                   ec2_user_data: str = "",
                   # NOTE: 'dc_idx=None' means 'create %count% nodes on each K8S cluster'
diff --git a/sdcm/cluster_k8s/eks.py b/sdcm/cluster_k8s/eks.py
index 818adc73a37..3e611c4271c 100644
--- a/sdcm/cluster_k8s/eks.py
+++ b/sdcm/cluster_k8s/eks.py
@@ -162,7 +162,7 @@ class EksNodePool(CloudK8sNodePool):
     disk_type: Literal["standard", "io1", "io2", "gp2", "gp3", "sc1", "st1"]

     # pylint: disable=too-many-arguments,too-many-locals
-    def __init__(
+    def __init__(  # noqa: PLR0913
             self,
             k8s_cluster: 'EksCluster',
             name: str,
@@ -340,7 +340,7 @@ class EksCluster(KubernetesCluster, EksClusterCleanupMixin):  # pylint: disable=
     short_cluster_name: str

     # pylint: disable=too-many-arguments
-    def __init__(self,
+    def __init__(self,  # noqa: PLR0913
                  eks_cluster_version,
                  ec2_security_group_ids,
                  ec2_subnet_ids,
diff --git a/sdcm/cluster_k8s/gke.py b/sdcm/cluster_k8s/gke.py
index 8b3352fd15c..9690fd81b4d 100644
--- a/sdcm/cluster_k8s/gke.py
+++ b/sdcm/cluster_k8s/gke.py
@@ -142,7 +142,7 @@ class GkeNodePool(CloudK8sNodePool):
     k8s_cluster: 'GkeCluster'

     # pylint: disable=too-many-arguments
-    def __init__(
+    def __init__(  # noqa: PLR0913
             self,
             k8s_cluster: 'KubernetesCluster',
             name: str,
@@ -267,7 +267,7 @@ class GkeCluster(KubernetesCluster):
     pools: Dict[str, GkeNodePool]

     # pylint: disable=too-many-arguments,too-many-locals
-    def __init__(self,
+    def __init__(self,  # noqa: PLR0913
                  gke_cluster_version,
                  gke_k8s_release_channel,
                  gce_disk_size,
diff --git a/sdcm/cluster_k8s/mini_k8s.py b/sdcm/cluster_k8s/mini_k8s.py
index f1613c4cfae..32f40315937 100644
--- a/sdcm/cluster_k8s/mini_k8s.py
+++ b/sdcm/cluster_k8s/mini_k8s.py
@@ -607,9 +607,8 @@ def on_deploy_completed(self):  # pylint: disable=too-many-branches
             images_to_cache.extend(self.cert_manager_images)
         if self.params.get("k8s_local_volume_provisioner_type") != 'static':
             images_to_cache.append(self.dynamic_local_volume_provisioner_image)
-        else:
-            if provisioner_image := self.static_local_volume_provisioner_image:
-                images_to_cache.append(provisioner_image)
+        elif provisioner_image := self.static_local_volume_provisioner_image:
+            images_to_cache.append(provisioner_image)
         if self.params.get("k8s_use_chaos_mesh"):
             chaos_mesh_version = ChaosMesh.VERSION
             if not chaos_mesh_version.startswith("v"):
diff --git a/sdcm/coredump.py b/sdcm/coredump.py
index 3719a4e74ae..a16bd5e3efe 100644
--- a/sdcm/coredump.py
+++ b/sdcm/coredump.py
@@ -397,7 +397,7 @@ def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo):
         #
         # Coredump could be absent when file was removed
         for line in coredump_info:
-            line = line.strip()
+            line = line.strip()  # noqa: PLW2901
             if line.startswith('Executable:'):
                 executable = line[12:].strip()
             elif line.startswith('Command Line:'):
@@ -407,7 +407,7 @@ def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo):
                 # Storage: /var/lib/systemd/coredump/core.vi.1000.6c4de4c206a0476e88444e5ebaaac482.18554.1578994298000000.lz4 (inaccessible)
                 if "inaccessible" in line:
                     continue
-                line = line.replace('(present)', '')
+                line = line.replace('(present)', '')  # noqa: PLW2901
                 corefile = line[line.find(':') + 1:].strip()
             elif line.startswith('Timestamp:'):
                 timestring = None
diff --git a/sdcm/cql_stress_cassandra_stress_thread.py b/sdcm/cql_stress_cassandra_stress_thread.py
index 3d4c3806d63..59d801d417b 100644
--- a/sdcm/cql_stress_cassandra_stress_thread.py
+++ b/sdcm/cql_stress_cassandra_stress_thread.py
@@ -57,7 +57,7 @@ def run(self) -> None:
 class CqlStressCassandraStressThread(CassandraStressThread):
     DOCKER_IMAGE_PARAM_NAME = 'stress_image.cql-stress-cassandra-stress'

-    def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1, keyspace_name='', compaction_strategy='',  # pylint: disable=too-many-arguments
+    def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1, keyspace_name='', compaction_strategy='',  # pylint: disable=too-many-arguments  # noqa: PLR0913
                  profile=None, node_list=None, round_robin=False, client_encrypt=False, stop_test_on_failure=True,
                  params=None):
         super().__init__(loader_set=loader_set, stress_cmd=stress_cmd, timeout=timeout,
diff --git a/sdcm/db_log_reader.py b/sdcm/db_log_reader.py
index a046de58622..8a145ebb6c4 100644
--- a/sdcm/db_log_reader.py
+++ b/sdcm/db_log_reader.py
@@ -81,7 +81,7 @@ def __init__(self,
     def _continuous_event_patterns(self):
         return get_pattern_to_event_to_func_mapping(node=self._node_name)

-    def _read_and_publish_events(self) -> None:
+    def _read_and_publish_events(self) -> None:  # noqa: PLR0912
        """Search for all known patterns listed in `sdcm.sct_events.database.SYSTEM_ERROR_EVENTS'."""
        # pylint: disable=too-many-branches,too-many-locals,too-many-statements
@@ -98,7 +98,7 @@ def _read_and_publish_events(self) -> None:
             for index, line in enumerate(db_file, start=self._last_line_no + 1):
                 if len(line) > LOG_LINE_MAX_PROCESSING_SIZE:
                     # trim to avoid filling the memory when lot of long line is writen
-                    line = line[:LOG_LINE_MAX_PROCESSING_SIZE]
+                    line = line[:LOG_LINE_MAX_PROCESSING_SIZE]  # noqa: PLW2901

                 # Postpone processing line with no ending in case if half of line is written to the disc
                 if line[-1] == '\n' or self._skipped_end_line > 20:
@@ -115,7 +115,7 @@ def _read_and_publish_events(self) -> None:
                     pass

                 if self._log_lines:
-                    line = line.strip()
+                    line = line.strip()  # noqa: PLW2901
                     for pattern in self.EXCLUDE_FROM_LOGGING:
                         if pattern in line:
                             break
diff --git a/sdcm/db_stats.py b/sdcm/db_stats.py
index 1151b7bfd5e..c974cf12a1d 100644
--- a/sdcm/db_stats.py
+++ b/sdcm/db_stats.py
@@ -607,17 +607,16 @@ def get_setup_details(self):
         for key, value in test_params:
             if key in exclude_details or (isinstance(key, str) and key.startswith('stress_cmd')):  # pylint: disable=no-else-continue
                 continue
+            elif is_gce and key in \
+                    ['instance_type_loader',  # pylint: disable=no-else-continue
+                     'instance_type_monitor',
+                     'instance_type_db']:
+                # exclude these params from gce run
+                continue
+            elif key == 'n_db_nodes' and isinstance(value, str) and re.search(r'\s', value):  # multidc
+                setup_details['n_db_nodes'] = sum([int(i) for i in value.split()])
             else:
-                if is_gce and key in \
-                        ['instance_type_loader',  # pylint: disable=no-else-continue
-                         'instance_type_monitor',
-                         'instance_type_db']:
-                    # exclude these params from gce run
-                    continue
-                elif key == 'n_db_nodes' and isinstance(value, str) and re.search(r'\s', value):  # multidc
-                    setup_details['n_db_nodes'] = sum([int(i) for i in value.split()])
-                else:
-                    setup_details[key] = value
+                setup_details[key] = value

         if self.params.get('cluster_backend') == 'aws':
             setup_details["ami_tags_db_scylla"] = []
diff --git a/sdcm/ec2_client.py b/sdcm/ec2_client.py
index 901e741815b..8cdb22ef649 100644
--- a/sdcm/ec2_client.py
+++ b/sdcm/ec2_client.py
@@ -69,7 +69,7 @@ def _get_ec2_client(self, region_name=None) -> EC2Client:
             boto3.setup_default_session(region_name=region_name)
             return self._get_ec2_client()

-    def _request_spot_instance(self, instance_type, image_id, region_name, network_if, spot_price, key_pair='',  # pylint: disable=too-many-arguments
+    def _request_spot_instance(self, instance_type, image_id, region_name, network_if, spot_price, key_pair='',  # pylint: disable=too-many-arguments  # noqa: PLR0913
                                user_data='', count=1, duration=0, request_type='one-time', block_device_mappings=None,
                                aws_instance_profile=None, placement_group_name=None):
         """
diff --git a/sdcm/fill_db_data.py b/sdcm/fill_db_data.py
index 50f86698abd..22fd951f8b2 100644
--- a/sdcm/fill_db_data.py
+++ b/sdcm/fill_db_data.py
@@ -3118,7 +3118,7 @@ def cql_create_tables(self, session):
         with self._execute_and_log(f'Created tables for test "{test_name}" in {{}} seconds'):
             for create_table in item['create_tables']:
                 if self.version_cdc_support():
-                    create_table = self._enable_cdc(item, create_table)
+                    create_table = self._enable_cdc(item, create_table)  # noqa: PLW2901
                 # wait a while before creating index, there is a delay of create table for
                 # waiting the schema agreement
                 if 'CREATE INDEX' in create_table.upper():
@@ -3384,11 +3384,11 @@ def fill_table():
                     self.log.info('running now session.execute')
                     full_query_res = self.rows_to_list(session.execute(statement))
                     if not full_query_res:
-                        assert f'Query "{statement}" returned no entries'
+                        assert f'Query "{statement}" returned no entries'  # noqa: PLW0129
                     self.log.info('running now fetch_all_rows')
                     full_res = self.rows_to_list(
                         fetch_all_rows(session=session, default_fetch_size=100, statement=statement))
                     if not full_res:
-                        assert f'Paged query "{statement}" returned no value'
+                        assert f'Paged query "{statement}" returned no value'  # noqa: PLW0129
                     self.log.info('will now compare results from session.execute and fetch_all_rows')
                     self.assertEqual(sorted(full_query_res), sorted(full_res), "Results should be identical")
diff --git a/sdcm/gemini_thread.py b/sdcm/gemini_thread.py
index fd0eca6da30..6f91ba23f53 100644
--- a/sdcm/gemini_thread.py
+++ b/sdcm/gemini_thread.py
@@ -137,7 +137,7 @@ def _run_stress(self, loader, loader_idx, cpu_idx):
         if result.exited:
             gemini_stress_event.add_result(result=result)
             gemini_stress_event.severity = Severity.ERROR
-        else:
+        else:  # noqa: PLR5501
             if result.stderr:
                 gemini_stress_event.add_result(result=result)
                 gemini_stress_event.severity = Severity.WARNING
diff --git a/sdcm/mgmt/cli.py b/sdcm/mgmt/cli.py
index 2765fb117aa..cfe9a8cd8de 100644
--- a/sdcm/mgmt/cli.py
+++ b/sdcm/mgmt/cli.py
@@ -548,7 +548,7 @@ def create_restore_task(self, restore_schema=False, restore_data=False, location
         LOGGER.debug("Created task id is: {}".format(task_id))
         return RestoreTask(task_id=task_id, cluster_id=self.id, manager_node=self.manager_node)

-    def create_backup_task(self, dc_list=None,  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches
+    def create_backup_task(self, dc_list=None,  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches  # noqa: PLR0913
                            dry_run=None, interval=None, keyspace_list=None, cron=None, location_list=None,
                            num_retries=None, rate_limit_list=None, retention=None, show_tables=None,
                            snapshot_parallel_list=None, start_date=None, upload_parallel_list=None, legacy_args=None):
diff --git a/sdcm/mgmt/operator.py b/sdcm/mgmt/operator.py
index 05085132ced..8db746b6194 100644
--- a/sdcm/mgmt/operator.py
+++ b/sdcm/mgmt/operator.py
@@ -223,7 +223,7 @@ def wait_for_healthchecks(self):
             throw_exc=True,
         )

-    def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_list=None, location_list=None,
+    def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_list=None, location_list=None,  # noqa: PLR0913
                                      num_retries=None, rate_limit_list=None, retention=None, cron=None,
                                      snapshot_parallel_list=None, start_date=None, upload_parallel_list=None,
                                      name=None) -> ScyllaOperatorBackupTask:
@@ -254,7 +254,7 @@ def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_lis
         return so_backup_task

     # pylint: disable=too-many-locals
-    def create_backup_task(
+    def create_backup_task(  # noqa: PLR0913
             self,
             dc_list=None,
             dry_run=None,
diff --git a/sdcm/monitorstack/__init__.py b/sdcm/monitorstack/__init__.py
index fb50926b76c..a4085701bf3 100644
--- a/sdcm/monitorstack/__init__.py
+++ b/sdcm/monitorstack/__init__.py
@@ -37,7 +37,7 @@ class ErrorUploadAnnotations(Exception):
     pass


-def restore_monitoring_stack(test_id, date_time=None):  # pylint: disable=too-many-return-statements,too-many-locals
+def restore_monitoring_stack(test_id, date_time=None):  # pylint: disable=too-many-return-statements,too-many-locals  # noqa: PLR0911
     if not is_docker_available():
         return False

diff --git a/sdcm/nemesis.py b/sdcm/nemesis.py
index f05080414f7..880e18e7db5 100644
--- a/sdcm/nemesis.py
+++ b/sdcm/nemesis.py
@@ -261,10 +261,10 @@ def _init_num_deletions_factor(self):
         if not isinstance(stress_cmds, list):
             stress_cmds = [stress_cmds]
         for stress_cmd in stress_cmds:
-            stress_cmd = stress_cmd.split()
+            stress_cmd_splitted = stress_cmd.split()
             # In case background load has writes, we can delete all available partitions,
             # since they are rewritten. Otherwise, we can only delete some of them.
-            if 'scylla-bench' in stress_cmd and '-mode=write' in stress_cmd:
+            if 'scylla-bench' in stress_cmd_splitted and '-mode=write' in stress_cmd_splitted:
                 self.num_deletions_factor = 1
                 break
@@ -480,7 +480,7 @@ def _is_it_on_kubernetes(self) -> bool:
         return isinstance(getattr(self.tester, "db_cluster", None), PodCluster)

     # pylint: disable=too-many-arguments,unused-argument
-    def get_list_of_methods_by_flags(  # pylint: disable=too-many-locals
+    def get_list_of_methods_by_flags(  # pylint: disable=too-many-locals  # noqa: PLR0913
             self,
             disruptive: Optional[bool] = None,
             run_with_gemini: Optional[bool] = None,
@@ -1373,9 +1373,9 @@ def _verify_resharding_on_k8s(self, cpus, dc_idx):

         # Check that liveness probe didn't report any errors
         # https://github.com/scylladb/scylla-operator/issues/894
-        liveness_probe_failures = list(liveness_probe_failures)
-        assert not liveness_probe_failures, (
-            f"There are liveness probe failures: {liveness_probe_failures}")
+        liveness_probe_failures_return = list(liveness_probe_failures)
+        assert not liveness_probe_failures_return, (
+            f"There are liveness probe failures: {liveness_probe_failures_return}")

         self.log.info("Resharding has successfully ended on whole Scylla cluster.")
@@ -2739,12 +2739,11 @@ def set_new_twcs_settings(settings: Dict[str, Any]) -> Dict[str, Any]:
                 current_size = 3
             else:
                 current_size += 10
+        elif (current_size // 60) > 10:
+            current_unit = "HOURS"
+            current_size = 11
         else:
-            if (current_size // 60) > 10:
-                current_unit = "HOURS"
-                current_size = 11
-            else:
-                current_size += 35
+            current_size += 35

         settings["gc"] = current_size * multiplier * expected_sstable_number // 2
         settings["dttl"] = current_size * multiplier * expected_sstable_number
@@ -5009,7 +5008,7 @@ def disrupt_disable_binary_gossip_execute_major_compaction(self):
             raise


-def disrupt_method_wrapper(method, is_exclusive=False):  # pylint: disable=too-many-statements
+def disrupt_method_wrapper(method, is_exclusive=False):  # pylint: disable=too-many-statements  # noqa: PLR0915
    """
    Log time elapsed for method to run

@@ -5576,17 +5575,17 @@ def prefixed(pref: str, val: str) -> str:
     weights: List[float] = []
     listed_methods: Set[str] = set()

-    for name, weight in dist.items():
-        name = str(name)
+    for _name, _weight in dist.items():
+        name = str(_name)
         prefixed_name = prefixed('disrupt_', name)
         if prefixed_name not in all_methods:
             raise ValueError(f"'{name}' is not a valid disruption. All methods: {all_methods.keys()}")

-        if not is_nonnegative_number(weight):
+        if not is_nonnegative_number(_weight):
             raise ValueError("Each disruption weight must be a non-negative number."
                              " '{weight}' is not a valid weight.")

-        weight = float(weight)
+        weight = float(_weight)
         if weight > 0:
             population.append(all_methods[prefixed_name])
             weights.append(weight)
diff --git a/sdcm/prometheus.py b/sdcm/prometheus.py
index 53d1c77c1cf..ee3a53840a4 100644
--- a/sdcm/prometheus.py
+++ b/sdcm/prometheus.py
@@ -68,7 +68,7 @@ def start_metrics_server():

 def nemesis_metrics_obj(metric_name_suffix=''):
-    global NM_OBJ  # pylint: disable=global-statement,global-variable-not-assigned
+    global NM_OBJ  # pylint: disable=global-statement,global-variable-not-assigned  # noqa: PLW0602
     if not NM_OBJ.get(metric_name_suffix):
         NM_OBJ[metric_name_suffix] = NemesisMetrics(metric_name_suffix)
     return NM_OBJ[metric_name_suffix]
@@ -184,7 +184,7 @@ def _publish_end_of_alerts(self, alerts: dict):
         for alert in alerts.values():
             if not alert.get('endsAt', None):
                 alert['endsAt'] = time.strftime("%Y-%m-%dT%H:%M:%S.0Z", time.gmtime())
-            alert = updated_dict.get(alert['fingerprint'], alert)
+            alert = updated_dict.get(alert['fingerprint'], alert)  # noqa: PLW2901
             labels = alert.get("labels") or {}
             alert_name = labels.get("alertname", "")
             node = labels.get("instance", "N/A")
diff --git a/sdcm/provision/azure/ip_provider.py b/sdcm/provision/azure/ip_provider.py
index 98e7b90051a..d996b37f88b 100644
--- a/sdcm/provision/azure/ip_provider.py
+++ b/sdcm/provision/azure/ip_provider.py
@@ -37,7 +37,7 @@ def __post_init__(self):
         try:
             ips = self._azure_service.network.public_ip_addresses.list(self._resource_group_name)
             for ip in ips:
-                ip = self._azure_service.network.public_ip_addresses.get(self._resource_group_name, ip.name)
+                ip = self._azure_service.network.public_ip_addresses.get(self._resource_group_name, ip.name)  # noqa: PLW2901
                 self._cache[ip.name] = ip
         except ResourceNotFoundError:
             pass
diff --git a/sdcm/provision/azure/network_interface_provider.py b/sdcm/provision/azure/network_interface_provider.py
index 78eef78907e..eb257e97e9f 100644
--- a/sdcm/provision/azure/network_interface_provider.py
+++ b/sdcm/provision/azure/network_interface_provider.py
@@ -36,7 +36,7 @@ def __post_init__(self):
         try:
             nics = self._azure_service.network.network_interfaces.list(self._resource_group_name)
             for nic in nics:
-                nic = self._azure_service.network.network_interfaces.get(self._resource_group_name, nic.name)
+                nic = self._azure_service.network.network_interfaces.get(self._resource_group_name, nic.name)  # noqa: PLW2901
                 self._cache[nic.name] = nic
         except ResourceNotFoundError:
             pass
diff --git a/sdcm/provision/azure/virtual_machine_provider.py b/sdcm/provision/azure/virtual_machine_provider.py
index 72d827f06e5..77159e797e5 100644
--- a/sdcm/provision/azure/virtual_machine_provider.py
+++ b/sdcm/provision/azure/virtual_machine_provider.py
@@ -43,8 +43,8 @@ def __post_init__(self):
         """Discover existing virtual machines for resource group."""
         try:
             v_ms = self._azure_service.compute.virtual_machines.list(self._resource_group_name)
-            for v_m in v_ms:
-                v_m = self._azure_service.compute.virtual_machines.get(self._resource_group_name, v_m.name)
+            for _v_m in v_ms:
+                v_m = self._azure_service.compute.virtual_machines.get(self._resource_group_name, _v_m.name)
                 if v_m.provisioning_state != "Deleting":
                     self._cache[v_m.name] = v_m
         except ResourceNotFoundError:
diff --git a/sdcm/provision/azure/virtual_network_provider.py b/sdcm/provision/azure/virtual_network_provider.py
index a3c959fa443..cb510cd56af 100644
--- a/sdcm/provision/azure/virtual_network_provider.py
+++ b/sdcm/provision/azure/virtual_network_provider.py
@@ -36,8 +36,8 @@ def __post_init__(self):
         """Discover existing virtual networks for resource group."""
         try:
             vnets = self._azure_service.network.virtual_networks.list(self._resource_group_name)
-            for vnet in vnets:
-                vnet = self._azure_service.network.virtual_networks.get(self._resource_group_name, vnet.name)
+            for _vnet in vnets:
+                vnet = self._azure_service.network.virtual_networks.get(self._resource_group_name, _vnet.name)
                 self._cache[vnet.name] = vnet
         except ResourceNotFoundError:
             pass
diff --git a/sdcm/provision/scylla_yaml/scylla_yaml.py b/sdcm/provision/scylla_yaml/scylla_yaml.py
index b1d4b83fbdb..9729c4e8af3 100644
--- a/sdcm/provision/scylla_yaml/scylla_yaml.py
+++ b/sdcm/provision/scylla_yaml/scylla_yaml.py
@@ -372,7 +372,7 @@ def _update_dict(self, obj: dict, fields_data: dict):
                 if not isinstance(attr_value, dict):
                     raise ValueError("Unexpected data `%s` in attribute `%s`" % (
                         type(attr_value), attr_name))
-                attr_value = attr_info.type(**attr_value)
+                attr_value = attr_info.type(**attr_value)  # noqa: PLW2901
             setattr(self, attr_name, attr_value)

     def update(self, *objects: Union['ScyllaYaml', dict]):
diff --git a/sdcm/remote/libssh2_client/exceptions.py b/sdcm/remote/libssh2_client/exceptions.py
index 5e98a37c844..b6f843cfcd6 100644
--- a/sdcm/remote/libssh2_client/exceptions.py
+++ b/sdcm/remote/libssh2_client/exceptions.py
@@ -102,11 +102,10 @@ def streams_for_display(self) -> tuple:
             stdout = self.result.tail("stdout")
         if self.result.pty:
             stderr = " n/a (PTYs have no stderr)"
+        elif "stderr" not in self.result.hide:
+            stderr = already_printed
         else:
-            if "stderr" not in self.result.hide:
-                stderr = already_printed
-            else:
-                stderr = self.result.tail("stderr")
+            stderr = self.result.tail("stderr")
         return stdout, stderr

     def __repr__(self) -> str:
diff --git a/sdcm/results_analyze/__init__.py b/sdcm/results_analyze/__init__.py
index a0d8e0ebeaa..73d94a1a2ad 100644
--- a/sdcm/results_analyze/__init__.py
+++ b/sdcm/results_analyze/__init__.py
@@ -1138,9 +1138,9 @@ def _add_best_for_info(test, subtest, metric_path, tests_info):

     def _mark_best_tests(self, prior_subtests, metrics, tests_info, main_test_id):
         main_tests_by_id = MagicList(tests_info.keys()).group_by('test_id')
-        for _, prior_tests in prior_subtests.items():
+        for _, _prior_tests in prior_subtests.items():
             prior_tests = MagicList(
-                [prior_test for prior_test in prior_tests if prior_test.main_test_id != main_test_id])
+                [prior_test for prior_test in _prior_tests if prior_test.main_test_id != main_test_id])
             if not prior_tests:
                 continue
             for metric_path in metrics:
@@ -1235,7 +1235,7 @@ def _cleanup_not_complete_main_tests(prior_main_tests: list, prior_subtests: dic
         for num in sorted(to_delete, reverse=True):
             prior_tests.pop(num)

-    def check_regression_multi_baseline(
+    def check_regression_multi_baseline(  # noqa: PLR0912, PLR0915
             self,
             test_id,
             subtests_info: list = None,
diff --git a/sdcm/results_analyze/test.py b/sdcm/results_analyze/test.py
index 99dd408ff2d..8c172ff2c91 100644
--- a/sdcm/results_analyze/test.py
+++ b/sdcm/results_analyze/test.py
@@ -437,7 +437,7 @@ def is_gce(self):
     def _get_es_filters(cls, depth=2):
         tmp = []
         for es_filter in cls._get_all_es_data_mapping().values():
-            es_filter = '.'.join(es_filter.split('.')[:depth])
+            es_filter = '.'.join(es_filter.split('.')[:depth])  # noqa: PLW2901
             if es_filter not in tmp:
                 tmp.append(es_filter)
         return ['hits.hits.' + es_filter for es_filter in tmp]
@@ -459,11 +459,11 @@ def _get_es_query_from_instance_data(cls, instance_data: dict):
     def _get_es_query_from_es_data(cls, es_data: dict):
         filters = []
         for es_data_path, data_value in es_data.items():
-            es_data_path = es_data_path.split('.')
+            es_data_path = es_data_path.split('.')  # noqa: PLW2901
             if es_data_path[0] == '_source':
-                es_data_path = es_data_path[1:]
-            es_data_path = '.'.join(es_data_path)
-            es_data_path = cls._escape_filter_key(es_data_path)
+                es_data_path = es_data_path[1:]  # noqa: PLW2901
+            es_data_path = '.'.join(es_data_path)  # noqa: PLW2901
+            es_data_path = cls._escape_filter_key(es_data_path)  # noqa: PLW2901
             if isinstance(data_value, str) and es_data_path not in cls._es_field_indexes and data_value != '*':
                 filters.append(f'{es_data_path}.keyword: \"{data_value}\"')
             elif isinstance(data_value, bool):
diff --git a/sdcm/sct_config.py b/sdcm/sct_config.py
index 9e4a9cbd8e8..6ee24cef502 100644
--- a/sdcm/sct_config.py
+++ b/sdcm/sct_config.py
@@ -1669,7 +1669,7 @@ class SCTConfiguration(dict):
     ami_id_params = ['ami_id_db_scylla', 'ami_id_loader', 'ami_id_monitor', 'ami_id_db_cassandra', 'ami_id_db_oracle']
     aws_supported_regions = ['eu-west-1', 'eu-west-2', 'us-west-2', 'us-east-1', 'eu-north-1', 'eu-central-1']

-    def __init__(self):
+    def __init__(self):  # noqa: PLR0912, PLR0915
         # pylint: disable=too-many-locals,too-many-branches,too-many-statements
         super().__init__()
         self.scylla_version = None
@@ -2113,7 +2113,7 @@ def list_of_stress_tools(self) -> Set[str]:
             if not stress_cmd:
                 continue
             if not isinstance(stress_cmd, list):
-                stress_cmd = [stress_cmd]
+                stress_cmd = [stress_cmd]  # noqa: PLW2901
             for cmd in stress_cmd:
                 if stress_tool := cmd.split(maxsplit=2)[0]:
                     stress_tools.add(stress_tool)
@@ -2133,9 +2133,9 @@ def check_required_files(self):
             if not stress_cmd:
                 continue
             if not isinstance(stress_cmd, list):
-                stress_cmd = [stress_cmd]
+                stress_cmd = [stress_cmd]  # noqa: PLW2901
             for cmd in stress_cmd:
-                cmd = cmd.strip(' ')
+                cmd = cmd.strip(' ')  # noqa: PLW2901
                 if cmd.startswith('latte'):
                     script_name_regx = re.compile(r'([/\w-]*\.rn)')
                     script_name = script_name_regx.search(cmd).group(1)
@@ -2146,7 +2146,7 @@ def check_required_files(self):
                     continue
                 for option in cmd.split():
                     if option.startswith('profile='):
-                        option = option.split('=', 1)
+                        option = option.split('=', 1)  # noqa: PLW2901
                         if len(option) < 2:
                             continue
                         profile_path = option[1]
@@ -2581,9 +2581,9 @@ def _verify_scylla_bench_mode_and_workload_parameters(self):
             if not stress_cmd:
                 continue
             if not isinstance(stress_cmd, list):
-                stress_cmd = [stress_cmd]
+                stress_cmd = [stress_cmd]  # noqa: PLW2901
             for cmd in stress_cmd:
-                cmd = cmd.strip(' ')
+                cmd = cmd.strip(' ')  # noqa: PLW2901
                 if not cmd.startswith('scylla-bench'):
                     continue
                 if "-mode=" not in cmd:
diff --git a/sdcm/sct_events/events_processes.py b/sdcm/sct_events/events_processes.py
index 54d89d95f1b..9ef0cd3137e 100644
--- a/sdcm/sct_events/events_processes.py
+++ b/sdcm/sct_events/events_processes.py
@@ -166,7 +166,7 @@ def __str__(self):

 def create_default_events_process_registry(log_dir: Union[str, Path]):
-    global _EVENTS_PROCESSES  # pylint: disable=global-statement
+    global _EVENTS_PROCESSES  # pylint: disable=global-statement  # noqa: PLW0603

     with _EVENTS_PROCESSES_LOCK:
         if _EVENTS_PROCESSES is None:
diff --git a/sdcm/sct_runner.py b/sdcm/sct_runner.py
index b93f6ab0707..dbdfc981ef1 100644
--- a/sdcm/sct_runner.py
+++ b/sdcm/sct_runner.py
@@ -1274,7 +1274,6 @@ def _manage_runner_keep_tag_value(utc_now: datetime,
             return sct_runner_info

     LOGGER.info("No changes to make to runner tags.")
-    return sct_runner_info


 def clean_sct_runners(test_status: str,
@@ -1316,9 +1315,9 @@ def clean_sct_runners(test_status: str,
     LOGGER.info("UTC now: %s", utc_now)

     if not dry_run and test_runner_ip:
-        sct_runner_info = _manage_runner_keep_tag_value(test_status=test_status, utc_now=utc_now,
-                                                        timeout_flag=timeout_flag, sct_runner_info=sct_runner_info,
-                                                        dry_run=dry_run)
+        _manage_runner_keep_tag_value(test_status=test_status, utc_now=utc_now,
+                                      timeout_flag=timeout_flag, sct_runner_info=sct_runner_info,
+                                      dry_run=dry_run)

     if sct_runner_info.keep:
         if "alive" in str(sct_runner_info.keep):
diff --git a/sdcm/send_email.py b/sdcm/send_email.py
index 56573c6f8b8..5043681e779 100644
--- a/sdcm/send_email.py
+++ b/sdcm/send_email.py
@@ -581,7 +581,7 @@ class PerfSimpleQueryReporter(BaseEmailReporter):
     email_template_file = "results_perf_simple_query.html"


-def build_reporter(name: str,
+def build_reporter(name: str,  # noqa: PLR0911
                    email_recipients: Sequence[str] = (),
                    logdir: Optional[str] = None) -> Optional[BaseEmailReporter]:
     # pylint: disable=too-many-return-statements,too-many-branches
diff --git a/sdcm/stress/latte_thread.py b/sdcm/stress/latte_thread.py
index 3a5b8286f4f..9214b912092 100644
--- a/sdcm/stress/latte_thread.py
+++ b/sdcm/stress/latte_thread.py
@@ -86,9 +86,9 @@ def run(self):
             try:
                 match = regex.search(line)
                 if match:
-                    for key, value in match.groupdict().items():
-                        value = float(value)
-                        self.set_metric(self.operation, key, float(value))
+                    for key, _value in match.groupdict().items():
+                        value = float(_value)
+                        self.set_metric(self.operation, key, value)
             except Exception:  # pylint: disable=broad-except
                 LOGGER.exception("fail to send metric")
diff --git a/sdcm/stress_thread.py b/sdcm/stress_thread.py
index 95686189dc9..179f01e6613 100644
--- a/sdcm/stress_thread.py
+++ b/sdcm/stress_thread.py
@@ -66,7 +66,7 @@ def run(self) -> None:

             if pattern.search(line):
                 if event.severity == Severity.CRITICAL and not self.stop_test_on_failure:
-                    event = event.clone()  # so we don't change the severity to other stress threads
+                    event = event.clone()  # so we don't change the severity to other stress threads  # noqa: PLW2901
                     event.severity = Severity.ERROR
                 event.add_info(node=self.node, line=line, line_number=line_number).publish()
                 break  # Stop iterating patterns to avoid creating two events for one line of the log
@@ -94,7 +94,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):

 class CassandraStressThread(DockerBasedStressThread):  # pylint: disable=too-many-instance-attributes
     DOCKER_IMAGE_PARAM_NAME = 'stress_image.cassandra-stress'

-    def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1, keyspace_name='', compaction_strategy='',  # pylint: disable=too-many-arguments
+    def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1, keyspace_name='', compaction_strategy='',  # pylint: disable=too-many-arguments  # noqa: PLR0913
                  profile=None, node_list=None, round_robin=False, client_encrypt=False, stop_test_on_failure=True,
                  params=None):
         super().__init__(loader_set=loader_set, stress_cmd=stress_cmd, timeout=timeout,
@@ -234,7 +234,7 @@ def _add_hdr_log_option(stress_cmd: str, hdr_log_name: str) -> str:
             cs_log_option = match.group(1)
             if "hdrfile" not in cs_log_option:
                 stress_cmd = stress_cmd.replace("-log", f"-log hdrfile={hdr_log_name}")
-            else:
+            else:  # noqa: PLR5501
                 if replacing_hdr_file := re.search(r"hdrfile=(.*?)\s", cs_log_option):
                     stress_cmd = stress_cmd.replace(
                         f"hdrfile={replacing_hdr_file.group(1)}", f"hdrfile={hdr_log_name}")
diff --git a/sdcm/tester.py b/sdcm/tester.py
index c388adc2d2e..40a811c13fc 100644
--- a/sdcm/tester.py
+++ b/sdcm/tester.py
@@ -583,8 +583,8 @@ def argus_collect_logs(self, log_links: dict[str, list[str] | str]):
         try:
             logs_to_save = []
             for name, link in log_links.items():
-                link = LogLink(log_name=name, log_link=link)
-                logs_to_save.append(link)
+                argus_link = LogLink(log_name=name, log_link=link)
+                logs_to_save.append(argus_link)
             self.test_config.argus_client().submit_sct_logs(logs_to_save)
         except Exception:  # pylint: disable=broad-except
             self.log.error("Error saving logs to Argus", exc_info=True)
@@ -1124,7 +1124,7 @@ def get_nemesis_class(self):
         self.log.debug("Nemesis threads %s", nemesis_threads)
         return nemesis_threads

-    def get_cluster_gce(self, loader_info, db_info, monitor_info):
+    def get_cluster_gce(self, loader_info, db_info, monitor_info):  # noqa: PLR0912
         # pylint: disable=too-many-locals,too-many-statements,too-many-branches
         if loader_info['n_nodes'] is None:
             n_loader_nodes = self.params.get('n_loaders')
@@ -1902,7 +1902,7 @@ def run_stress(self, stress_cmd, duration=None):
         self.verify_stress_thread(cs_thread_pool=cs_thread_pool)

     # pylint: disable=too-many-arguments,too-many-return-statements
-    def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='',  # pylint: disable=too-many-arguments
+    def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='',  # pylint: disable=too-many-arguments  # noqa: PLR0911, PLR0913
                           round_robin=False, stats_aggregate_cmds=True, keyspace_name=None, compaction_strategy='',
                           use_single_loader=False,
                           stop_test_on_failure=True):
@@ -1945,7 +1945,7 @@ def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_nu
             raise ValueError(f'Unsupported stress command: "{stress_cmd[:50]}..."')

     # pylint: disable=too-many-arguments
-    def run_stress_cassandra_thread(
+    def run_stress_cassandra_thread(  # noqa: PLR0913
             self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', round_robin=False,
             stats_aggregate_cmds=True, keyspace_name=None, compaction_strategy='', stop_test_on_failure=True,
             params=None, **_):  # pylint: disable=too-many-locals
@@ -1979,7 +1979,7 @@ def run_stress_cassandra_thread(
         return cs_thread

     # pylint: disable=too-many-arguments
-    def run_cql_stress_cassandra_thread(
+    def run_cql_stress_cassandra_thread(  # noqa: PLR0913
             self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', round_robin=False,
             stats_aggregate_cmds=True, keyspace_name=None, compaction_strategy='', stop_test_on_failure=True,
             params=None, **_):  # pylint: disable=too-many-locals
@@ -2360,7 +2360,7 @@ def create_keyspace(self, keyspace_name, replication_factor):
             does_keyspace_exist = self.wait_validate_keyspace_existence(session, keyspace_name)
         return does_keyspace_exist

-    def create_table(self, name, key_type="varchar",  # pylint: disable=too-many-arguments,too-many-branches
+    def create_table(self, name, key_type="varchar",  # pylint: disable=too-many-arguments,too-many-branches  # noqa: PLR0913
                      speculative_retry=None, read_repair=None, compression=None,
                      gc_grace=None, columns=None, compaction=None,
                      compact_storage=False, scylla_encryption_options=None, keyspace_name=None,
@@ -2421,7 +2421,7 @@ def truncate_cf(self, ks_name: str, table_name: str, session: Session, truncate_
         except Exception as ex:  # pylint: disable=broad-except
             self.log.debug('Failed to truncate base table {0}.{1}. Error: {2}'.format(ks_name, table_name, str(ex)))

-    def create_materialized_view(self, ks_name, base_table_name, mv_name, mv_partition_key, mv_clustering_key, session,
+    def create_materialized_view(self, ks_name, base_table_name, mv_name, mv_partition_key, mv_clustering_key, session,  # noqa: PLR0913
                                  # pylint: disable=too-many-arguments
                                  mv_columns='*', speculative_retry=None, read_repair=None, compression=None,
                                  gc_grace=None, compact_storage=False):
@@ -3505,12 +3505,11 @@ def get_nemesises_stats(self):
         nemesis_stats = {}
         if self.create_stats:
             nemesis_stats = self.get_doc_data(key='nemesis')
+        elif self.db_cluster:
+            for nem in self.db_cluster.nemesis:
+                nemesis_stats.update(nem.stats)
         else:
-            if self.db_cluster:
-                for nem in self.db_cluster.nemesis:
-                    nemesis_stats.update(nem.stats)
-            else:
-                self.log.warning("No nemesises as cluster was not created")
+            self.log.warning("No nemesises as cluster was not created")

         if nemesis_stats:
             for detail in nemesis_stats.values():
diff --git a/sdcm/utils/cdc/options.py b/sdcm/utils/cdc/options.py
index 9a9a4f21a66..a57907f4c35 100644
--- a/sdcm/utils/cdc/options.py
+++ b/sdcm/utils/cdc/options.py
@@ -60,12 +60,13 @@ def parse_cdc_blob_settings(blob: bytes) -> Dict[str, Union[bool, str]]:
     for regexp in CDC_SETTINGS_REGEXP:
         res = re.search(regexp, blob.decode())
         if res:
-            for key, value in res.groupdict().items():
-                if value in ("false", "off"):
+            for key, _value in res.groupdict().items():
+                if _value in ("false", "off"):
                     value = False
-                elif value == 'true':
+                elif _value == 'true':
                     value = True
-
+                else:
+                    value = _value
                 cdc_settings[key] = value

     return cdc_settings
diff --git a/sdcm/utils/common.py b/sdcm/utils/common.py
index 4bd8c9fb288..824a0246bdd 100644
--- a/sdcm/utils/common.py
+++ b/sdcm/utils/common.py
@@ -2277,15 +2277,14 @@ def download_dir_from_cloud(url):
     LOGGER.info("Downloading [%s] to [%s]", url, tmp_dir)
     if os.path.isdir(tmp_dir) and os.listdir(tmp_dir):
         LOGGER.warning("[{}] already exists, skipping download".format(tmp_dir))
+    elif url.startswith('s3://'):
+        s3_download_dir(parsed.hostname, parsed.path, tmp_dir)
+    elif url.startswith('gs://'):
+        gce_download_dir(parsed.hostname, parsed.path, tmp_dir)
+    elif os.path.isdir(url):
+        tmp_dir = url
     else:
-        if url.startswith('s3://'):
-            s3_download_dir(parsed.hostname, parsed.path, tmp_dir)
-        elif url.startswith('gs://'):
-            gce_download_dir(parsed.hostname, parsed.path, tmp_dir)
-        elif os.path.isdir(url):
-            tmp_dir = url
-        else:
-            raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url))
+        raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url))
     if not tmp_dir.endswith('/'):
         tmp_dir += '/'
     LOGGER.info("Finished downloading [%s]", url)
@@ -2941,7 +2940,7 @@ def walk_thru_data(data, path: str, separator: str = '/') -> Any:
         if not name:
             continue
         if name[0] == '[' and name[-1] == ']':
-            name = name[1:-1]
+            name = name[1:-1]  # noqa: PLW2901
         if name.isalnum() and isinstance(current_value, (list, tuple, set)):
             try:
                 current_value = current_value[int(name)]
diff --git a/sdcm/utils/data_validator.py b/sdcm/utils/data_validator.py
index 37bfc1a07fc..91fc13f130b 100644
--- a/sdcm/utils/data_validator.py
+++ b/sdcm/utils/data_validator.py
@@ -462,33 +462,31 @@ def validate_range_not_expected_to_change(self, session, during_nemesis=False):
                    message=f"Actual dataset length more then expected ({len(actual_result)} > {len(expected_result)}). "
" f"Issue #6181" ).publish() - else: - if not during_nemesis: - assert len(actual_result) == len(expected_result), \ - 'One or more rows are not as expected, suspected LWT wrong update. ' \ - 'Actual dataset length: {}, Expected dataset length: {}'.format(len(actual_result), - len(expected_result)) + elif not during_nemesis: + assert len(actual_result) == len(expected_result), \ + 'One or more rows are not as expected, suspected LWT wrong update. ' \ + 'Actual dataset length: {}, Expected dataset length: {}'.format(len(actual_result), + len(expected_result)) - assert actual_result == expected_result, \ - 'One or more rows are not as expected, suspected LWT wrong update' + assert actual_result == expected_result, \ + 'One or more rows are not as expected, suspected LWT wrong update' - # Raise info event at the end of the test only. - DataValidatorEvent.ImmutableRowsValidator( - severity=Severity.NORMAL, - message="Validation immutable rows finished successfully" - ).publish() - else: - if len(actual_result) < len(expected_result): - DataValidatorEvent.ImmutableRowsValidator( - severity=Severity.ERROR, - error=f"Verify immutable rows. " - f"One or more rows not found as expected, suspected LWT wrong update. " - f"Actual dataset length: {len(actual_result)}, " - f"Expected dataset length: {len(expected_result)}" - ).publish() - else: - LOGGER.debug('Verify immutable rows. Actual dataset length: %s, Expected dataset length: %s', - len(actual_result), len(expected_result)) + # Raise info event at the end of the test only. + DataValidatorEvent.ImmutableRowsValidator( + severity=Severity.NORMAL, + message="Validation immutable rows finished successfully" + ).publish() + elif len(actual_result) < len(expected_result): + DataValidatorEvent.ImmutableRowsValidator( + severity=Severity.ERROR, + error=f"Verify immutable rows. " + f"One or more rows not found as expected, suspected LWT wrong update. " + f"Actual dataset length: {len(actual_result)}, " + f"Expected dataset length: {len(expected_result)}" + ).publish() + else: + LOGGER.debug('Verify immutable rows. 
Actual dataset length: %s, Expected dataset length: %s', + len(actual_result), len(expected_result)) def list_of_view_names_for_update_test(self): # List of tuples of correlated view names for validation: before update, after update, expected data diff --git a/sdcm/utils/docker_utils.py b/sdcm/utils/docker_utils.py index cc5e7194ba0..4fe2f62da8b 100644 --- a/sdcm/utils/docker_utils.py +++ b/sdcm/utils/docker_utils.py @@ -176,8 +176,8 @@ def _get_attr_for_name(instance: INodeWithContainerManager, if not name_only_lookup: attr_candidate_list.append((attr, ())) - for attr_candidate, args in attr_candidate_list: - attr_candidate = getattr(instance, attr_candidate, None) + for _attr_candidate, args in attr_candidate_list: + attr_candidate = getattr(instance, _attr_candidate, None) if callable(attr_candidate): attr_candidate = attr_candidate(*args) if attr_candidate is not None: diff --git a/sdcm/utils/gce_utils.py b/sdcm/utils/gce_utils.py index 85f2d2e378c..28ec20e4a33 100644 --- a/sdcm/utils/gce_utils.py +++ b/sdcm/utils/gce_utils.py @@ -370,7 +370,7 @@ def disk_from_image( # pylint: disable=too-many-arguments return boot_disk -def create_instance( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements +def create_instance( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements # noqa: PLR0913 project_id: str, zone: str, instance_name: str, diff --git a/sdcm/utils/get_username.py b/sdcm/utils/get_username.py index e2914a587d4..c78386fab89 100644 --- a/sdcm/utils/get_username.py +++ b/sdcm/utils/get_username.py @@ -24,7 +24,7 @@ def get_email_user(email_addr: str) -> str: return email_addr.strip().split("@")[0] -def get_username() -> str: # pylint: disable=too-many-return-statements +def get_username() -> str: # pylint: disable=too-many-return-statements # noqa: PLR0911 # First we check if user is being impersonated by an api call actual_user_from_request = os.environ.get('BUILD_USER_REQUESTED_BY') if actual_user_from_request: diff --git a/sdcm/utils/k8s/__init__.py b/sdcm/utils/k8s/__init__.py index fb9fd137a58..5f99661bfa8 100644 --- a/sdcm/utils/k8s/__init__.py +++ b/sdcm/utils/k8s/__init__.py @@ -625,10 +625,10 @@ def gather_k8s_logs(cls, logdir_path, kubectl=None, namespaces=None) -> None: # resource_type, namespace) resource_dir = logdir / namespace_scope_dir / namespace / resource_type os.makedirs(resource_dir, exist_ok=True) - for res in resources_wide.split("\n"): - if not re.match(f"{namespace} ", res): + for _res in resources_wide.split("\n"): + if not re.match(f"{namespace} ", _res): continue - res = res.split()[1] + res = _res.split()[1] logfile = resource_dir / f"{res}.yaml" res_stdout = kubectl( f"get {resource_type}/{res} -o yaml 2>&1 | tee {logfile}", @@ -1068,7 +1068,7 @@ def register_callbacks(self, callbacks: Union[Callable, list[Callable]], for callback in callbacks: if callable(callback): - callback = [callback, [], {}] + callback = [callback, [], {}] # noqa: PLW2901 if (isinstance(callback, (tuple, list)) and len(callback) == 3 and callable(callback[0]) diff --git a/sdcm/utils/k8s/chaos_mesh.py b/sdcm/utils/k8s/chaos_mesh.py index 790eada1aa3..160e84b28b2 100644 --- a/sdcm/utils/k8s/chaos_mesh.py +++ b/sdcm/utils/k8s/chaos_mesh.py @@ -165,7 +165,7 @@ def start(self): self._end_time = time.time() + self._timeout # pylint: disable=too-many-return-statements - def get_status(self) -> ExperimentStatus: + def get_status(self) -> ExperimentStatus: # noqa: PLR0911 """Gets status of chaos-mesh experiment.""" 
result = self._k8s_cluster.kubectl( f"get {self.CHAOS_KIND} {self._name} -n {self._namespace} -o jsonpath='{{.status.conditions}}'", verbose=False) diff --git a/sdcm/utils/latency.py b/sdcm/utils/latency.py index 2b1013572bc..e9b06c5b524 100644 --- a/sdcm/utils/latency.py +++ b/sdcm/utils/latency.py @@ -32,7 +32,7 @@ def collect_latency(monitor_node, start, end, load_type, cluster, nodes_list): for precision in cassandra_stress_precision: metric = f'c-s {precision}' if precision == 'max' else f'c-s P{precision}' if not precision == 'max': - precision = f'perc_{precision}' + precision = f'perc_{precision}' # noqa: PLW2901 query = f'sct_cassandra_stress_{load_type}_gauge{{type="lat_{precision}"}}' query_res = prometheus.query(query, start, end) latency_values_lst = [] diff --git a/sdcm/utils/ldap.py b/sdcm/utils/ldap.py index b9944b0c9b5..54f01b43e2f 100644 --- a/sdcm/utils/ldap.py +++ b/sdcm/utils/ldap.py @@ -34,7 +34,7 @@ LDAP_PASSWORD = 'scylla-0' LDAP_ROLE = 'scylla_ldap' LDAP_USERS = ['scylla-qa', 'dummy-user'] -LDAP_BASE_OBJECT = (lambda l: ','.join([f'dc={part}' for part in l.split('.')]))(LDAP_DOMAIN) +LDAP_BASE_OBJECT = (lambda l: ','.join([f'dc={part}' for part in l.split('.')]))(LDAP_DOMAIN) # noqa: PLC3002 SASLAUTHD_AUTHENTICATOR = 'com.scylladb.auth.SaslauthdAuthenticator' diff --git a/sdcm/utils/log.py b/sdcm/utils/log.py index 24fc7999375..3f05f6afc67 100644 --- a/sdcm/utils/log.py +++ b/sdcm/utils/log.py @@ -79,14 +79,14 @@ def replace_vars(obj, variables, obj_type=None): if issubclass(obj_type, dict): output = {} for attr_name, attr_value in obj.items(): - attr_name = replace_vars(attr_name, variables) - attr_value = replace_vars(attr_value, variables) + attr_name = replace_vars(attr_name, variables) # noqa: PLW2901 + attr_value = replace_vars(attr_value, variables) # noqa: PLW2901 output[attr_name] = attr_value # deepcode ignore UnhashableKey: you get same keys type as source return output if issubclass(obj_type, list): output = [] for element in obj: - element = replace_vars(element, variables) + element = replace_vars(element, variables) # noqa: PLW2901 output.append(element) # deepcode ignore InfiniteLoopByCollectionModification: Not even close return output if issubclass(obj_type, tuple): diff --git a/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py b/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py index e303ec09bf5..280a52742f5 100644 --- a/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py +++ b/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py @@ -117,9 +117,8 @@ def make_table_line_for_render(data): if "_per_op" in key: if diff < 1 + regression_limit: table_line["is_" + key + "_within_limits"] = True - else: - if diff > 1 - regression_limit: - table_line["is_" + key + "_within_limits"] = True + elif diff > 1 - regression_limit: + table_line["is_" + key + "_within_limits"] = True table_line[key + "_diff"] = round((diff - 1) * 100, 2) table_line[key] = round(table_line[key], 2) table_line["mad tps"] = round(table_line["mad tps"], 2) diff --git a/sdcm/utils/properties.py b/sdcm/utils/properties.py index e0ffebafafc..d759f6f0c2c 100644 --- a/sdcm/utils/properties.py +++ b/sdcm/utils/properties.py @@ -49,11 +49,11 @@ def deserialize(data: Union[str, TextIO]) -> PropertiesDict: if not line.strip() or line.lstrip()[0] == '#': output[line] = None continue - line = line.split('=', 1) - if len(line) == 2: - value = line[1] + line_splitted = line.split('=', 1) + if len(line_splitted) == 2: + value = line_splitted[1] comment_pos = 
             if comment_pos >= 0:
                 value = value[0:value]
-        output[line[0].strip()] = value.strip().strip('"').strip("'")
+        output[line_splitted[0].strip()] = value.strip().strip('"').strip("'")
     return output
diff --git a/sdcm/utils/remote_logger.py b/sdcm/utils/remote_logger.py
index 026d87af7ae..96dee805421 100644
--- a/sdcm/utils/remote_logger.py
+++ b/sdcm/utils/remote_logger.py
@@ -573,7 +573,7 @@ def _logger_cmd(self) -> str:
         return f"echo \"I`date -u +\"%m%d %H:%M:%S\"` {message}\" >> {self._target_log_file} 2>&1"


-def get_system_logging_thread(logs_transport, node, target_log_file):  # pylint: disable=too-many-return-statements
+def get_system_logging_thread(logs_transport, node, target_log_file):  # pylint: disable=too-many-return-statements  # noqa: PLR0911
     if logs_transport == 'docker':
         return DockerGeneralLogger(node, target_log_file)
     if logs_transport == 'kubectl':
diff --git a/sdcm/utils/sstable/sstable_utils.py b/sdcm/utils/sstable/sstable_utils.py
index 3635c0e13c5..c25e76004cc 100644
--- a/sdcm/utils/sstable/sstable_utils.py
+++ b/sdcm/utils/sstable/sstable_utils.py
@@ -57,6 +57,7 @@ def get_sstables(self, from_minutes_ago: int = 0):

     def check_that_sstables_are_encrypted(self, sstables=None,  # pylint: disable=too-many-branches
                                           expected_bool_value: bool = True) -> list:
+
         if not sstables:
             sstables = self.get_sstables()
         if isinstance(sstables, str):
diff --git a/sdcm/utils/toppartition_util.py b/sdcm/utils/toppartition_util.py
index 7e1d1ff8729..2f5cd36fb12 100644
--- a/sdcm/utils/toppartition_util.py
+++ b/sdcm/utils/toppartition_util.py
@@ -83,8 +83,8 @@ def _parse_toppartitions_output(output: str) -> dict:

     def verify_output(self, output: str):
         toppartition_result = self._parse_toppartitions_output(output)
-        for sampler in self._built_args['samplers'].split(','):
-            sampler = sampler.upper()
+        for _sampler in self._built_args['samplers'].split(','):
+            sampler = _sampler.upper()
             assert sampler in toppartition_result, "{} sampler not found in result".format(sampler)
             assert toppartition_result[sampler]['toppartitions'] == self._built_args['toppartition'], \
                 "Wrong expected and actual top partitions number for {} sampler".format(sampler)
diff --git a/sdcm/utils/version_utils.py b/sdcm/utils/version_utils.py
index 184176a34ed..e13522bab28 100644
--- a/sdcm/utils/version_utils.py
+++ b/sdcm/utils/version_utils.py
@@ -556,7 +556,7 @@ def resolve_latest_repo_symlink(url: str) -> str:
     continuation_token = "BEGIN"
     while continuation_token:
         for build in s3_objects.get("CommonPrefixes", []):
-            build = build.get("Prefix", "").rstrip("/").rsplit("/", 1)[-1]
+            build = build.get("Prefix", "").rstrip("/").rsplit("/", 1)[-1]  # noqa: PLW2901
            if build == LATEST_SYMLINK_NAME:
                 continue
             timestamp = NO_TIMESTAMP
@@ -704,13 +704,13 @@ def __call__(self, func):
             self.VERSIONS[(func.__name__, func.__code__.co_filename)] = {}
         for min_v, max_v in self.min_max_version_pairs:
             scylla_type = "enterprise" if any((is_enterprise(v) for v in (min_v, max_v) if v)) else "oss"
-            min_v = min_v or ("3.0.0" if scylla_type == "oss" else "2019.1.rc0")
-            max_v = max_v or ("99.99.99" if scylla_type == "oss" else "2099.99.99")
+            min_v = min_v or ("3.0.0" if scylla_type == "oss" else "2019.1.rc0")  # noqa: PLW2901
+            max_v = max_v or ("99.99.99" if scylla_type == "oss" else "2099.99.99")  # noqa: PLW2901
             if max_v.count(".") == 1:
                 # NOTE: version parse function considers 4.4 as lower than 4.4.1,
                 #       but we expect it to be any of the 4.4.x versions.
                 #       So, update all such short versions with the patch part and make it to be huge.
-                max_v = f"{max_v}.999"
+                max_v = f"{max_v}.999"  # noqa: PLW2901
             self.VERSIONS[(func.__name__, func.__code__.co_filename)].update({(min_v, max_v): func})

         @wraps(func)
@@ -828,7 +828,7 @@ def find_scylla_repo(scylla_version, dist_type='centos', dist_version=None):
     for key in repo_map:
         if scylla_version.startswith(key):
             return repo_map[key]
-    else:
+    else:  # noqa: PLW0120
         raise ValueError(f"repo for scylla version {scylla_version} wasn't found")


diff --git a/sdcm/ycsb_thread.py b/sdcm/ycsb_thread.py
index 05995e89a39..db3537a1e6e 100644
--- a/sdcm/ycsb_thread.py
+++ b/sdcm/ycsb_thread.py
@@ -103,9 +103,9 @@ def run(self):
             for key, value in match.groupdict().items():
                 if not key == 'count':
                     try:
-                        value = float(value) / 1000.0
+                        value = float(value) / 1000.0  # noqa: PLW2901
                     except ValueError:
-                        value = float(0)
+                        value = float(0)  # noqa: PLW2901
                 self.set_metric(operation, key, float(value))

         except Exception:  # pylint: disable=broad-except
@@ -126,7 +126,7 @@ def copy_template(self, cmd_runner, loader_name, memo={}):  # pylint: disable=da
             web_protocol = "http" + ("s" if self.params.get("alternator_port") == 8043 else "")
         elif self.params.get('alternator_use_dns_routing'):
             target_address = 'alternator'
-        else:
+        else:  # noqa: PLR5501
             if hasattr(self.node_list[0], 'parent_cluster'):
                 target_address = self.node_list[0].parent_cluster.get_node().cql_address
             else:
diff --git a/sla_per_user_system_test.py b/sla_per_user_system_test.py
index f0fc88f9fb5..758da53882b 100644
--- a/sla_per_user_system_test.py
+++ b/sla_per_user_system_test.py
@@ -836,7 +836,7 @@ def _compare_workloads_c_s_metrics(self, workloads_queue: list) -> dict:

         assert len(workloads_results) == 2, \
             "Expected workload_results length to be 2, got: %s. workload_results: %s" % (
-            len(workloads_results), workloads_results)
+                len(workloads_results), workloads_results)
         comparison_results = {}
         try:
             for item, target_margin in comparison_axis.items():
diff --git a/unit_tests/lib/fake_provisioner.py b/unit_tests/lib/fake_provisioner.py
index 55fdeba40e9..028a40085c7 100644
--- a/unit_tests/lib/fake_provisioner.py
+++ b/unit_tests/lib/fake_provisioner.py
@@ -76,7 +76,7 @@ def reboot_instance(self, name: str, wait: bool, hard: bool = False) -> None:

     def run_command(self, name: str, command: str) -> Result:
         """Runs command on instance."""
-        return subprocess.run(command, shell=True, capture_output=True, text=True)  # pylint: disable=subprocess-run-check
+        return subprocess.run(command, shell=True, capture_output=True, text=True, check=False)  # pylint: disable=subprocess-run-check

     @classmethod
     def discover_regions(cls, test_id: str, **kwargs) -> List[Provisioner]:  # pylint: disable=unused-argument
diff --git a/unit_tests/provisioner/fake_azure_service.py b/unit_tests/provisioner/fake_azure_service.py
index e866967f597..4fdf13de060 100644
--- a/unit_tests/provisioner/fake_azure_service.py
+++ b/unit_tests/provisioner/fake_azure_service.py
@@ -73,8 +73,8 @@ def create_or_update(self, resource_group_name: str, parameters: Dict[str, Any])
         }
         res_group.update(**parameters)
         (self.path / resource_group_name).mkdir(exist_ok=True)
-        with open(self.path / resource_group_name / "resource_group.json", "w", encoding="utf-8") as file:
-            json.dump(res_group, fp=file, indent=2)
+        with open(self.path / resource_group_name / "resource_group.json", "w", encoding="utf-8") as file_obj:
+            json.dump(res_group, fp=file_obj, indent=2)
         return ResourceGroup.deserialize(res_group)

     def get(self, name) -> ResourceGroup:
@@ -109,9 +109,9 @@ def list(self, resource_group_name: str) -> List[NetworkSecurityGroup]:
         except FileNotFoundError:
             raise ResourceNotFoundError("No resource group") from None
         elements = []
-        for file in files:
-            with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file:
-                elements.append(NetworkSecurityGroup.deserialize(json.load(file)))
+        for file_name in files:
+            with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj:
+                elements.append(NetworkSecurityGroup.deserialize(json.load(file_obj)))
         return elements

     def begin_create_or_update(self, resource_group_name: str, network_security_group_name: str,
@@ -179,9 +179,9 @@ def list(self, resource_group_name: str) -> List[VirtualNetwork]:
         except FileNotFoundError:
             raise ResourceNotFoundError("No resource group") from None
         elements = []
-        for file in files:
-            with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file:
-                elements.append(VirtualNetwork.deserialize(json.load(file)))
+        for file_name in files:
+            with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj:
+                elements.append(VirtualNetwork.deserialize(json.load(file_obj)))
         return elements

     def begin_create_or_update(self, resource_group_name: str, virtual_network_name: str, parameters: Dict[str, Any]
@@ -230,9 +230,9 @@ def list(self, resource_group_name: str, virtual_network_name: str) -> List[Subn
         except FileNotFoundError:
             raise ResourceNotFoundError("No resource group") from None
         elements = []
-        for file in files:
-            with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file:
-                elements.append(Subnet.deserialize(json.load(file)))
+        for file_name in files:
+            with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj:
+                elements.append(Subnet.deserialize(json.load(file_obj)))
         return elements

     def begin_create_or_update(self, resource_group_name: str, virtual_network_name: str, subnet_name: str,
@@ -280,9 +280,9 @@ def list(self, resource_group_name: str) -> List[PublicIPAddress]:
         except FileNotFoundError:
             raise ResourceNotFoundError("No resource group") from None
         elements = []
-        for file in files:
-            with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file:
-                elements.append(PublicIPAddress.deserialize(json.load(file)))
+        for file_name in files:
+            with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj:
+                elements.append(PublicIPAddress.deserialize(json.load(file_obj)))
         return elements

     def begin_create_or_update(self, resource_group_name: str, public_ip_address_name: str, parameters: Dict[str, Any]
@@ -310,8 +310,8 @@ def begin_create_or_update(self, resource_group_name: str, public_ip_address_nam
                 "provisioningState": "Succeeded"
             }
         }
-        with open(self.path / resource_group_name / f"ip-{public_ip_address_name}.json", "w", encoding="utf-8") as file:
-            json.dump(base, fp=file, indent=2)
+        with open(self.path / resource_group_name / f"ip-{public_ip_address_name}.json", "w", encoding="utf-8") as file_obj:
+            json.dump(base, fp=file_obj, indent=2)
         return WaitableObject()

     def get(self, resource_group_name: str, public_ip_address_name: str) -> PublicIPAddress:
@@ -337,9 +337,9 @@ def list(self, resource_group_name: str) -> List[NetworkInterface]:
         except FileNotFoundError:
             raise ResourceNotFoundError("No resource group") from None
         elements = []
-        for file in files:
-            with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file:
-                elements.append(NetworkInterface.deserialize(json.load(file)))
+        for file_name in files:
+            with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj:
+                elements.append(NetworkInterface.deserialize(json.load(file_obj)))
         return elements

     def begin_create_or_update(self, resource_group_name: str, network_interface_name: str, parameters: Dict[str, Any]
@@ -584,7 +584,7 @@ def begin_restart(self, resource_group_name, vm_name  # pylint: disable=unused-a

     # pylint: disable=unused-argument,no-self-use
     def begin_run_command(self, resource_group_name, vm_name, parameters) -> ResultableObject:
         result = subprocess.run(parameters.script[0], shell=True, capture_output=True,  # pylint: disable=subprocess-run-check
-                                text=True)
+                                text=True, check=False)
         return ResultableObject(result.stdout, result.stderr)
diff --git a/unit_tests/test_sct_events_base.py b/unit_tests/test_sct_events_base.py
index f418b2299b7..c7045006af5 100644
--- a/unit_tests/test_sct_events_base.py
+++ b/unit_tests/test_sct_events_base.py
@@ -137,7 +137,7 @@ class Z(SctEvent):
         self.assertNotEqual(z, y)

     def test_equal_pickle_unpickle(self):
-        global Y  # pylint: disable=global-variable-not-assigned; assigned by class definition
+        global Y  # pylint: disable=global-variable-not-assigned; assigned by class definition  # noqa: PLW0603

         class Y(SctEvent):
             pass
@@ -372,7 +372,7 @@ class Mixin:
         self.assertEqual(yt.attr1, "value1")

     def test_add_subevent_type_pickle(self):
-        global Y  # pylint: disable=global-variable-not-assigned; assigned by class definition
+        global Y  # pylint: disable=global-variable-not-assigned; assigned by class definition  # noqa: PLW0603

         class Y(SctEvent):
             T: Type[SctEvent]
@@ -493,7 +493,7 @@ class Y(LogEvent):
         self.assertTrue(y._ready_to_publish)

     def test_clone_fresh(self):
-        global Y  # pylint: disable=global-variable-not-assigned; assigned by class definition
+        global Y  # pylint: disable=global-variable-not-assigned; assigned by class definition  # noqa: PLW0603

         class Y(LogEvent):
             pass
@@ -516,7 +516,7 @@ class Y(LogEvent):
         self.assertIsInstance(y, SctEventProtocol)

     def test_clone_with_info(self):
-        global Y  # pylint: disable=global-variable-not-assigned; assigned by class definition
+        global Y  # pylint: disable=global-variable-not-assigned; assigned by class definition  # noqa: PLW0603

         class Y(LogEvent):
             pass
diff --git a/unit_tests/test_version_utils.py b/unit_tests/test_version_utils.py
index 363ed33d15f..b9d85f14320 100644
--- a/unit_tests/test_version_utils.py
+++ b/unit_tests/test_version_utils.py
@@ -217,14 +217,13 @@ def __init__(self, scylla_version, nemesis_like_class):
             node_scylla_version = "2023.1.dev"
         elif scylla_version.startswith('master:') or scylla_version == "":
             node_scylla_version = "4.7.dev"
+        elif ":" in scylla_version:
+            node_scylla_version = scylla_version.split(":")[0]
+            if node_scylla_version.count(".") < 1:
+                node_scylla_version += ".0"
+            node_scylla_version += ".dev"
         else:
-            if ":" in scylla_version:
-                node_scylla_version = scylla_version.split(":")[0]
-                if node_scylla_version.count(".") < 1:
-                    node_scylla_version += ".0"
-                node_scylla_version += ".dev"
-            else:
-                node_scylla_version = scylla_version
+            node_scylla_version = scylla_version
         nodes = [type("Node", (object,), {"scylla_version": node_scylla_version})]
         if nemesis_like_class:
             self.cluster = type("Cluster", (object,), {
diff --git a/upgrade_schema_test.py b/upgrade_schema_test.py
index d361f3d7066..9954b245d58 100644
--- a/upgrade_schema_test.py
+++ b/upgrade_schema_test.py
@@ -258,8 +258,8 @@ def _get_thrift_client(self, host, port=9160):  # 9160

     def test_upgrade_schema(self):
-        global thrift_client
-
global cql_client + global thrift_client # noqa: PLW0603 + global cql_client # noqa: PLW0603 ips = [] for node in self.db_cluster.nodes: ips.append(node.public_ip_address) diff --git a/upgrade_test.py b/upgrade_test.py index c5e1f6740fa..58a9a8f19fd 100644 --- a/upgrade_test.py +++ b/upgrade_test.py @@ -195,7 +195,7 @@ def upgrade_node(self, node, upgrade_sstables=True): @decorate_with_context(ignore_abort_requested_errors) # https://github.com/scylladb/scylla/issues/10447#issuecomment-1194155163 - def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_version=None): + def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_version=None): # noqa: PLR0915 # pylint: disable=too-many-branches,too-many-statements new_scylla_repo = new_scylla_repo or self.params.get('new_scylla_repo') new_version = new_version or self.params.get('new_version') @@ -310,7 +310,7 @@ def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_v InfoEvent(message='upgrade_node - starting to "daemon-reload"').publish() node.remoter.run('sudo systemctl daemon-reload') InfoEvent(message='upgrade_node - ended to "daemon-reload"').publish() - else: + else: # noqa: PLR5501 if node.distro.is_rhel_like: InfoEvent(message='upgrade_node - starting to "yum update"').publish() node.remoter.run(r'sudo yum update {}\* -y'.format(scylla_pkg_ver)) @@ -605,7 +605,7 @@ def _update_scylla_yaml_on_node(node_to_update: BaseNode, updates: dict): with node_to_update.remote_scylla_yaml() as scylla_yaml: scylla_yaml.update(updates) - def test_rolling_upgrade(self): # pylint: disable=too-many-locals,too-many-statements + def test_rolling_upgrade(self): # pylint: disable=too-many-locals,too-many-statements # noqa: PLR0915 """ Upgrade half of nodes in the cluster, and start special read workload during the stage. 
Checksum method is changed to xxhash from Scylla 2.2, diff --git a/utils/build_system/create_test_release_jobs.py b/utils/build_system/create_test_release_jobs.py index a37cfaf8670..995d2e622bc 100644 --- a/utils/build_system/create_test_release_jobs.py +++ b/utils/build_system/create_test_release_jobs.py @@ -178,7 +178,7 @@ def create_job_tree(self, local_path: str | Path, # pylint: disable=too-many-ar self.create_directory(jenkins_path, display_name=display_name) for job_file in job_files: - job_file = Path(root) / job_file + job_file = Path(root) / job_file # noqa: PLW2901 if (job_file.suffix == '.jenkinsfile') and create_pipelines_jobs: self.create_pipeline_job(job_file, group_name=jenkins_path, job_name_suffix=job_name_suffix) if (job_file.suffix == '.xml') and create_freestyle_jobs: diff --git a/utils/cloud_cleanup/azure/clean_azure.py b/utils/cloud_cleanup/azure/clean_azure.py index 92fc04b9c56..4fd62fae2e0 100755 --- a/utils/cloud_cleanup/azure/clean_azure.py +++ b/utils/cloud_cleanup/azure/clean_azure.py @@ -146,12 +146,11 @@ def clean_azure_instances(dry_run=False): if should_keep(creation_time=get_vm_creation_time(v_m, resource_group.name), keep_hours=get_keep_hours(v_m)): LOGGER.info("Keeping VM: %s in resource group: %s", v_m.name, resource_group.name) clean_group = False # skip cleaning group if there's at least one VM to keep + elif get_keep_action(v_m) == "terminate": + vms_to_process.append((delete_virtual_machine, v_m.name)) else: - if get_keep_action(v_m) == "terminate": - vms_to_process.append((delete_virtual_machine, v_m.name)) - else: - vms_to_process.append((stop_virtual_machine, v_m.name)) - clean_group = False # skip cleaning group if there's at least one VM to stop + vms_to_process.append((stop_virtual_machine, v_m.name)) + clean_group = False # skip cleaning group if there's at least one VM to stop if clean_group: delete_resource_group(resource_group.name, dry_run=dry_run)
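
A note on the recurring rename in the fake_azure_service.py hunks above: they all
fix the same shadowing pattern, where the for-loop target "file" was immediately
rebound by the "with open(...) as file" target. The old loops still behaved
correctly, because the for statement rebinds the name on every iteration, but
after each with-block "file" referred to a closed file object rather than a
directory entry, which is exactly what ruff's redefined-loop-name rule (PLW2901)
is meant to catch. A minimal self-contained sketch of the corrected shape,
where load_elements is an illustrative helper and not a function from this patch:

    import json
    from pathlib import Path
    from typing import Any, Dict, List

    def load_elements(directory: Path) -> List[Dict[str, Any]]:
        # One clearly named binding per role: file_name stays a directory
        # entry and file_obj is the open handle, so neither shadows the other.
        elements = []
        for file_name in sorted(path.name for path in directory.iterdir()):
            with open(directory / file_name, "r", encoding="utf-8") as file_obj:
                elements.append(json.load(file_obj))
        return elements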
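
Likewise, the check=False added to begin_run_command states the subprocess.run
behaviour explicitly instead of suppressing pylint's subprocess-run-check; ruff
enforces the same requirement as PLW1510. Passing check=False keeps the old
semantics: a non-zero exit status is reported through the returned
CompletedProcess instead of raising subprocess.CalledProcessError, as check=True
would. A small sketch under the same caveat (run_script is a hypothetical
wrapper, not code from the patch):

    import subprocess
    from typing import Tuple

    def run_script(script: str) -> Tuple[str, str]:
        # check=False: callers inspect returncode/stdout/stderr themselves;
        # no exception is raised when the command exits non-zero.
        result = subprocess.run(script, shell=True, capture_output=True,
                                text=True, check=False)
        return result.stdout, result.stderr

Both kinds of finding can be reproduced locally by narrowing ruff to the
relevant rules, e.g. "ruff check --select PLW1510,PLW2901 unit_tests/"
(assuming a ruff build that implements the PL rule set, as this patch relies on).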