diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 27f69fbee3..b32798cb43 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -39,11 +39,10 @@ repos:
         types: [python]
         exclude: '\.sh$'

-      - id: pylint
-        name: pylint
-        entry: pylint -j 2 -d consider-using-f-string
+      - id: ruff
+        name: ruff
+        entry: ruff check --fix
         language: system
-        exclude: ^docker/alternator-dns/.*$
         types: [python]

   - repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook
diff --git a/artifacts_test.py b/artifacts_test.py
index 6b73fc982a..5a3dbe0872 100644
--- a/artifacts_test.py
+++ b/artifacts_test.py
@@ -469,7 +469,7 @@ def get_email_data(self):
         email_data = self._get_common_email_data()
         try:
             node = self.node
-        except Exception:  # pylint: disable=broad-except
+        except (ValueError, IndexError):
             node = None
         if node:
             scylla_packages = node.scylla_packages_installed
diff --git a/docker/alternator-dns/dns_server.py b/docker/alternator-dns/dns_server.py
index e1537738f4..8f52d0757a 100644
--- a/docker/alternator-dns/dns_server.py
+++ b/docker/alternator-dns/dns_server.py
@@ -20,8 +20,8 @@


 def livenodes_update():
-    global alternator_port
-    global livenodes
+    global alternator_port  # noqa: PLW0602
+    global livenodes  # noqa: PLW0603
     while True:
         # Contact one of the already known nodes by random, to fetch a new
         # list of known nodes.
@@ -34,7 +34,7 @@ def livenodes_update():
             # If we're successful, replace livenodes by the new list
             livenodes = a
             print(livenodes)
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             # TODO: contacting this ip was unsuccessful, maybe we should
             # remove it from the list of live nodes.
             pass
diff --git a/docker/env/version b/docker/env/version
index 029831272f..4de3bf6a73 100644
--- a/docker/env/version
+++ b/docker/env/version
@@ -1 +1 @@
-1.69-update-scylla-driver-3.26.8
+1.70-introduce-ruff
diff --git a/functional_tests/scylla_operator/conftest.py b/functional_tests/scylla_operator/conftest.py
index 67e543ec63..0735021d02 100644
--- a/functional_tests/scylla_operator/conftest.py
+++ b/functional_tests/scylla_operator/conftest.py
@@ -70,7 +70,7 @@ def publish_test_result():

 @pytest.fixture(autouse=True, scope='package', name="tester")
 def fixture_tester() -> ScyllaOperatorFunctionalClusterTester:
-    global TESTER  # pylint: disable=global-statement
+    global TESTER  # pylint: disable=global-statement  # noqa: PLW0603
     os.chdir(sct_abs_path())
     tester_inst = ScyllaOperatorFunctionalClusterTester()
     TESTER = tester_inst  # putting tester global, so we can report skipped test (one with mark.skip)
@@ -175,7 +175,7 @@ def _bring_cluster_back_to_original_state(
             db_cluster.restart_scylla()
             db_cluster.wait_for_nodes_up_and_normal(
                 nodes=db_cluster.nodes, verification_node=db_cluster.nodes[0])
-    except Exception as exc:  # pylint: disable=broad-except
+    except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
         tester.healthy_flag = False
         pytest.fail("Failed to bring cluster nodes back to original state due to :\n" +
                     "".join(traceback.format_exception(type(exc), exc, exc.__traceback__)))
diff --git a/functional_tests/scylla_operator/test_functional.py b/functional_tests/scylla_operator/test_functional.py
index 1a18d46554..a43e98b02d 100644
--- a/functional_tests/scylla_operator/test_functional.py
+++ b/functional_tests/scylla_operator/test_functional.py
@@ -322,7 +322,7 @@ def wait_for_cleanup_logs(log_follower_name, log_follower, db_cluster):
             time.sleep(4)
             db_cluster.nodes[0].run_cqlsh(cmd=f"DROP KEYSPACE IF EXISTS {current_ks_name}", timeout=60)
             time.sleep(4)
-    except Exception as exc:  # pylint: disable=broad-except
+    except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
         # NOTE: we don't care if some of the queries fail.
         # At first, there are redundant ones and, at second, they are utilitary.
         log.warning("Utilitary CQL query has failed: %s", exc)
@@ -646,7 +646,7 @@ def change_cluster_spec() -> None:
             # NOTE: increase the value only when the sysctl spec update is successful
             # to avoid false negative results in further assertions
             expected_aio_max_nr_value += 1
-        except Exception as error:  # pylint: disable=broad-except
+        except Exception as error:  # pylint: disable=broad-except  # noqa: BLE001
             str_error = str(error)
             log.debug("Change /spec/sysctls value to %d failed. Error: %s",
                       expected_aio_max_nr_value, str_error)
diff --git a/longevity_test.py b/longevity_test.py
index 71e0e06f1e..43d2d0982d 100644
--- a/longevity_test.py
+++ b/longevity_test.py
@@ -329,7 +329,7 @@ def chunks(_list, chunk_size):
             self._pre_create_templated_user_schema(batch_start=extra_tables_idx,
                                                    batch_end=extra_tables_idx+num_of_newly_created_tables)
             for i in range(num_of_newly_created_tables):
-                batch += self.create_templated_user_stress_params(extra_tables_idx + i, cs_profile=cs_profile)
+                batch.append(self.create_templated_user_stress_params(extra_tables_idx + i, cs_profile=cs_profile))

         nodes_ips = self.all_node_ips_for_stress_command
         for params in batch:
@@ -464,13 +464,9 @@ def _flush_all_nodes(self):
     def get_email_data(self):
         self.log.info("Prepare data for email")

-        email_data = {}
         grafana_dataset = {}

-        try:
-            email_data = self._get_common_email_data()
-        except Exception as error:  # pylint: disable=broad-except
-            self.log.exception("Error in gathering common email data: Error:\n%s", error, exc_info=error)
+        email_data = self._get_common_email_data()

         try:
             grafana_dataset = self.monitors.get_grafana_screenshot_and_snapshot(
diff --git a/mgmt_cli_test.py b/mgmt_cli_test.py
index 7cfcf4cb4c..e4d6afc88e 100644
--- a/mgmt_cli_test.py
+++ b/mgmt_cli_test.py
@@ -848,6 +848,7 @@ def test_repair_multiple_keyspace_types(self):  # pylint: disable=invalid-name
             keyspace_repair_percentage = per_keyspace_progress.get(keyspace_name, None)
             assert keyspace_repair_percentage is not None, \
                 "The keyspace {} was not included in the repair!".format(keyspace_name)
+            assert keyspace_repair_percentage == 100, \
                 "The repair of the keyspace {} stopped at {}%".format(
                     keyspace_name, keyspace_repair_percentage)
diff --git a/mgmt_upgrade_test.py b/mgmt_upgrade_test.py
index a456d55e88..b05a254e16 100644
--- a/mgmt_upgrade_test.py
+++ b/mgmt_upgrade_test.py
@@ -277,9 +277,8 @@ def validate_previous_task_details(task, previous_task_details):
             # and as a result it could be a BIT imprecise
             if abs(delta.total_seconds()) > 60:
                 mismatched_details_name_list.append(detail_name)
-        else:
-            if current_value != previous_task_details[detail_name]:
-                mismatched_details_name_list.append(detail_name)
+        elif current_value != previous_task_details[detail_name]:
+            mismatched_details_name_list.append(detail_name)
     complete_error_description = _create_mismatched_details_error_message(previous_task_details,
                                                                           current_task_details,
                                                                           mismatched_details_name_list)
diff --git a/performance_regression_row_level_repair_test.py b/performance_regression_row_level_repair_test.py
index 3b5bfc7ee7..00be4fcf35 100644
--- a/performance_regression_row_level_repair_test.py
+++ b/performance_regression_row_level_repair_test.py
@@ -80,7 +80,7 @@ def preload_data(self, consistency_level=None):
         for stress_cmd in prepare_write_cmd:
             if consistency_level:
-                stress_cmd = self._update_cl_in_stress_cmd(
+                stress_cmd = self._update_cl_in_stress_cmd(  # noqa: PLW2901
                     str_stress_cmd=stress_cmd, consistency_level=consistency_level)
             params.update({'stress_cmd': stress_cmd})
diff --git a/performance_regression_test.py b/performance_regression_test.py
index 4414a81f76..b0e9049f3a 100644
--- a/performance_regression_test.py
+++ b/performance_regression_test.py
@@ -176,7 +176,7 @@ def display_results(self, results, test_name=''):
             with open(os.path.join(self.logdir, 'jenkins_perf_PerfPublisher.xml'), 'w', encoding="utf-8") as pref_file:
                 content = """%s""" % (test_name, test_xml)
                 pref_file.write(content)
-        except Exception as ex:  # pylint: disable=broad-except
+        except Exception as ex:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.debug('Failed to display results: {0}'.format(results))
             self.log.debug('Exception: {0}'.format(ex))
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000..a1f2bddf56
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,16 @@
+[tool.ruff]
+lint.select = ["PL", "YTT", "BLE"]
+
+lint.ignore = ["E501", "PLR2004"]
+
+target-version = "py310"
+
+force-exclude = true
+line-length = 240
+respect-gitignore = true
+
+
+[tool.ruff.lint.pylint]
+max-args = 12
+max-statements = 100
+max-branches = 24
diff --git a/requirements.in b/requirements.in
index dc036ffc1f..421892b36d 100644
--- a/requirements.in
+++ b/requirements.in
@@ -30,7 +30,7 @@ python-jenkins==1.7.0
 ssh2-python==1.0.0
 argus-alm==0.12.3
 parameterized==0.8.1
-pylint==2.11.1  # Needed for pre-commit hooks
+ruff==0.4.7  # Needed for pre-commit hooks
 autopep8==1.5.7  # Needed for pre-commit hooks
 kubernetes==24.2.0
 packaging==21.3
diff --git a/requirements.txt b/requirements.txt
index 415947a268..a9c6e2444d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -103,10 +103,6 @@ argus-alm==0.12.3 \
     --hash=sha256:635f4aac99f6da0e04ed064cda49cac5a45741c7f848c10fbd717e1809278eaf \
     --hash=sha256:9264a554c7f7b2f8cd364424ad043d0a48a70416f5a5f41fc2bb62030cec6f1b
     # via -r requirements.in
-astroid==2.8.6 \
-    --hash=sha256:5f6f75e45f15290e73b56f9dfde95b4bf96382284cde406ef4203e928335a495 \
-    --hash=sha256:cd8326b424c971e7d87678609cf6275d22028afd37d6ac59c16d47f1245882f6
-    # via pylint
 async-timeout==4.0.3 \
     --hash=sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f \
     --hash=sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028
@@ -134,9 +134,9 @@ azure-common==1.1.28 \
     # via
     #   azure-mgmt-resource
     #   azure-mgmt-resourcegraph
     #   azure-mgmt-subscription
-azure-core==1.30.1 \
-    --hash=sha256:26273a254131f84269e8ea4464f3560c731f29c0c1f69ac99010845f239c1a8f \
-    --hash=sha256:7c5ee397e48f281ec4dd773d67a0a47a0962ed6fa833036057f9ea067f688e74
+azure-core==1.30.2 \
+    --hash=sha256:a14dc210efcd608821aa472d9fb8e8d035d29b68993819147bc290a8ac224472 \
+    --hash=sha256:cf019c1ca832e96274ae85abd3d9f752397194d9fea3b41487290562ac8abe4a
     # via
     #   azure-identity
     #   azure-mgmt-core
@@ -235,9 +231,9 @@ botocore==1.31.4 \
     # via
     #   awscli
     #   boto3
     #   s3transfer
-botocore-stubs==1.34.94 \
-    --hash=sha256:64d80a3467e3b19939e9c2750af33328b3087f8f524998dbdf7ed168227f507d \
-    --hash=sha256:b0345f55babd8b901c53804fc5c326a4a0bd2e23e3b71f9ea5d9f7663466e6ba
+botocore-stubs==1.34.127 \
+    --hash=sha256:8aabb7b22e2b19df94dd72bed6b849f2146452a2aa501554d619ef76287bb2d6 \
+    --hash=sha256:f78543fe93c21634458090d85cce68edd0c994f87a615fc6e91af4d26e96717f
     # via boto3-stubs
 build==1.2.1 \
--hash=sha256:526263f4870c26f26c433545579475377b2b7588b6f1eac76a001e873ae3e19d \ @@ -423,39 +419,39 @@ colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 # via awscli -cryptography==42.0.7 \ - --hash=sha256:02c0eee2d7133bdbbc5e24441258d5d2244beb31da5ed19fbb80315f4bbbff55 \ - --hash=sha256:0d563795db98b4cd57742a78a288cdbdc9daedac29f2239793071fe114f13785 \ - --hash=sha256:16268d46086bb8ad5bf0a2b5544d8a9ed87a0e33f5e77dd3c3301e63d941a83b \ - --hash=sha256:1a58839984d9cb34c855197043eaae2c187d930ca6d644612843b4fe8513c886 \ - --hash=sha256:2954fccea107026512b15afb4aa664a5640cd0af630e2ee3962f2602693f0c82 \ - --hash=sha256:2e47577f9b18723fa294b0ea9a17d5e53a227867a0a4904a1a076d1646d45ca1 \ - --hash=sha256:31adb7d06fe4383226c3e963471f6837742889b3c4caa55aac20ad951bc8ffda \ - --hash=sha256:3577d029bc3f4827dd5bf8bf7710cac13527b470bbf1820a3f394adb38ed7d5f \ - --hash=sha256:36017400817987670037fbb0324d71489b6ead6231c9604f8fc1f7d008087c68 \ - --hash=sha256:362e7197754c231797ec45ee081f3088a27a47c6c01eff2ac83f60f85a50fe60 \ - --hash=sha256:3de9a45d3b2b7d8088c3fbf1ed4395dfeff79d07842217b38df14ef09ce1d8d7 \ - --hash=sha256:4f698edacf9c9e0371112792558d2f705b5645076cc0aaae02f816a0171770fd \ - --hash=sha256:5482e789294854c28237bba77c4c83be698be740e31a3ae5e879ee5444166582 \ - --hash=sha256:5e44507bf8d14b36b8389b226665d597bc0f18ea035d75b4e53c7b1ea84583cc \ - --hash=sha256:779245e13b9a6638df14641d029add5dc17edbef6ec915688f3acb9e720a5858 \ - --hash=sha256:789caea816c6704f63f6241a519bfa347f72fbd67ba28d04636b7c6b7da94b0b \ - --hash=sha256:7f8b25fa616d8b846aef64b15c606bb0828dbc35faf90566eb139aa9cff67af2 \ - --hash=sha256:8cb8ce7c3347fcf9446f201dc30e2d5a3c898d009126010cbd1f443f28b52678 \ - --hash=sha256:93a3209f6bb2b33e725ed08ee0991b92976dfdcf4e8b38646540674fc7508e13 \ - --hash=sha256:a3a5ac8b56fe37f3125e5b72b61dcde43283e5370827f5233893d461b7360cd4 \ - --hash=sha256:a47787a5e3649008a1102d3df55424e86606c9bae6fb77ac59afe06d234605f8 \ - --hash=sha256:a79165431551042cc9d1d90e6145d5d0d3ab0f2d66326c201d9b0e7f5bf43604 \ - --hash=sha256:a987f840718078212fdf4504d0fd4c6effe34a7e4740378e59d47696e8dfb477 \ - --hash=sha256:a9bc127cdc4ecf87a5ea22a2556cab6c7eda2923f84e4f3cc588e8470ce4e42e \ - --hash=sha256:bd13b5e9b543532453de08bcdc3cc7cebec6f9883e886fd20a92f26940fd3e7a \ - --hash=sha256:c65f96dad14f8528a447414125e1fc8feb2ad5a272b8f68477abbcc1ea7d94b9 \ - --hash=sha256:d8e3098721b84392ee45af2dd554c947c32cc52f862b6a3ae982dbb90f577f14 \ - --hash=sha256:e6b79d0adb01aae87e8a44c2b64bc3f3fe59515280e00fb6d57a7267a2583cda \ - --hash=sha256:e6b8f1881dac458c34778d0a424ae5769de30544fc678eac51c1c8bb2183e9da \ - --hash=sha256:e9b2a6309f14c0497f348d08a065d52f3020656f675819fc405fb63bbcd26562 \ - --hash=sha256:ecbfbc00bf55888edda9868a4cf927205de8499e7fabe6c050322298382953f2 \ - --hash=sha256:efd0bf5205240182e0f13bcaea41be4fdf5c22c5129fc7ced4a0282ac86998c9 +cryptography==42.0.8 \ + --hash=sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad \ + --hash=sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583 \ + --hash=sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b \ + --hash=sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c \ + --hash=sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1 \ + --hash=sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648 \ + 
--hash=sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949 \ + --hash=sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba \ + --hash=sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c \ + --hash=sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9 \ + --hash=sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d \ + --hash=sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c \ + --hash=sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e \ + --hash=sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2 \ + --hash=sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d \ + --hash=sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7 \ + --hash=sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70 \ + --hash=sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2 \ + --hash=sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7 \ + --hash=sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14 \ + --hash=sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe \ + --hash=sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e \ + --hash=sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71 \ + --hash=sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961 \ + --hash=sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7 \ + --hash=sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c \ + --hash=sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28 \ + --hash=sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842 \ + --hash=sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902 \ + --hash=sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801 \ + --hash=sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a \ + --hash=sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e # via # azure-identity # azure-storage-blob @@ -516,9 +512,9 @@ fido2==0.9.3 \ # via # ctap-keyring-device # gimme-aws-creds -filelock==3.14.0 \ - --hash=sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f \ - --hash=sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a +filelock==3.15.1 \ + --hash=sha256:58a2549afdf9e02e10720eaa4d4470f56386d7a6f72edd7d0596337af8ed7ad8 \ + --hash=sha256:71b3102950e91dfc1bb4209b64be4dc8854f40e5f534428d8684f953ac847fac # via virtualenv flatdict==4.0.1 \ --hash=sha256:cd32f08fd31ed21eb09ebc76f06b6bd12046a24f77beb1fd0281917e47f26742 @@ -632,9 +628,9 @@ google-api-python-client==2.93.0 \ --hash=sha256:62ee28e96031a10a1c341f226a75ac6a4f16bdb1d888dc8222b2cdca133d0031 \ --hash=sha256:f34abb671afd488bd19d30721ea20fb30d3796ddd825d6f91f26d8c718a9f07d # via -r requirements.in -google-auth==2.29.0 \ - --hash=sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360 \ - --hash=sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415 +google-auth==2.30.0 \ + --hash=sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5 \ + --hash=sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688 # via # google-api-core # google-api-python-client @@ -728,9 +724,9 @@ google-crc32c==1.5.0 \ --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ 
--hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 # via google-resumable-media -google-resumable-media==2.7.0 \ - --hash=sha256:5f18f5fa9836f4b083162064a1c2c98c17239bfda9ca50ad970ccf905f3e625b \ - --hash=sha256:79543cfe433b63fd81c0844b7803aba1bb8950b47bedf7d980c38fa123937e08 +google-resumable-media==2.7.1 \ + --hash=sha256:103ebc4ba331ab1bfdac0250f8033627a2cd7cde09e7ccff9181e31ba4315b2c \ + --hash=sha256:eae451a7b2e2cdbaaa0fd2eb00cc8a1ee5e95e16b55597359cbc3d27d7d90e33 # via google-cloud-storage googleapis-common-protos==1.63.1 \ --hash=sha256:0e1c2cdfcbc354b76e4a211a35ea35d6926a835cba1377073c4861db904a1877 \ @@ -842,10 +838,6 @@ isodate==0.6.1 \ # azure-mgmt-compute # azure-storage-blob # msrest -isort==5.13.2 \ - --hash=sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109 \ - --hash=sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6 - # via pylint jaraco-classes==3.4.0 \ --hash=sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd \ --hash=sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790 @@ -892,45 +884,6 @@ kubernetes==24.2.0 \ --hash=sha256:9900f12ae92007533247167d14cdee949cd8c7721f88b4a7da5f5351da3834cd \ --hash=sha256:da19d58865cf903a8c7b9c3691a2e6315d583a98f0659964656dfdf645bf7e49 # via -r requirements.in -lazy-object-proxy==1.10.0 \ - --hash=sha256:009e6bb1f1935a62889ddc8541514b6a9e1fcf302667dcb049a0be5c8f613e56 \ - --hash=sha256:02c83f957782cbbe8136bee26416686a6ae998c7b6191711a04da776dc9e47d4 \ - --hash=sha256:0aefc7591920bbd360d57ea03c995cebc204b424524a5bd78406f6e1b8b2a5d8 \ - --hash=sha256:127a789c75151db6af398b8972178afe6bda7d6f68730c057fbbc2e96b08d282 \ - --hash=sha256:18dd842b49456aaa9a7cf535b04ca4571a302ff72ed8740d06b5adcd41fe0757 \ - --hash=sha256:217138197c170a2a74ca0e05bddcd5f1796c735c37d0eee33e43259b192aa424 \ - --hash=sha256:2297f08f08a2bb0d32a4265e98a006643cd7233fb7983032bd61ac7a02956b3b \ - --hash=sha256:2fc0a92c02fa1ca1e84fc60fa258458e5bf89d90a1ddaeb8ed9cc3147f417255 \ - --hash=sha256:30b339b2a743c5288405aa79a69e706a06e02958eab31859f7f3c04980853b70 \ - --hash=sha256:366c32fe5355ef5fc8a232c5436f4cc66e9d3e8967c01fb2e6302fd6627e3d94 \ - --hash=sha256:3ad54b9ddbe20ae9f7c1b29e52f123120772b06dbb18ec6be9101369d63a4074 \ - --hash=sha256:5ad9e6ed739285919aa9661a5bbed0aaf410aa60231373c5579c6b4801bd883c \ - --hash=sha256:5faf03a7d8942bb4476e3b62fd0f4cf94eaf4618e304a19865abf89a35c0bbee \ - --hash=sha256:75fc59fc450050b1b3c203c35020bc41bd2695ed692a392924c6ce180c6f1dc9 \ - --hash=sha256:76a095cfe6045c7d0ca77db9934e8f7b71b14645f0094ffcd842349ada5c5fb9 \ - --hash=sha256:78247b6d45f43a52ef35c25b5581459e85117225408a4128a3daf8bf9648ac69 \ - --hash=sha256:782e2c9b2aab1708ffb07d4bf377d12901d7a1d99e5e410d648d892f8967ab1f \ - --hash=sha256:7ab7004cf2e59f7c2e4345604a3e6ea0d92ac44e1c2375527d56492014e690c3 \ - --hash=sha256:80b39d3a151309efc8cc48675918891b865bdf742a8616a337cb0090791a0de9 \ - --hash=sha256:80fa48bd89c8f2f456fc0765c11c23bf5af827febacd2f523ca5bc1893fcc09d \ - --hash=sha256:855e068b0358ab916454464a884779c7ffa312b8925c6f7401e952dcf3b89977 \ - --hash=sha256:92f09ff65ecff3108e56526f9e2481b8116c0b9e1425325e13245abfd79bdb1b \ - --hash=sha256:952c81d415b9b80ea261d2372d2a4a2332a3890c2b83e0535f263ddfe43f0d43 \ - --hash=sha256:9a3a87cf1e133e5b1994144c12ca4aa3d9698517fe1e2ca82977781b16955658 \ - --hash=sha256:9e4ed0518a14dd26092614412936920ad081a424bdcb54cc13349a8e2c6d106a \ - --hash=sha256:a899b10e17743683b293a729d3a11f2f399e8a90c73b089e29f5d0fe3509f0dd \ - 
--hash=sha256:b1f711e2c6dcd4edd372cf5dec5c5a30d23bba06ee012093267b3376c079ec83 \ - --hash=sha256:b4f87d4ed9064b2628da63830986c3d2dca7501e6018347798313fcf028e2fd4 \ - --hash=sha256:cb73507defd385b7705c599a94474b1d5222a508e502553ef94114a143ec6696 \ - --hash=sha256:dc0d2fc424e54c70c4bc06787e4072c4f3b1aa2f897dfdc34ce1013cf3ceef05 \ - --hash=sha256:e221060b701e2aa2ea991542900dd13907a5c90fa80e199dbf5a03359019e7a3 \ - --hash=sha256:e271058822765ad5e3bca7f05f2ace0de58a3f4e62045a8c90a0dfd2f8ad8cc6 \ - --hash=sha256:e2adb09778797da09d2b5ebdbceebf7dd32e2c96f79da9052b2e87b6ea495895 \ - --hash=sha256:e333e2324307a7b5d86adfa835bb500ee70bfcd1447384a822e96495796b0ca4 \ - --hash=sha256:e98c8af98d5707dcdecc9ab0863c0ea6e88545d42ca7c3feffb6b4d1e370c7ba \ - --hash=sha256:edb45bb8278574710e68a6b021599a10ce730d156e5b254941754a9cc0b17d03 \ - --hash=sha256:fec03caabbc6b59ea4a638bee5fce7117be8e99a4103d9d5ad77f15d6f81020c - # via astroid ldap3==2.9.1 \ --hash=sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70 \ --hash=sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f @@ -1010,19 +963,15 @@ mbstrdecoder==1.1.3 \ # sqliteschema # subprocrunner # typepy -mccabe==0.6.1 \ - --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ - --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f - # via pylint -more-itertools==10.2.0 \ - --hash=sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684 \ - --hash=sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1 +more-itertools==10.3.0 \ + --hash=sha256:e5d93ef411224fbcef366a6e8ddc4c5781bc6359d43412a65dd5964e46111463 \ + --hash=sha256:ea6a02e24a9161e51faad17a8782b92a0df82c12c1c8886fec7f0c3fa1a1b320 # via # jaraco-classes # jaraco-functools -msal==1.28.0 \ - --hash=sha256:3064f80221a21cd535ad8c3fafbb3a3582cd9c7e9af0bb789ae14f726a0ca99b \ - --hash=sha256:80bbabe34567cb734efd2ec1869b2d98195c927455369d8077b3c542088c5c9d +msal==1.28.1 \ + --hash=sha256:563c2d70de77a2ca9786aab84cb4e133a38a6897e6676774edc23d610bfc9e7b \ + --hash=sha256:d72bbfe2d5c2f2555f4bc6205be4450ddfd12976610dd9a16a9ab0f05c68b64d # via # azure-identity # msal-extensions @@ -1195,53 +1144,53 @@ orderedmultidict==1.0.1 \ --hash=sha256:04070bbb5e87291cc9bfa51df413677faf2141c73c61d2a5f7b26bea3cd882ad \ --hash=sha256:43c839a17ee3cdd62234c47deca1a8508a3f2ca1d0678a3bf791c87cf84adbf3 # via furl -orjson==3.10.3 \ - --hash=sha256:0943a96b3fa09bee1afdfccc2cb236c9c64715afa375b2af296c73d91c23eab2 \ - --hash=sha256:0a62f9968bab8a676a164263e485f30a0b748255ee2f4ae49a0224be95f4532b \ - --hash=sha256:16bda83b5c61586f6f788333d3cf3ed19015e3b9019188c56983b5a299210eb5 \ - --hash=sha256:1770e2a0eae728b050705206d84eda8b074b65ee835e7f85c919f5705b006c9b \ - --hash=sha256:17e0713fc159abc261eea0f4feda611d32eabc35708b74bef6ad44f6c78d5ea0 \ - --hash=sha256:18566beb5acd76f3769c1d1a7ec06cdb81edc4d55d2765fb677e3eaa10fa99e0 \ - --hash=sha256:1952c03439e4dce23482ac846e7961f9d4ec62086eb98ae76d97bd41d72644d7 \ - --hash=sha256:1bd2218d5a3aa43060efe649ec564ebedec8ce6ae0a43654b81376216d5ebd42 \ - --hash=sha256:1c23dfa91481de880890d17aa7b91d586a4746a4c2aa9a145bebdbaf233768d5 \ - --hash=sha256:252124b198662eee80428f1af8c63f7ff077c88723fe206a25df8dc57a57b1fa \ - --hash=sha256:2b166507acae7ba2f7c315dcf185a9111ad5e992ac81f2d507aac39193c2c818 \ - --hash=sha256:2e5e176c994ce4bd434d7aafb9ecc893c15f347d3d2bbd8e7ce0b63071c52e25 \ - --hash=sha256:3582b34b70543a1ed6944aca75e219e1192661a63da4d039d088a09c67543b08 \ - 
--hash=sha256:382e52aa4270a037d41f325e7d1dfa395b7de0c367800b6f337d8157367bf3a7 \ - --hash=sha256:416b195f78ae461601893f482287cee1e3059ec49b4f99479aedf22a20b1098b \ - --hash=sha256:4ad1f26bea425041e0a1adad34630c4825a9e3adec49079b1fb6ac8d36f8b754 \ - --hash=sha256:4c895383b1ec42b017dd2c75ae8a5b862fc489006afde06f14afbdd0309b2af0 \ - --hash=sha256:5102f50c5fc46d94f2033fe00d392588564378260d64377aec702f21a7a22912 \ - --hash=sha256:520de5e2ef0b4ae546bea25129d6c7c74edb43fc6cf5213f511a927f2b28148b \ - --hash=sha256:544a12eee96e3ab828dbfcb4d5a0023aa971b27143a1d35dc214c176fdfb29b3 \ - --hash=sha256:73100d9abbbe730331f2242c1fc0bcb46a3ea3b4ae3348847e5a141265479700 \ - --hash=sha256:831c6ef73f9aa53c5f40ae8f949ff7681b38eaddb6904aab89dca4d85099cb78 \ - --hash=sha256:8bc7a4df90da5d535e18157220d7915780d07198b54f4de0110eca6b6c11e290 \ - --hash=sha256:8d0b84403d287d4bfa9bf7d1dc298d5c1c5d9f444f3737929a66f2fe4fb8f134 \ - --hash=sha256:8d40c7f7938c9c2b934b297412c067936d0b54e4b8ab916fd1a9eb8f54c02294 \ - --hash=sha256:9059d15c30e675a58fdcd6f95465c1522b8426e092de9fff20edebfdc15e1cb0 \ - --hash=sha256:93433b3c1f852660eb5abdc1f4dd0ced2be031ba30900433223b28ee0140cde5 \ - --hash=sha256:978be58a68ade24f1af7758626806e13cff7748a677faf95fbb298359aa1e20d \ - --hash=sha256:99b880d7e34542db89f48d14ddecbd26f06838b12427d5a25d71baceb5ba119d \ - --hash=sha256:9a7bc9e8bc11bac40f905640acd41cbeaa87209e7e1f57ade386da658092dc16 \ - --hash=sha256:9e253498bee561fe85d6325ba55ff2ff08fb5e7184cd6a4d7754133bd19c9195 \ - --hash=sha256:9f3e87733823089a338ef9bbf363ef4de45e5c599a9bf50a7a9b82e86d0228da \ - --hash=sha256:9fb6c3f9f5490a3eb4ddd46fc1b6eadb0d6fc16fb3f07320149c3286a1409dd8 \ - --hash=sha256:a39aa73e53bec8d410875683bfa3a8edf61e5a1c7bb4014f65f81d36467ea098 \ - --hash=sha256:b69a58a37dab856491bf2d3bbf259775fdce262b727f96aafbda359cb1d114d8 \ - --hash=sha256:b8d4d1a6868cde356f1402c8faeb50d62cee765a1f7ffcfd6de732ab0581e063 \ - --hash=sha256:ba7f67aa7f983c4345eeda16054a4677289011a478ca947cd69c0a86ea45e534 \ - --hash=sha256:be2719e5041e9fb76c8c2c06b9600fe8e8584e6980061ff88dcbc2691a16d20d \ - --hash=sha256:be2aab54313752c04f2cbaab4515291ef5af8c2256ce22abc007f89f42f49109 \ - --hash=sha256:c0403ed9c706dcd2809f1600ed18f4aae50be263bd7112e54b50e2c2bc3ebd6d \ - --hash=sha256:c8334c0d87103bb9fbbe59b78129f1f40d1d1e8355bbed2ca71853af15fa4ed3 \ - --hash=sha256:cb0175a5798bdc878956099f5c54b9837cb62cfbf5d0b86ba6d77e43861bcec2 \ - --hash=sha256:ccaa0a401fc02e8828a5bedfd80f8cd389d24f65e5ca3954d72c6582495b4bcf \ - --hash=sha256:cf20465e74c6e17a104ecf01bf8cd3b7b252565b4ccee4548f18b012ff2f8069 \ - --hash=sha256:d4a654ec1de8fdaae1d80d55cee65893cb06494e124681ab335218be6a0691e7 \ - --hash=sha256:e852baafceff8da3c9defae29414cc8513a1586ad93e45f27b89a639c68e8176 +orjson==3.10.5 \ + --hash=sha256:03b565c3b93f5d6e001db48b747d31ea3819b89abf041ee10ac6988886d18e01 \ + --hash=sha256:099e81a5975237fda3100f918839af95f42f981447ba8f47adb7b6a3cdb078fa \ + --hash=sha256:10c0eb7e0c75e1e486c7563fe231b40fdd658a035ae125c6ba651ca3b07936f5 \ + --hash=sha256:1146bf85ea37ac421594107195db8bc77104f74bc83e8ee21a2e58596bfb2f04 \ + --hash=sha256:1670fe88b116c2745a3a30b0f099b699a02bb3482c2591514baf5433819e4f4d \ + --hash=sha256:185c394ef45b18b9a7d8e8f333606e2e8194a50c6e3c664215aae8cf42c5385e \ + --hash=sha256:1ad1de7fef79736dde8c3554e75361ec351158a906d747bd901a52a5c9c8d24b \ + --hash=sha256:235dadefb793ad12f7fa11e98a480db1f7c6469ff9e3da5e73c7809c700d746b \ + --hash=sha256:28afa96f496474ce60d3340fe8d9a263aa93ea01201cd2bad844c45cd21f5268 \ + 
--hash=sha256:2d97531cdfe9bdd76d492e69800afd97e5930cb0da6a825646667b2c6c6c0211 \ + --hash=sha256:338fd4f071b242f26e9ca802f443edc588fa4ab60bfa81f38beaedf42eda226c \ + --hash=sha256:36a10f43c5f3a55c2f680efe07aa93ef4a342d2960dd2b1b7ea2dd764fe4a37c \ + --hash=sha256:3d21b9983da032505f7050795e98b5d9eee0df903258951566ecc358f6696969 \ + --hash=sha256:51bbcdea96cdefa4a9b4461e690c75ad4e33796530d182bdd5c38980202c134a \ + --hash=sha256:53ed1c879b10de56f35daf06dbc4a0d9a5db98f6ee853c2dbd3ee9d13e6f302f \ + --hash=sha256:545d493c1f560d5ccfc134803ceb8955a14c3fcb47bbb4b2fee0232646d0b932 \ + --hash=sha256:584c902ec19ab7928fd5add1783c909094cc53f31ac7acfada817b0847975f26 \ + --hash=sha256:5a35455cc0b0b3a1eaf67224035f5388591ec72b9b6136d66b49a553ce9eb1e6 \ + --hash=sha256:5df58d206e78c40da118a8c14fc189207fffdcb1f21b3b4c9c0c18e839b5a214 \ + --hash=sha256:64c9cc089f127e5875901ac05e5c25aa13cfa5dbbbd9602bda51e5c611d6e3e2 \ + --hash=sha256:68f85ecae7af14a585a563ac741b0547a3f291de81cd1e20903e79f25170458f \ + --hash=sha256:6970ed7a3126cfed873c5d21ece1cd5d6f83ca6c9afb71bbae21a0b034588d96 \ + --hash=sha256:6b68742c469745d0e6ca5724506858f75e2f1e5b59a4315861f9e2b1df77775a \ + --hash=sha256:7a5baef8a4284405d96c90c7c62b755e9ef1ada84c2406c24a9ebec86b89f46d \ + --hash=sha256:7d10cc1b594951522e35a3463da19e899abe6ca95f3c84c69e9e901e0bd93d38 \ + --hash=sha256:85c89131d7b3218db1b24c4abecea92fd6c7f9fab87441cfc342d3acc725d807 \ + --hash=sha256:8a11d459338f96a9aa7f232ba95679fc0c7cedbd1b990d736467894210205c09 \ + --hash=sha256:8c13ca5e2ddded0ce6a927ea5a9f27cae77eee4c75547b4297252cb20c4d30e6 \ + --hash=sha256:9cd684927af3e11b6e754df80b9ffafd9fb6adcaa9d3e8fdd5891be5a5cad51e \ + --hash=sha256:b2efbd67feff8c1f7728937c0d7f6ca8c25ec81373dc8db4ef394c1d93d13dc5 \ + --hash=sha256:b39e006b00c57125ab974362e740c14a0c6a66ff695bff44615dcf4a70ce2b86 \ + --hash=sha256:b6c8e30adfa52c025f042a87f450a6b9ea29649d828e0fec4858ed5e6caecf63 \ + --hash=sha256:be79e2393679eda6a590638abda16d167754393f5d0850dcbca2d0c3735cebe2 \ + --hash=sha256:c05f16701ab2a4ca146d0bca950af254cb7c02f3c01fca8efbbad82d23b3d9d4 \ + --hash=sha256:c4057c3b511bb8aef605616bd3f1f002a697c7e4da6adf095ca5b84c0fd43595 \ + --hash=sha256:c4a65310ccb5c9910c47b078ba78e2787cb3878cdded1702ac3d0da71ddc5228 \ + --hash=sha256:ca0b3a94ac8d3886c9581b9f9de3ce858263865fdaa383fbc31c310b9eac07c9 \ + --hash=sha256:cc28e90a7cae7fcba2493953cff61da5a52950e78dc2dacfe931a317ee3d8de7 \ + --hash=sha256:cdf7365063e80899ae3a697def1277c17a7df7ccfc979990a403dfe77bb54d40 \ + --hash=sha256:d69858c32f09c3e1ce44b617b3ebba1aba030e777000ebdf72b0d8e365d0b2b3 \ + --hash=sha256:dbead71dbe65f959b7bd8cf91e0e11d5338033eba34c114f69078d59827ee139 \ + --hash=sha256:dcbe82b35d1ac43b0d84072408330fd3295c2896973112d495e7234f7e3da2e1 \ + --hash=sha256:dfc91d4720d48e2a709e9c368d5125b4b5899dced34b5400c3837dadc7d6271b \ + --hash=sha256:eded5138cc565a9d618e111c6d5c2547bbdd951114eb822f7f6309e04db0fb47 \ + --hash=sha256:f4324929c2dd917598212bfd554757feca3e5e0fa60da08be11b4aa8b90013c1 \ + --hash=sha256:fb66215277a230c456f9038d5e2d84778141643207f85336ef8d2a9da26bd7ca # via deepdiff packaging==21.3 \ --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ @@ -1287,9 +1236,7 @@ pip-tools==6.13.0 \ platformdirs==4.2.2 \ --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 - # via - # pylint - # virtualenv + # via virtualenv pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ 
--hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 @@ -1306,9 +1253,9 @@ prometheus-client==0.11.0 \ --hash=sha256:3a8baade6cb80bcfe43297e33e7623f3118d660d41387593758e2fb1ea173a86 \ --hash=sha256:b014bc76815eb1399da8ce5fc84b7717a3e63652b0c0f8804092c9363acab1b2 # via -r requirements.in -prompt-toolkit==3.0.46 \ - --hash=sha256:45abe60a8300f3c618b23c16c4bb98c6fc80af8ce8b17c7ae92db48db3ee63c1 \ - --hash=sha256:869c50d682152336e23c4db7f74667639b5047494202ffe7670817053fd57795 +prompt-toolkit==3.0.47 \ + --hash=sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10 \ + --hash=sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360 # via questionary proto-plus==1.23.0 \ --hash=sha256:89075171ef11988b3fa157f5dbd8b9cf09d65fffee97e29ce403cd8defba19d2 \ @@ -1349,9 +1296,9 @@ pyasn1-modules==0.4.0 \ --hash=sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6 \ --hash=sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b # via google-auth -pycodestyle==2.11.1 \ - --hash=sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f \ - --hash=sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67 +pycodestyle==2.12.0 \ + --hash=sha256:442f950141b4f43df752dd303511ffded3a04c2b6fb7f65980574f0c31e6e79c \ + --hash=sha256:949a39f6b86c3e1515ba1787c2022131d165a8ad271b11370a8819aa070269e4 # via autopep8 pycparser==2.22 \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ @@ -1480,10 +1427,6 @@ pyjwt[crypto]==2.8.0 \ # okta # pygithub # pyjwt -pylint==2.11.1 \ - --hash=sha256:0f358e221c45cbd4dad2a1e4b883e75d28acdcccd29d40c76eb72b307269b126 \ - --hash=sha256:2c9843fff1a88ca0ad98a256806c82c5a8f86086e7ccbdb93297d86c3f90c436 - # via -r requirements.in pynacl==1.5.0 \ --hash=sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 \ --hash=sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d \ @@ -1683,6 +1626,25 @@ rsa==4.7.2 \ # via # awscli # google-auth +ruff==0.4.7 \ + --hash=sha256:07fc80bbb61e42b3b23b10fda6a2a0f5a067f810180a3760c5ef1b456c21b9db \ + --hash=sha256:10f2204b9a613988e3484194c2c9e96a22079206b22b787605c255f130db5ed7 \ + --hash=sha256:10f973d521d910e5f9c72ab27e409e839089f955be8a4c8826601a6323a89753 \ + --hash=sha256:13a1768b0691619822ae6d446132dbdfd568b700ecd3652b20d4e8bc1e498f78 \ + --hash=sha256:2331d2b051dc77a289a653fcc6a42cce357087c5975738157cd966590b18b5e1 \ + --hash=sha256:50e9651578b629baec3d1513b2534de0ac7ed7753e1382272b8d609997e27e83 \ + --hash=sha256:59c3d110970001dfa494bcd95478e62286c751126dfb15c3c46e7915fc49694f \ + --hash=sha256:769e5a51df61e07e887b81e6f039e7ed3573316ab7dd9f635c5afaa310e4030e \ + --hash=sha256:8874a9df7766cb956b218a0a239e0a5d23d9e843e4da1e113ae1d27ee420877a \ + --hash=sha256:9e3ab684ad403a9ed1226894c32c3ab9c2e0718440f6f50c7c5829932bc9e054 \ + --hash=sha256:a7c0083febdec17571455903b184a10026603a1de078428ba155e7ce9358c5f6 \ + --hash=sha256:ad1b20e66a44057c326168437d680a2166c177c939346b19c0d6b08a62a37589 \ + --hash=sha256:b9de9a6e49f7d529decd09381c0860c3f82fa0b0ea00ea78409b785d2308a567 \ + --hash=sha256:cbf5d818553add7511c38b05532d94a407f499d1a76ebb0cad0374e32bc67202 \ + --hash=sha256:e089371c67892a73b6bb1525608e89a2aca1b77b5440acf7a71dda5dac958f9e \ + --hash=sha256:fa4dafe3fe66d90e2e2b63fa1591dd6e3f090ca2128daa0be33db894e6c18648 \ + --hash=sha256:fa9773c6c00f4958f73b317bc0fd125295110c3776089f6ef318f4b775f0abe4 + # via -r requirements.in s3transfer==0.6.2 \ 
--hash=sha256:b014be3a8a2aab98cfe1abc7229cc5a9a0cf05eb9c1f2b86b230fd8df3f78084 \ --hash=sha256:cab66d3380cca3e70939ef2255d01cd8aece6a4907a9528740f668c4b0611861 @@ -1692,12 +1654,14 @@ s3transfer==0.6.2 \ scylla-driver==3.26.8 \ --hash=sha256:062e77352dc694cd6465a3e09436eebfb0ee2254806e75db5ebf65140355c26d \ --hash=sha256:0797cb40f4a93ae14132fef524cf3d7cd2b6644085a2de4ed740bf067e4bce45 \ + --hash=sha256:0a5ffd21e93af7f6f9620e7a8cf487d21bd0b638e41251e8563fd851d6f71a48 \ --hash=sha256:0b47bc799e8d89b874f999fac4c5f60421d569d512cf3604ac97a35022af3a79 \ --hash=sha256:0faf834985af19a80e4fbb8503e91351c6419651821e705ec98a2bc2849a9548 \ --hash=sha256:13fdff491c191f992da006382749f216949f3565eaea3854410cae8ee5ff6e57 \ --hash=sha256:146b080e79ea11e3ae0753ae9b06c8005bf53e958697caeec9eb3f77666a570d \ --hash=sha256:15380a4c5038d79456e34e7fe6db2e4a7de9f32aea1b2156fee8753324d979fd \ --hash=sha256:20a4d18a69e710bf0ce41db77249097761806356f58a2a6792fe55418a753ad5 \ + --hash=sha256:2193ba6ef01cd713c81ce722a38601a12f4b7dd9dca16e15609127d93cab3167 \ --hash=sha256:2433beb87519599830c4471de61c6429d7f1a1b32be515aea8dc83dd8b3d1d0c \ --hash=sha256:2db6847d75bd6f5d26ec211f092e65c22d83012a2fe8c9b344ed4247e68d29de \ --hash=sha256:3ee479330cc79f1f73036e4d32fd9838f6934e9bfa0f4f4cb2f93c6c44aedfdb \ @@ -1709,12 +1673,15 @@ scylla-driver==3.26.8 \ --hash=sha256:51b37667176d8ec775baac219e0e1d9344d9bc493e4b3d2887efee5661279368 \ --hash=sha256:5524dc80bd47a6f8395478dee9fb1a6ee092fe8b915e9b1dc3c089ebb61caef0 \ --hash=sha256:57c388298e5520004f4e46794578ccdf655b1873c313a2240b24747daac7edec \ + --hash=sha256:593527ba7a4b684ea9f28a50f205642aac6f3c3b913a54f39c41583b0a2f893b \ --hash=sha256:5ccf0a7c2b880093083adfe5d4e2ac66a45bd5008396da00e6e8ccaa0a4e9103 \ --hash=sha256:74565836561208c5106165fec6248c1b3a0e13a1fecbb8fbdea77ed4db2d20d0 \ --hash=sha256:756db4d57b076aaa67f9c457922b10376e74d2bb8ad16f0aae79d6b5b22d6b4b \ --hash=sha256:761fbf0f2c51de8f8327e5cd6f2a4c49b9e2a401907905332c14f3491089674a \ --hash=sha256:7677a217a70352c0c53eef96b4be9fdfd6395c29f6c0940dd143a9c4749011dc \ + --hash=sha256:76e5370fac089421f13621471b8ebcf381e6b8fa230d8ffce0aa36ad344bf3de \ --hash=sha256:7ddd3e8df44609bf7f6dd2d403f7cb6845f3b90180d9d1fc8eff17d91b8d47e2 \ + --hash=sha256:851039bb9421120efd97c08631355d4421508599324e8c15b5a0b0c6365a1ea4 \ --hash=sha256:870a0fd86644b50a0688496a2c6b0d00b72152a108011c80381f95a448485c13 \ --hash=sha256:8817aee4de971c0e1c4a09201a9ca1476fa1112bc4e466e4a07e5a033db8be1a \ --hash=sha256:8bb0f741bf7d8ad9cba1ee1bb52511654ea771c7aa67661482204c185201eff4 \ @@ -1725,11 +1692,14 @@ scylla-driver==3.26.8 \ --hash=sha256:94525445588d6e1982bb624460d1e53f32ea854f288dd15696ba655e30df0cb8 \ --hash=sha256:b1bef1b12a8d77600ff10e5cd39a8cd66f742b1f9b9a852a18fe9fd245ad1919 \ --hash=sha256:bb10c43e04ba9a2f8dc510d5133cf6bd299d6349dd4e4c2927fb968a6b10ee4a \ + --hash=sha256:be99bc3b4c33ee26c84d756d9c2e5e4f4ae1d6b0f2c9a1e49a38836129b13afd \ --hash=sha256:c25298a52c115f2cb345cd9ca0f71e2a4c9584f386fbb57373cb8de46f1b7e0d \ + --hash=sha256:c7f71d6db04da42b15fb5c602c3410560df992c046df486938345fcb996bd151 \ --hash=sha256:cd4a384d3fdf0f1b110d7a249bdd3c2a68b16c2ed63c3dec3c85b32ee9fa2273 \ --hash=sha256:d3bc4cd9e40ba74f38d607161aa39ba6ed8d0af28bc989d373c2bce5ca8f6175 \ --hash=sha256:d93ebfe445173e9675e52906cc55bab6bd051cc6442cf9594e7e1578e23e8d8b \ --hash=sha256:d9c03f7ac60badf63cfeac3fa02e07d2b2bf9b76f426ab13b3dae5cc6891a453 \ + --hash=sha256:e33f410f7ba1034acd2559cfbceca9c51722ff665e344c0bd04d4c95e99ff3d9 \ 
--hash=sha256:e75d16a710b512707da96a74b3250749468b5067dd1f9ad852df57875a63df0a \ --hash=sha256:f196bc61feebd5bde0707f876241c9e96a9c70a2bcae91cdae3a28599147662c \ --hash=sha256:f3af534619ecadfce4fdd9ee3ba34f4764a431c976c2a79cac09e4527d557007 @@ -1831,7 +1801,6 @@ toml==0.10.2 \ # via # autopep8 # pre-commit - # pylint tomli==2.0.1 \ --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f @@ -1848,17 +1817,17 @@ typepy[datetime]==1.3.2 \ # sqliteschema # tabledata # tcconfig -types-awscrt==0.20.9 \ - --hash=sha256:3ae374b553e7228ba41a528cf42bd0b2ad7303d806c73eff4aaaac1515e3ea4e \ - --hash=sha256:64898a2f4a2468f66233cb8c29c5f66de907cf80ba1ef5bb1359aef2f81bb521 +types-awscrt==0.20.12 \ + --hash=sha256:0beabdde0205dc1da679ea464fd3f98b570ef4f0fc825b155a974fb51b21e8d9 \ + --hash=sha256:521ce54cc4dad9fe6480556bb0f8315a508106938ba1f2a0baccfcea7d4a4dee # via botocore-stubs types-s3transfer==0.10.1 \ --hash=sha256:02154cce46528287ad76ad1a0153840e0492239a0887e8833466eccf84b98da0 \ --hash=sha256:49a7c81fa609ac1532f8de3756e64b58afcecad8767933310228002ec7adff74 # via boto3-stubs -typing-extensions==4.12.1 \ - --hash=sha256:6024b58b69089e5a89c347397254e35f1bf02a907728ec7fee9bf0fe837d203a \ - --hash=sha256:915f5e35ff76f56588223f15fdd5938f9a1cf9195c0de25130c627e4d597f6d1 +typing-extensions==4.12.2 \ + --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ + --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 # via # azure-core # azure-storage-blob @@ -1909,61 +1878,78 @@ wheel==0.43.0 \ --hash=sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85 \ --hash=sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81 # via pip-tools -wrapt==1.13.3 \ - --hash=sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179 \ - --hash=sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096 \ - --hash=sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374 \ - --hash=sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df \ - --hash=sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185 \ - --hash=sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785 \ - --hash=sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7 \ - --hash=sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909 \ - --hash=sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918 \ - --hash=sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33 \ - --hash=sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068 \ - --hash=sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829 \ - --hash=sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af \ - --hash=sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79 \ - --hash=sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce \ - --hash=sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc \ - --hash=sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36 \ - --hash=sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade \ - --hash=sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca \ - --hash=sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32 \ - 
--hash=sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125 \ - --hash=sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e \ - --hash=sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709 \ - --hash=sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f \ - --hash=sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b \ - --hash=sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb \ - --hash=sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb \ - --hash=sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489 \ - --hash=sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640 \ - --hash=sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb \ - --hash=sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851 \ - --hash=sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d \ - --hash=sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44 \ - --hash=sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13 \ - --hash=sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2 \ - --hash=sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb \ - --hash=sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b \ - --hash=sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9 \ - --hash=sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755 \ - --hash=sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c \ - --hash=sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a \ - --hash=sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf \ - --hash=sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3 \ - --hash=sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229 \ - --hash=sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e \ - --hash=sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de \ - --hash=sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554 \ - --hash=sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10 \ - --hash=sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80 \ - --hash=sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056 \ - --hash=sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea - # via - # astroid - # deprecated +wrapt==1.16.0 \ + --hash=sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc \ + --hash=sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81 \ + --hash=sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09 \ + --hash=sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e \ + --hash=sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca \ + --hash=sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0 \ + --hash=sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb \ + --hash=sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487 \ + --hash=sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40 \ + --hash=sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c \ + --hash=sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060 \ + 
--hash=sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202 \ + --hash=sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41 \ + --hash=sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9 \ + --hash=sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b \ + --hash=sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664 \ + --hash=sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d \ + --hash=sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362 \ + --hash=sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00 \ + --hash=sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc \ + --hash=sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1 \ + --hash=sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267 \ + --hash=sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956 \ + --hash=sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966 \ + --hash=sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1 \ + --hash=sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228 \ + --hash=sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72 \ + --hash=sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d \ + --hash=sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292 \ + --hash=sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0 \ + --hash=sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0 \ + --hash=sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36 \ + --hash=sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c \ + --hash=sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5 \ + --hash=sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f \ + --hash=sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73 \ + --hash=sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b \ + --hash=sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2 \ + --hash=sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593 \ + --hash=sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39 \ + --hash=sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389 \ + --hash=sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf \ + --hash=sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf \ + --hash=sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89 \ + --hash=sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c \ + --hash=sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c \ + --hash=sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f \ + --hash=sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440 \ + --hash=sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465 \ + --hash=sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136 \ + --hash=sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b \ + --hash=sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8 \ + --hash=sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3 \ + 
+    --hash=sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8 \
+    --hash=sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6 \
+    --hash=sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e \
+    --hash=sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f \
+    --hash=sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c \
+    --hash=sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e \
+    --hash=sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8 \
+    --hash=sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2 \
+    --hash=sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020 \
+    --hash=sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35 \
+    --hash=sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d \
+    --hash=sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3 \
+    --hash=sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537 \
+    --hash=sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809 \
+    --hash=sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d \
+    --hash=sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a \
+    --hash=sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4
+    # via deprecated
 xmltodict==0.13.0 \
     --hash=sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56 \
     --hash=sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852
@@ -2077,6 +2063,5 @@ setuptools==70.0.0 \
     --hash=sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0
     # via
     #   anyconfig
-    #   astroid
     #   kubernetes
     #   pip-tools
diff --git a/sct.py b/sct.py
index 549372c333..0828b4b771 100755
--- a/sct.py
+++ b/sct.py
@@ -321,7 +321,7 @@ def clean_resources(ctx, post_behavior, user, test_id, logdir, dry_run, backend)
 @sct_option('--test-id', 'test_id', help='test id to filter by')
 @click.option('--verbose', is_flag=True, default=False, help='if enable, will log progress')
 @click.pass_context
-def list_resources(ctx, user, test_id, get_all, get_all_running, verbose):
+def list_resources(ctx, user, test_id, get_all, get_all_running, verbose):  # noqa: PLR0912, PLR0915
     # pylint: disable=too-many-locals,too-many-arguments,too-many-branches,too-many-statements

     add_file_logger()
@@ -797,7 +797,7 @@ def _run_yaml_test(backend, full_path, env):
         config = SCTConfiguration()
         config.verify_configuration()
         config.check_required_files()
-    except Exception as exc:  # pylint: disable=broad-except
+    except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
         output.append(''.join(traceback.format_exception(type(exc), exc, exc.__traceback__)))
         error = True
     return error, output
@@ -817,7 +817,7 @@ def lint_yamls(backend, exclude: str, include: str):  # pylint: disable=too-many
             continue
         try:
             exclude_filters.append(re.compile(flt))
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             raise ValueError(f'Exclude filter "{flt}" compiling failed with: {exc}') from exc

     include_filters = []
@@ -826,7 +826,7 @@
             continue
         try:
             include_filters.append(re.compile(flt))
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             raise ValueError(f'Include filter "{flt}" compiling failed with: {exc}') from exc

     original_env = {**os.environ}
@@ -967,7 +967,7 @@ def show_monitor(test_id, date_time, kill, cluster_name):
     containers = {}
     try:
         containers = restore_monitoring_stack(test_id, date_time)
-    except Exception as details:  # pylint: disable=broad-except
+    except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.error(details)

     if not containers:
@@ -1270,7 +1270,7 @@ def get_test_results_for_failed_test(test_status, start_time):
 @click.option('--runner-ip', type=str, required=False, help="Sct runner ip for the running test")
 @click.option('--email-recipients', help="Send email to next recipients")
 @click.option('--logdir', help='Directory where to find testrun folder')
-def send_email(test_id=None, test_status=None, start_time=None, started_by=None, runner_ip=None,
+def send_email(test_id=None, test_status=None, start_time=None, started_by=None, runner_ip=None,  # noqa: PLR0912
                email_recipients=None, logdir=None):
     if started_by is None:
         started_by = get_username()
@@ -1350,7 +1350,7 @@ def send_email(test_id=None, test_status=None, start_time=None, started_by=None,
         sys.exit(1)
     try:
         reporter.send_report(test_results)
-    except Exception:  # pylint: disable=broad-except
+    except Exception:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.error("Failed to create email due to the following error:\n%s", traceback.format_exc())
         build_reporter("TestAborted", email_recipients, testrun_dir).send_report({
             "job_url": os.environ.get("BUILD_URL"),
@@ -1475,11 +1475,11 @@ def prepare_regions(cloud_provider, regions):

     for region in regions:
         if cloud_provider == "aws":
-            region = AwsRegion(region_name=region)
+            region = AwsRegion(region_name=region)  # noqa: PLW2901
         elif cloud_provider == "azure":
-            region = AzureRegion(region_name=region)
+            region = AzureRegion(region_name=region)  # noqa: PLW2901
         elif cloud_provider == "gce":
-            region = GceRegion(region_name=region)
+            region = GceRegion(region_name=region)  # noqa: PLW2901
         else:
             raise Exception(f'Unsupported Cloud provider: `{cloud_provider}')
         region.configure()
diff --git a/sdcm/audit.py b/sdcm/audit.py
index 177d708f8d..47cb3df729 100644
--- a/sdcm/audit.py
+++ b/sdcm/audit.py
@@ -121,7 +121,7 @@ def get_audit_log_rows(node,  # pylint: disable=too-many-locals
         if '!NOTICE' in line[:120] and 'scylla-audit' in line[:120]:
             while line[-2] != '"':  # read multiline audit log (must end with ")
-                line += log_file.readline()
+                line += log_file.readline()  # noqa: PLW2901
             audit_data = line.split(': "', maxsplit=1)[-1]
             try:
                 node, cat, consistency, table, keyspace_name, opr, source, username, error = audit_data.split(
diff --git a/sdcm/cassandra_harry_thread.py b/sdcm/cassandra_harry_thread.py
index a341962b9f..34e30fe336 100644
--- a/sdcm/cassandra_harry_thread.py
+++ b/sdcm/cassandra_harry_thread.py
@@ -103,7 +103,7 @@ def _run_stress(self, loader, loader_idx, cpu_idx):
                 retry=0,
             )
             result = self._parse_harry_summary(docker_run_result.stdout.splitlines())
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             errors_str = format_stress_cmd_error(exc)
             if "timeout" in errors_str:
                 event_type = CassandraHarryEvent.timeout
diff --git a/sdcm/cdclog_reader_thread.py b/sdcm/cdclog_reader_thread.py
index 1a309b8da6..b901b36505 100644
--- a/sdcm/cdclog_reader_thread.py
+++ b/sdcm/cdclog_reader_thread.py
@@ -79,7 +79,7 @@ def _run_stress(self, loader, loader_idx, cpu_idx):  # pylint: disable=unused-ar
                                        stress_cmd=self.stress_cmd,
                                        errors=result.stderr.split("\n")).publish()
             return result
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             CDCReaderStressEvent.failure(node=loader,
                                          stress_cmd=self.stress_cmd,
                                          errors=[format_stress_cmd_error(exc), ]).publish()
diff --git a/sdcm/cluster.py b/sdcm/cluster.py
index 812b6a0a76..ea5a41d513 100644
--- a/sdcm/cluster.py
+++ b/sdcm/cluster.py
@@ -546,7 +546,7 @@ def short_hostname(self):
         if not self._short_hostname:
             try:
                 self._short_hostname = self.remoter.run('hostname -s').stdout.strip()
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 return "no_booted_yet"
         return self._short_hostname
@@ -597,7 +597,7 @@ def cpu_cores(self) -> Optional[int]:
         try:
             result = self.remoter.run("nproc", ignore_status=True)
             return int(result.stdout)
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error("Failed to get number of cores due to the %s", details)
         return None
@@ -613,7 +613,7 @@ def scylla_shards(self) -> int:
         shards = self.smp or self.cpuset or self.cpu_cores
         try:
             return int(shards)
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error("Failed to convert to integer shards value: %s", shards)
             return 0
@@ -639,7 +639,7 @@ def cpuset(self):
             # 'CPUSET="--cpuset 1-7,9-15,17-23,25-31 "'
             # And so on...
             cpuset_file_lines = self.remoter.run("cat /etc/scylla.d/cpuset.conf").stdout
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error(f"Failed to get CPUSET. Error: {exc}")
             return ''
@@ -677,7 +677,7 @@ def smp(self):
         try:
             grep_result = self.remoter.sudo(f'grep "^SCYLLA_ARGS=" {self.scylla_server_sysconfig_path}')
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error(f"Failed to get SCYLLA_ARGS. Error: {exc}")
             return ''
@@ -722,7 +722,7 @@ def extract_seeds_from_scylla_yaml(self):
         try:
             node_seeds = conf_dict['seed_provider'][0]['parameters'][0].get('seeds')
-        except Exception as details:
+        except Exception as details:  # noqa: BLE001
             self.log.debug('Loaded YAML data structure: %s', conf_dict)
             raise ValueError('Exception determining seed node ips') from details
@@ -756,7 +756,7 @@ def scylla_pkg(self):

     def file_exists(self, file_path: str) -> Optional[bool]:
         try:
             return self.remoter.sudo(f"test -e '{file_path}'", ignore_status=True).ok
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error("Error checking if file %s exists: %s", file_path, details)
             return None
@@ -956,15 +956,14 @@ def start_journal_thread(self):
         if self._journal_thread:
             self.log.debug("Use %s as logging daemon", type(self._journal_thread).__name__)
             self._journal_thread.start()
+        elif logs_transport == 'syslog-ng':
+            self.log.debug("Use no logging daemon since log transport is syslog-ng")
         else:
-            if logs_transport == 'syslog-ng':
-                self.log.debug("Use no logging daemon since log transport is syslog-ng")
-            else:
-                TestFrameworkEvent(
-                    source=self.__class__.__name__,
-                    source_method='start_journal_thread',
-                    message="Got no logging daemon by unknown reason"
-                ).publish_or_dump()
+            TestFrameworkEvent(
+                source=self.__class__.__name__,
+                source_method='start_journal_thread',
+                message="Got no logging daemon by unknown reason"
+            ).publish_or_dump()

     def start_coredump_thread(self):
         self._coredump_thread = CoredumpExportSystemdThread(self, self._maximum_number_of_cores_to_publish)
@@ -1036,7 +1035,7 @@ def hard_reboot(self):  # pylint: disable=no-self-use
     def soft_reboot(self):  # pylint: disable=no-self-use
         try:
             self.remoter.run('sudo reboot', ignore_status=True, retry=0)
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass

     def restart_binary_protocol(self, verify_up=True):
@@ -1079,7 +1078,7 @@ def uptime_changed():
             except SSHException as ex:
                 self.log.debug("Network isn't available, reboot might already start, %s" % ex)
                 return False
-            except Exception as ex:  # pylint: disable=broad-except
+            except Exception as ex:  # pylint: disable=broad-except  # noqa: BLE001
                 self.log.debug('Failed to get uptime during reboot, %s' % ex)
                 return False
@@ -1195,7 +1194,7 @@ def get_installed_packages(self):
         try:
             result = self.remoter.run(cmd, verbose=False)
             return result.stdout.strip()
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error('Error retrieving installed packages: %s', details)
             return None
@@ -1223,9 +1222,9 @@ def is_port_used(self, port: int, service_name: str) -> bool:
                 # this is the case output is empty
                 return False
             else:
-                self.log.error("Error checking for '%s' on port %s: rc:", service_name, port, result)
+                self.log.error("Error checking for '%s' on port %s: rc: %s", service_name, port, result)
                 return False
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error("Error checking for '%s' on port %s: %s", service_name, port, details)
             return False
@@ -1342,7 +1341,7 @@ def _report_housekeeping_uuid(self):
             if self.uuid and not mark_exists:
                 self.remoter.run(cmd % self.uuid, ignore_status=True)
                 self.remoter.sudo('touch %s' % mark_path, verbose=False, user='scylla')
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error('Failed to report housekeeping uuid. Error details: %s', details)

     def wait_db_up(self, verbose=True, timeout=3600):
@@ -1513,7 +1512,7 @@ def decode_backtrace(self):
                 self.log.debug("Found issue for %s event: %s", event.event_id, event.known_issue)
         except queue.Empty:
             pass
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error("failed to decode backtrace %s", details)
         finally:
             if event:
@@ -1921,7 +1920,7 @@ def update_repo_cache(self):
                 self.remoter.sudo('apt-get clean all')
                 self.remoter.sudo('rm -rf /var/cache/apt/')
                 self.remoter.sudo('apt-get update', retry=3)
-        except Exception as ex:  # pylint: disable=broad-except
+        except Exception as ex:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error('Failed to update repo cache: %s', ex)

     def upgrade_system(self):
@@ -2275,7 +2274,7 @@ def install_mgmt(self, package_url: Optional[str] = None) -> None:
         if self.is_docker():
             try:
                 self.remoter.run("echo no | sudo scyllamgr_setup")
-            except Exception as ex:  # pylint: disable=broad-except
+            except Exception as ex:  # pylint: disable=broad-except  # noqa: BLE001
                 self.log.warning(ex)
         else:
             self.remoter.run("echo yes | sudo scyllamgr_setup")
@@ -2426,7 +2425,7 @@ def stop_scylla_server(self, verify_up=False, verify_down=True, timeout=300, ign
             self.wait_db_up(timeout=timeout)
         try:
             self.stop_service(service_name='scylla-server', timeout=timeout, ignore_status=ignore_status)
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             if isinstance(details, RetryableNetworkException):
                 details = details.original
             if details.__class__.__name__.endswith("CommandTimedOut"):
@@ -2616,7 +2615,7 @@ def run_nodetool(self, sub_cmd: str, args: str = "", options: str = "", timeout:
                 nodetool_event.duration = result.duration
             return result
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             if isinstance(details, RetryableNetworkException):
                 details = details.original
             if coredump_on_timeout and details.__class__.__name__.endswith("CommandTimedOut"):
@@ -2699,11 +2698,11 @@ def get_nodes_status(self) -> dict[BaseNode, dict]:
             if node := node_ip_map.get(node_ip):
                 nodes_status[node] = {'status': node_properties['state'],
                                       'dc': dc, 'rack': node_properties['rack']}
-            else:
+            else:  # noqa: PLR5501
                 if node_ip:
                     LOGGER.error("Get nodes statuses. 
Failed to find a node in cluster by IP: %s", node_ip) - except Exception as error: # pylint: disable=broad-except + except Exception as error: # pylint: disable=broad-except # noqa: BLE001 ClusterHealthValidatorEvent.NodeStatus( severity=Severity.WARNING, node=self.name, @@ -2837,7 +2836,7 @@ def run_cqlsh(self, cmd, keyspace=None, timeout=120, verbose=True, split=False, cqlsh_out = self.remoter.run(cmd, timeout=timeout + 120, # we give 30 seconds to cqlsh timeout mechanism to work verbose=verbose) break - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 num_retry_on_failure -= 1 if not num_retry_on_failure: raise @@ -2909,7 +2908,7 @@ def get_scylla_config_param(self, config_param_name, verbose=True): if verbose: self.log.debug(f'{config_param_name} parameter value: {request_out.stdout}') return request_out.stdout - except Exception as e: # pylint: disable=broad-except + except Exception as e: # pylint: disable=broad-except # noqa: BLE001 self.log.error(f'Failed to retreive value of {config_param_name} parameter. Error: {e}') return None @@ -3068,7 +3067,7 @@ def get_token_ring_members(self) -> list[dict[str, str]]: return [] try: result_json = json.loads(result.stdout) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning("Error getting token-ring data: %s", exc) return [] @@ -3461,7 +3460,7 @@ def create_ssl_context(keyfile: str, certfile: str, truststore: str): ssl_context.load_verify_locations(cafile=truststore) return ssl_context - def _create_session(self, node, keyspace, user, password, compression, protocol_version, load_balancing_policy=None, port=None, + def _create_session(self, node, keyspace, user, password, compression, protocol_version, load_balancing_policy=None, port=None, # noqa: PLR0913 ssl_context=None, node_ips=None, connect_timeout=None, verbose=True, connection_bundle_file=None): if not port: port = node.CQL_PORT @@ -3644,7 +3643,7 @@ def execute_cmd(cql_session, entity_type): publish_event=False, retry=3) cf_stats = db_node._parse_cfstats(res.stdout) # pylint: disable=protected-access has_data = bool(cf_stats['Number of partitions (estimate)']) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning(f'Failed to get rows from {table_name} table. Error: {exc}') if not has_data: @@ -3672,7 +3671,7 @@ def is_table_has_data(self, session, table_name: str) -> (bool, Optional[Excepti result = session.execute(SimpleStatement(f"SELECT * FROM {table_name}", fetch_size=10)) return result and bool(len(result.one())), None - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning(f'Failed to get rows from {table_name} table. 
Error: {exc}') return False, exc @@ -3796,13 +3795,13 @@ def node_setup(_node: BaseNode, task_queue: queue.Queue): exception_details = None try: cl_inst.node_setup(_node, **setup_kwargs) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 exception_details = (str(ex), traceback.format_exc()) try: _node.update_shards_in_argus() LOGGER.info("DC: %s | Rack: %s", _node.datacenter, _node.node_rack) _node.update_rack_info_in_argus(_node.datacenter, _node.node_rack) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Failure settings shards for node %s in Argus.", _node) LOGGER.debug("Exception details:\n", exc_info=True) task_queue.put((_node, exception_details)) @@ -3813,7 +3812,7 @@ def node_startup(_node: BaseNode, task_queue: queue.Queue): exception_details = None try: cl_inst.node_startup(_node, **setup_kwargs) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 exception_details = (str(ex), traceback.format_exc()) task_queue.put((_node, exception_details)) task_queue.task_done() @@ -3891,7 +3890,7 @@ def critical_node_setup_events(): for node in node_list: try: node.update_rack_info_in_argus(node.datacenter, node.node_rack) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Failure settings dc/rack infomration for %s in Argus.", node) LOGGER.debug("Exception details:\n", exc_info=True) @@ -4488,7 +4487,7 @@ def _rotate_kms_key(kms_key_alias_name, kms_key_rotation_interval, db_cluster): time.sleep(kms_key_rotation_interval * 60) try: aws_kms.rotate_kms_key(kms_key_alias_name=kms_key_alias_name) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 AwsKmsEvent( message=f"Failed to rotate AWS KMS key for the '{kms_key_alias_name}' alias", traceback=traceback.format_exc()).publish() @@ -4522,7 +4521,7 @@ def _rotate_kms_key(kms_key_alias_name, kms_key_rotation_interval, db_cluster): AwsKmsEvent( message="Failed to get any table for the KMS key rotation thread", traceback=traceback.format_exc()).publish() - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 AwsKmsEvent( message="Failed to check the fact of encryption (KMS) for sstables", traceback=traceback.format_exc()).publish() @@ -4709,7 +4708,7 @@ def _scylla_install(self, node): install_mode = self.params.get('install_mode') try: mode = InstallMode(install_mode) - except Exception as ex: + except Exception as ex: # noqa: BLE001 raise ValueError(f'Invalid install mode: {install_mode}, err: {ex}') from ex if mode == InstallMode.WEB: @@ -4873,7 +4872,7 @@ def get_node_ip_list(verification_node): for nodes_ips in status.values(): ip_node_list.extend(nodes_ips.keys()) return ip_node_list - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error(str(details)) return None @@ -5016,7 +5015,7 @@ def gemini_version(self): f'gemini --version', ignore_status=True) if result.ok: self._gemini_version = get_gemini_version(result.stdout) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self.log.error("Error get gemini version: %s", 
details) return self._gemini_version @@ -5119,7 +5118,7 @@ def kill_stress_thread(self): if self.nodes and self.nodes[0].is_kubernetes(): for node in self.nodes: node.remoter.stop() - else: + else: # noqa: PLR5501 if self.params.get("use_prepared_loaders"): self.kill_cassandra_stress_thread() else: @@ -5147,7 +5146,7 @@ def kill_cs_process(loader, filter_cmd): wait.wait_for(kill_cs_process, text="Search and kill c-s processes", timeout=30, throw_exc=False, loader=loader, filter_cmd=search_cmd) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 self.log.warning("failed to kill stress-command on [%s]: [%s]", str(loader), str(ex)) @@ -5156,7 +5155,7 @@ def kill_docker_loaders(self): try: loader.remoter.run(cmd='docker ps -a -q | xargs docker rm -f', verbose=True, ignore_status=True) self.log.info("Killed docker loader on node: %s", loader.name) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 self.log.warning("failed to kill docker stress command on [%s]: [%s]", str(loader), str(ex)) @@ -5171,7 +5170,7 @@ def _parse_cs_summary(lines): enable_parse = False for line in lines: - line = line.strip() + line = line.strip() # noqa: PLW2901 if not line: continue # Parse loader & cpu info @@ -5774,7 +5773,7 @@ def get_grafana_annotations(self, node): grafana_port=self.grafana_port)) if res.ok: return res.content - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("unable to get grafana annotations [%s]", str(ex)) return "" @@ -5849,7 +5848,7 @@ def download_monitor_data(self) -> str: if snapshot_archive := PrometheusSnapshots(name='prometheus_snapshot').collect(self.nodes[0], self.logdir): self.log.debug("Snapshot local path: %s", snapshot_archive) return upload_archive_to_s3(snapshot_archive, self.monitor_id) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self.log.error("Error downloading prometheus data dir: %s", details) return "" diff --git a/sdcm/cluster_aws.py b/sdcm/cluster_aws.py index 0f33f2c38e..7eef04056f 100644 --- a/sdcm/cluster_aws.py +++ b/sdcm/cluster_aws.py @@ -80,7 +80,7 @@ class AWSCluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attr Cluster of Node objects, started on Amazon EC2. 
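Ruff's `PLR0913` (too-many-arguments) flags callables whose parameter count exceeds the configured `max-args`, five by default; the cluster constructors in this diff keep their wide signatures and opt out on the `def` line, mirroring the `too-many-arguments` pylint pragma they sit next to. A toy signature that would trip the rule, with hypothetical names:

    def build_cluster(ami_id, subnet_id, security_groups,  # noqa: PLR0913
                      services, credentials, instance_type="c6i.xlarge"):
        return {"ami": ami_id, "instance_type": instance_type}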
""" - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, cluster_uuid=None, ec2_instance_type='c6i.xlarge', ec2_ami_username='root', ec2_user_data='', ec2_block_device_mappings=None, @@ -356,7 +356,7 @@ def update_bootstrap(ec2_user_data, enable_auto_bootstrap): ec2_user_data.replace('--bootstrap false', '--bootstrap true') else: ec2_user_data += ' --bootstrap true ' - else: + else: # noqa: PLR5501 if '--bootstrap ' in ec2_user_data: ec2_user_data.replace('--bootstrap true', '--bootstrap false') else: @@ -572,7 +572,7 @@ def check_spot_termination(self): return max(next_check_delay - SPOT_TERMINATION_CHECK_OVERHEAD, 0) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self.log.warning('Error during getting spot termination notification %s', details) return SPOT_TERMINATION_CHECK_DELAY @@ -844,7 +844,7 @@ def ena_support(self) -> bool: class ScyllaAWSCluster(cluster.BaseScyllaCluster, AWSCluster): - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, ec2_instance_type='c6i.xlarge', ec2_ami_username='centos', ec2_block_device_mappings=None, @@ -974,7 +974,7 @@ def get_seed_nodes(self): conf_dict = yaml.safe_load(yaml_stream) try: return conf_dict['seed_provider'][0]['parameters'][0]['seeds'].split(',') - except Exception as exc: + except Exception as exc: # noqa: BLE001 raise ValueError('Unexpected cassandra.yaml. 
Contents:\n%s' % yaml_stream.read()) from exc # pylint: disable=too-many-arguments @@ -1050,7 +1050,7 @@ def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: class MonitorSetAWS(cluster.BaseMonitorSet, AWSCluster): - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, ec2_instance_type='c6i.xlarge', ec2_block_device_mappings=None, ec2_ami_username='centos', diff --git a/sdcm/cluster_azure.py b/sdcm/cluster_azure.py index ef9623a4c1..398283f919 100644 --- a/sdcm/cluster_azure.py +++ b/sdcm/cluster_azure.py @@ -125,7 +125,7 @@ def check_spot_termination(self): else: # other EventType's that can be triggered by Azure's maintenance: "Reboot" | "Redeploy" | "Freeze" | "Terminate" self.log.warning(f"Unhandled Azure scheduled event: {event}") - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self.log.warning('Error during getting Azure scheduled events: %s', details) return 0 return SPOT_TERMINATION_CHECK_DELAY @@ -174,7 +174,7 @@ def configure_remote_logging(self) -> None: class AzureCluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attributes - def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-arguments, too-many-locals + def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-arguments, too-many-locals # noqa: PLR0913 provisioners: List[AzureProvisioner], credentials, cluster_uuid=None, instance_type='Standard_L8s_v3', region_names=None, user_name='root', cluster_prefix='cluster', @@ -230,7 +230,7 @@ def _create_node(self, instance, node_index, dc_idx, rack): rack=rack) node.init() return node - except Exception as ex: + except Exception as ex: # noqa: BLE001 raise CreateAzureNodeError('Failed to create node: %s' % ex) from ex def _create_instances(self, count, dc_idx=0, instance_type=None) -> List[VmInstance]: diff --git a/sdcm/cluster_docker.py b/sdcm/cluster_docker.py index f0905b29ea..3d28b40fd1 100644 --- a/sdcm/cluster_docker.py +++ b/sdcm/cluster_docker.py @@ -494,11 +494,11 @@ def destroy(self): try: self.stop_scylla_monitoring(node) self.log.error("Stopping scylla monitoring succeeded") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.error(f"Stopping scylla monitoring failed with {str(exc)}") try: node.remoter.sudo(f"rm -rf '{self.monitor_install_path_base}'") self.log.error("Cleaning up scylla monitoring succeeded") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.error(f"Cleaning up scylla monitoring failed with {str(exc)}") node.destroy() diff --git a/sdcm/cluster_gce.py b/sdcm/cluster_gce.py index 881b58bdeb..c0f29de322 100644 --- a/sdcm/cluster_gce.py +++ b/sdcm/cluster_gce.py @@ -188,7 +188,7 @@ def check_spot_termination(self): GceInstanceEvent(entry).publish() case _: GceInstanceEvent(entry, severity=Severity.WARNING).publish() - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self.log.warning('Error during getting spot termination notification %s', details) self._last_logs_fetch_time = since return SPOT_TERMINATION_CHECK_DELAY @@ -248,7 +248,7 
@@ class GCECluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attr """ _gce_service: compute_v1.InstancesClient - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 cluster_uuid=None, gce_instance_type='n1-standard-1', gce_region_names=None, gce_n_local_ssd=1, gce_image_username='root', cluster_prefix='cluster', node_prefix='node', n_nodes=3, add_disks=None, params=None, node_type=None, @@ -336,7 +336,7 @@ def _get_disks_struct(self, name, dc_idx): gce_disk_struct.append(self._get_local_ssd_disk_struct(name=name, index=i, dc_idx=dc_idx)) if self._add_disks: for disk_type, disk_size in self._add_disks.items(): - disk_size = int(disk_size) + disk_size = int(disk_size) # noqa: PLW2901 if disk_size: gce_disk_struct.append(self._get_persistent_disk_struct(name=name, disk_size=disk_size, disk_type=disk_type, dc_idx=dc_idx)) @@ -505,7 +505,7 @@ def _create_node(self, instance, node_index, dc_idx, rack): rack=rack) node.init() return node - except Exception as ex: + except Exception as ex: # noqa: BLE001 raise CreateGCENodeError('Failed to create node: %s' % ex) from ex # pylint: disable=too-many-arguments @@ -542,7 +542,7 @@ def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_boots class ScyllaGCECluster(cluster.BaseScyllaCluster, GCECluster): - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 gce_instance_type='n1-standard-1', gce_n_local_ssd=1, gce_image_username='centos', user_prefix=None, n_nodes=3, add_disks=None, params=None, gce_datacenter=None, service_accounts=None): @@ -578,7 +578,7 @@ def _wait_for_preinstalled_scylla(node): class LoaderSetGCE(cluster.BaseLoaderSet, GCECluster): - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 gce_instance_type='n1-standard-1', gce_n_local_ssd=1, gce_image_username='centos', user_prefix=None, n_nodes=10, add_disks=None, params=None, gce_datacenter=None): @@ -609,7 +609,7 @@ def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_s class MonitorSetGCE(cluster.BaseMonitorSet, GCECluster): - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 gce_instance_type='n1-standard-1', gce_n_local_ssd=1, gce_image_username='centos', user_prefix=None, n_nodes=1, targets=None, add_disks=None, params=None, gce_datacenter=None, diff --git a/sdcm/cluster_k8s/__init__.py b/sdcm/cluster_k8s/__init__.py index a5a516b4aa..58b3a62190 100644 --- a/sdcm/cluster_k8s/__init__.py +++ b/sdcm/cluster_k8s/__init__.py @@ -249,7 +249,7 @@ def readiness_timeout(self) -> int: def nodes(self): try: return 
self.k8s_cluster.k8s_core_v1_api.list_node(label_selector=f'{self.pool_label_name}={self.name}') - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self.k8s_cluster.log.debug("Failed to get nodes list: %s", str(details)) return {} @@ -822,7 +822,7 @@ def upgrade_scylla_operator(self, new_helm_repo: str, continue self.apply_file( os.path.join(crd_basedir, current_file), modifiers=[], envsubst=False) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.debug("Upgrade Scylla Operator CRDs: Exception: %s", exc) self.log.info("Upgrade Scylla Operator CRDs: END") @@ -1270,7 +1270,7 @@ def deploy_scylla_cluster(self, node_pool_name: str, namespace: str = SCYLLA_NAM self.kubectl("get scyllaclusters.scylla.scylladb.com", namespace=namespace) self.start_scylla_cluster_events_thread() return - except Exception as exc: + except Exception as exc: # noqa: BLE001 raise RuntimeError( "SCT_REUSE_CLUSTER is set, but target scylla cluster is unhealthy") from exc @@ -1665,7 +1665,7 @@ def scylla_config_map(self, namespace: str = SCYLLA_NAMESPACE) -> dict: config_map = self.k8s_core_v1_api.read_namespaced_config_map( name=SCYLLA_CONFIG_NAME, namespace=namespace).data or {} exists = True - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 config_map = {} exists = False original_config_map = deepcopy(config_map) @@ -1905,7 +1905,7 @@ def _refresh_instance_state(self): def k8s_pod_uid(self) -> str: try: return str(self._pod.metadata.uid) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 return '' @property @@ -2169,7 +2169,7 @@ def _restart_node_with_resharding(self, murmur3_partitioner_ignore_msb_bits: int def is_seed(self) -> bool: try: return 'scylla/seed' in self._svc.metadata.labels - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 return False @is_seed.setter @@ -2783,7 +2783,7 @@ def _get_rack_nodes(self, rack: int, dc_idx: int) -> list: return sorted( [node for node in self.nodes if node.rack == rack and node.dc_idx == dc_idx], key=lambda n: n.name) - def add_nodes(self, # pylint: disable=too-many-locals,too-many-branches + def add_nodes(self, # pylint: disable=too-many-locals,too-many-branches # noqa: PLR0913 count: int, ec2_user_data: str = "", # NOTE: 'dc_idx=None' means 'create %count% nodes on each K8S cluster' @@ -2961,7 +2961,7 @@ def check_kubernetes_monitoring_health(self) -> bool: cluster_name=self.scylla_cluster_name, namespace=self.namespace) PrometheusDBStats(host=prometheus_ip, port=k8s_cluster.prometheus_port, protocol='https') kmh_event.message = "Kubernetes monitoring health checks have successfully been finished" - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 ClusterHealthValidatorEvent.MonitoringStatus( error=f'Failed to connect to K8S prometheus server (namespace={self.namespace}) at ' f'{prometheus_ip}:{k8s_cluster.prometheus_port}, due to the: \n' diff --git a/sdcm/cluster_k8s/eks.py b/sdcm/cluster_k8s/eks.py index 818adc73a3..178117730d 100644 --- a/sdcm/cluster_k8s/eks.py +++ b/sdcm/cluster_k8s/eks.py @@ -162,7 +162,7 @@ class EksNodePool(CloudK8sNodePool): disk_type: Literal["standard", "io1", "io2", "gp2", "gp3", "sc1", "st1"] #
pylint: disable=too-many-arguments,too-many-locals - def __init__( + def __init__( # noqa: PLR0913 self, k8s_cluster: 'EksCluster', name: str, @@ -312,7 +312,7 @@ def undeploy(self): self.k8s_cluster.eks_client.delete_nodegroup( clusterName=self.k8s_cluster.short_cluster_name, nodegroupName=self.name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.k8s_cluster.log.debug( "Failed to delete nodegroup %s/%s, due to the following error:\n%s", self.k8s_cluster.short_cluster_name, self.name, exc) @@ -340,7 +340,7 @@ class EksCluster(KubernetesCluster, EksClusterCleanupMixin): # pylint: disable= short_cluster_name: str # pylint: disable=too-many-arguments - def __init__(self, + def __init__(self, # noqa: PLR0913 eks_cluster_version, ec2_security_group_ids, ec2_subnet_ids, diff --git a/sdcm/cluster_k8s/gke.py b/sdcm/cluster_k8s/gke.py index 8b3352fd15..566a67b50a 100644 --- a/sdcm/cluster_k8s/gke.py +++ b/sdcm/cluster_k8s/gke.py @@ -142,7 +142,7 @@ class GkeNodePool(CloudK8sNodePool): k8s_cluster: 'GkeCluster' # pylint: disable=too-many-arguments - def __init__( + def __init__( # noqa: PLR0913 self, k8s_cluster: 'KubernetesCluster', name: str, @@ -231,7 +231,7 @@ def instance_group_name(self) -> str: f'--cluster {self.k8s_cluster.short_cluster_name}') ).get('instanceGroupUrls')[0] return group_link.split('/')[-1] - except Exception as exc: + except Exception as exc: # noqa: BLE001 raise RuntimeError(f"Can't get instance group name due to the: {exc}") from exc def remove_instance(self, instance_name: str): @@ -267,7 +267,7 @@ class GkeCluster(KubernetesCluster): pools: Dict[str, GkeNodePool] # pylint: disable=too-many-arguments,too-many-locals - def __init__(self, + def __init__(self, # noqa: PLR0913 gke_cluster_version, gke_k8s_release_channel, gce_disk_size, diff --git a/sdcm/cluster_k8s/mini_k8s.py b/sdcm/cluster_k8s/mini_k8s.py index f1613c4cfa..fe6cc6b540 100644 --- a/sdcm/cluster_k8s/mini_k8s.py +++ b/sdcm/cluster_k8s/mini_k8s.py @@ -345,7 +345,7 @@ def static_local_volume_provisioner_image(self): continue try: return doc["spec"]["template"]["spec"]["containers"][0]["image"] - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning( "Could not read the static local volume provisioner image: %s", exc) return "" @@ -377,7 +377,7 @@ def ingress_controller_images(self): for container in doc["spec"]["template"]["spec"]["containers"]: try: ingress_images.add(container["image"]) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning( "Could not read the ingress controller related image: %s", exc) return ingress_images @@ -607,9 +607,8 @@ def on_deploy_completed(self): # pylint: disable=too-many-branches images_to_cache.extend(self.cert_manager_images) if self.params.get("k8s_local_volume_provisioner_type") != 'static': images_to_cache.append(self.dynamic_local_volume_provisioner_image) - else: - if provisioner_image := self.static_local_volume_provisioner_image: - images_to_cache.append(provisioner_image) + elif provisioner_image := self.static_local_volume_provisioner_image: + images_to_cache.append(provisioner_image) if self.params.get("k8s_use_chaos_mesh"): chaos_mesh_version = ChaosMesh.VERSION if not chaos_mesh_version.startswith("v"): @@ -689,7 +688,7 @@ def gather_k8s_logs(self) -> None: f"docker cp 
kind-control-plane:{src_container_path} {self.logdir} " f"&& mkdir -p {self.logdir}/{dst_subdir} " f"&& mv {self.logdir}/*/{log_prefix}* {self.logdir}/{dst_subdir}") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning( "Failed to copy K8S apiserver audit logs located at '%s'. Exception: \n%s", src_container_path, exc) diff --git a/sdcm/commit_log_check_thread.py b/sdcm/commit_log_check_thread.py index b39369c160..6a2ee64195 100644 --- a/sdcm/commit_log_check_thread.py +++ b/sdcm/commit_log_check_thread.py @@ -174,7 +174,7 @@ def run_thread(self): self.zero_free_segments_checker(self.start_time, interval_end_time) self.start_time = interval_end_time - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 trace = traceback.format_exc() CommitLogCheckErrorEvent( message=f"CommitLogCheckThread failed: {exc.__repr__()} with traceback {trace}").publish() @@ -234,7 +234,7 @@ def run(custer_tester, duration): try: thread = CommitLogCheckThread(custer_tester, duration) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 trace = traceback.format_exc() CommitLogCheckErrorEvent( message=f"CommitLogCheckThread.__init__ failed with unexpected exception:" @@ -242,7 +242,7 @@ def run(custer_tester, duration): else: try: thread.start() - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 trace = traceback.format_exc() CommitLogCheckErrorEvent( message=f"CommitLogCheckThread.start failed with unexpected exception:" diff --git a/sdcm/coredump.py b/sdcm/coredump.py index 3719a4e74a..4b9d8bbc5e 100644 --- a/sdcm/coredump.py +++ b/sdcm/coredump.py @@ -114,7 +114,7 @@ def run(self): try: self.main_cycle_body() exceptions_count = 0 - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.error("Following error occurred: %s", exc) exceptions_count += 1 if exceptions_count == self.max_coredump_thread_exceptions: @@ -172,7 +172,7 @@ def _process_coredumps( if result: uploaded.append(core_info) self.publish_event(core_info) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass @abstractmethod @@ -182,7 +182,7 @@ def get_list_of_cores(self) -> Optional[List[CoreDumpInfo]]: def publish_event(self, core_info: CoreDumpInfo): try: core_info.publish_event() - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.error(f"Failed to publish coredump event due to the: {str(exc)}") def extract_info_from_core_pids( @@ -261,7 +261,7 @@ def _pack_coredump(self, coredump: str) -> str: coredump += '.gz' except NETWORK_EXCEPTIONS: # pylint: disable=try-except-raise raise - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 self.log.warning("Failed to compress coredump '%s': %s", coredump, ex) return coredump @@ -295,7 +295,7 @@ def systemd_version(self): try: systemd_version = get_systemd_version(self.node.remoter.run( "systemctl --version", ignore_status=True).stdout) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 
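`BLE001` is ruff's blind-except check, the counterpart of the `broad-except` pylint pragma it is stacked next to throughout this diff: it flags `except Exception:` handlers that can mask unrelated failures. SCT keeps these handlers because most are best-effort probes where any failure should degrade to a default rather than abort the test. Roughly the shape being suppressed, as a sketch with a hypothetical helper:

    from typing import Callable, Optional

    def probe_systemd_version(run: Callable[[str], str]) -> Optional[str]:
        try:
            return run("systemctl --version")
        except Exception:  # noqa: BLE001 - best-effort probe, failure is non-fatal
            return None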
self.log.warning("failed to get systemd version:", exc_info=True) return systemd_version @@ -397,7 +397,7 @@ def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo): # # Coredump could be absent when file was removed for line in coredump_info: - line = line.strip() + line = line.strip() # noqa: PLW2901 if line.startswith('Executable:'): executable = line[12:].strip() elif line.startswith('Command Line:'): @@ -407,7 +407,7 @@ def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo): # Storage: /var/lib/systemd/coredump/core.vi.1000.6c4de4c206a0476e88444e5ebaaac482.18554.1578994298000000.lz4 (inaccessible) if "inaccessible" in line: continue - line = line.replace('(present)', '') + line = line.replace('(present)', '') # noqa: PLW2901 corefile = line[line.find(':') + 1:].strip() elif line.startswith('Timestamp:'): timestring = None @@ -430,7 +430,7 @@ def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo): else: raise ValueError(f'Date has unknown format: {timestring}') event_timestamp = datetime.strptime(timestring, fmt).timestamp() - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.error(f"Failed to convert date '{line}' ({timestring}), due to error: {str(exc)}") core_info.update(executable=executable, command_line=command_line, corefile=corefile, source_timestamp=event_timestamp, coredump_info="\n".join(coredump_info)+"\n") diff --git a/sdcm/cql_stress_cassandra_stress_thread.py b/sdcm/cql_stress_cassandra_stress_thread.py index 3d4c3806d6..d1b6b4d9b5 100644 --- a/sdcm/cql_stress_cassandra_stress_thread.py +++ b/sdcm/cql_stress_cassandra_stress_thread.py @@ -57,7 +57,7 @@ def run(self) -> None: class CqlStressCassandraStressThread(CassandraStressThread): DOCKER_IMAGE_PARAM_NAME = 'stress_image.cql-stress-cassandra-stress' - def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1, keyspace_name='', compaction_strategy='', # pylint: disable=too-many-arguments + def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1, keyspace_name='', compaction_strategy='', # pylint: disable=too-many-arguments # noqa: PLR0913 profile=None, node_list=None, round_robin=False, client_encrypt=False, stop_test_on_failure=True, params=None): super().__init__(loader_set=loader_set, stress_cmd=stress_cmd, timeout=timeout, @@ -157,7 +157,7 @@ def _run_cs_stress(self, loader, loader_idx, cpu_idx, keyspace_idx): # pylint: with SoftTimeoutContext(timeout=self.timeout, operation="cql-stress-cassandra-stress"): result = cmd_runner.run( cmd=node_cmd, timeout=hard_timeout, log_file=log_file_name, retry=0) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.configure_event_on_failure( stress_event=cs_stress_event, exc=exc) diff --git a/sdcm/db_log_reader.py b/sdcm/db_log_reader.py index a046de5862..69eb72c1bd 100644 --- a/sdcm/db_log_reader.py +++ b/sdcm/db_log_reader.py @@ -81,7 +81,7 @@ def __init__(self, def _continuous_event_patterns(self): return get_pattern_to_event_to_func_mapping(node=self._node_name) - def _read_and_publish_events(self) -> None: + def _read_and_publish_events(self) -> None: # noqa: PLR0912 """Search for all known patterns listed in `sdcm.sct_events.database.SYSTEM_ERROR_EVENTS'.""" # pylint: disable=too-many-branches,too-many-locals,too-many-statements @@ -98,7 +98,7 @@ def _read_and_publish_events(self) -> None: 
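`PLR0912` (too-many-branches) counts the `if`/`elif`/`else`, loop, and `except` arms in one function and reports past twelve by default; `_read_and_publish_events` keeps its long pattern-dispatch chain and opts out on the `def` line, just as the click `send_email` command does earlier in this diff. A stand-in far too small to actually trip the rule, shown only to illustrate where the pragma goes:

    def classify(line: str) -> str:  # noqa: PLR0912 - needed once branches exceed 12
        if line.startswith("{"):
            return "json"
        if "!NOTICE" in line:
            return "audit"
        return "plain"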
for index, line in enumerate(db_file, start=self._last_line_no + 1): if len(line) > LOG_LINE_MAX_PROCESSING_SIZE: # trim to avoid filling the memory when lot of long line is writen - line = line[:LOG_LINE_MAX_PROCESSING_SIZE] + line = line[:LOG_LINE_MAX_PROCESSING_SIZE] # noqa: PLW2901 # Postpone processing line with no ending in case if half of line is written to the disc if line[-1] == '\n' or self._skipped_end_line > 20: @@ -111,11 +111,11 @@ def _read_and_publish_events(self) -> None: if line[0] == '{': try: json_log = json.loads(line) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass if self._log_lines: - line = line.strip() + line = line.strip() # noqa: PLW2901 for pattern in self.EXCLUDE_FROM_LOGGING: if pattern in line: break diff --git a/sdcm/db_stats.py b/sdcm/db_stats.py index 1151b7bfd5..2e47808b3f 100644 --- a/sdcm/db_stats.py +++ b/sdcm/db_stats.py @@ -126,7 +126,7 @@ def get_stress_cmd_params(cmd): del cmd_params['rate'] return cmd_params - except Exception as ex: + except Exception as ex: # noqa: BLE001 raise CassandraStressCmdParseError(cmd=cmd, ex=ex) from None @@ -349,7 +349,7 @@ def get_scylla_scheduler_shares_per_sla(self, start_time, end_time, node_ip): # for item in results: try: res[item['metric']['group']] = {int(i[1]) for i in item['values']} - except Exception as error: # pylint: disable=broad-except + except Exception as error: # pylint: disable=broad-except # noqa: BLE001 # average value may be returned not integer. Ignore it LOGGER.error("Failed to analyze results of query: %s\nResults: %s\nError: %s", query, results, error) return res @@ -607,17 +607,16 @@ def get_setup_details(self): for key, value in test_params: if key in exclude_details or (isinstance(key, str) and key.startswith('stress_cmd')): # pylint: disable=no-else-continue continue + elif is_gce and key in \ + ['instance_type_loader', # pylint: disable=no-else-continue + 'instance_type_monitor', + 'instance_type_db']: + # exclude these params from gce run + continue + elif key == 'n_db_nodes' and isinstance(value, str) and re.search(r'\s', value): # multidc + setup_details['n_db_nodes'] = sum([int(i) for i in value.split()]) else: - if is_gce and key in \ - ['instance_type_loader', # pylint: disable=no-else-continue - 'instance_type_monitor', - 'instance_type_db']: - # exclude these params from gce run - continue - elif key == 'n_db_nodes' and isinstance(value, str) and re.search(r'\s', value): # multidc - setup_details['n_db_nodes'] = sum([int(i) for i in value.split()]) - else: - setup_details[key] = value + setup_details[key] = value if self.params.get('cluster_backend') == 'aws': setup_details["ami_tags_db_scylla"] = [] @@ -728,7 +727,7 @@ def _calc_stats(self, ps_results): stat["stdev"] = stddev(ops_filtered) self.log.debug("Stats: %s", stat) return stat - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 self.log.error("Exception when calculating PrometheusDB stats: %s" % ex) return {} @@ -774,7 +773,7 @@ def _convert_stat(self, stat, stress_result): return 0 try: return float(stress_result[stat]) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self.log.warning("Error in conversion of '%s' for stat '%s': '%s'" "Discarding stat." 
% (stress_result[stat], stat, details)) return 0 diff --git a/sdcm/ec2_client.py b/sdcm/ec2_client.py index 901e741815..8cdb22ef64 100644 --- a/sdcm/ec2_client.py +++ b/sdcm/ec2_client.py @@ -69,7 +69,7 @@ def _get_ec2_client(self, region_name=None) -> EC2Client: boto3.setup_default_session(region_name=region_name) return self._get_ec2_client() - def _request_spot_instance(self, instance_type, image_id, region_name, network_if, spot_price, key_pair='', # pylint: disable=too-many-arguments + def _request_spot_instance(self, instance_type, image_id, region_name, network_if, spot_price, key_pair='', # pylint: disable=too-many-arguments # noqa: PLR0913 user_data='', count=1, duration=0, request_type='one-time', block_device_mappings=None, aws_instance_profile=None, placement_group_name=None): """ diff --git a/sdcm/fill_db_data.py b/sdcm/fill_db_data.py index 50f86698ab..def7fcefdd 100644 --- a/sdcm/fill_db_data.py +++ b/sdcm/fill_db_data.py @@ -3118,7 +3118,7 @@ def cql_create_tables(self, session): with self._execute_and_log(f'Created tables for test "{test_name}" in {{}} seconds'): for create_table in item['create_tables']: if self.version_cdc_support(): - create_table = self._enable_cdc(item, create_table) + create_table = self._enable_cdc(item, create_table) # noqa: PLW2901 # wait a while before creating index, there is a delay of create table for # waiting the schema agreement if 'CREATE INDEX' in create_table.upper(): @@ -3327,7 +3327,7 @@ def fill_db_data(self): try: session.set_keyspace(self.base_ks) self.truncate_tables(session) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Found error in truncate tables: '%s'", ex) # Insert data to the tables according to the "inserts" and flush to disk in several cases (nodetool flush) @@ -3384,11 +3384,11 @@ def fill_table(): self.log.info('running now session.execute') full_query_res = self.rows_to_list(session.execute(statement)) if not full_query_res: - assert f'Query "{statement}" returned no entries' + assert f'Query "{statement}" returned no entries' # noqa: PLW0129 self.log.info('running now fetch_all_rows') full_res = self.rows_to_list( fetch_all_rows(session=session, default_fetch_size=100, statement=statement)) if not full_res: - assert f'Paged query "{statement}" returned no value' + assert f'Paged query "{statement}" returned no value' # noqa: PLW0129 self.log.info('will now compare results from session.execute and fetch_all_rows') self.assertEqual(sorted(full_query_res), sorted(full_res), "Results should be identical") diff --git a/sdcm/gemini_thread.py b/sdcm/gemini_thread.py index fd0eca6da3..241e4d6dd7 100644 --- a/sdcm/gemini_thread.py +++ b/sdcm/gemini_thread.py @@ -130,14 +130,14 @@ def _run_stress(self, loader, loader_idx, cpu_idx): ) # sleep to gather all latest log messages time.sleep(5) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error(details) result = getattr(details, "result", NotGeminiErrorResult(details)) if result.exited: gemini_stress_event.add_result(result=result) gemini_stress_event.severity = Severity.ERROR - else: + else: # noqa: PLR5501 if result.stderr: gemini_stress_event.add_result(result=result) gemini_stress_event.severity = Severity.WARNING @@ -187,7 +187,7 @@ def _parse_gemini_summary_json(json_str): try: results = json.loads(json_str) - except Exception as details: # pylint: disable=broad-except + except 
Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Invalid json document {}".format(details)) return results.get('result') diff --git a/sdcm/logcollector.py b/sdcm/logcollector.py index d67d6baa97..73a8825e3a 100644 --- a/sdcm/logcollector.py +++ b/sdcm/logcollector.py @@ -150,7 +150,7 @@ def get_monitoring_version(self, node): return None, None, None result = node.remoter.run( f"cat {basedir}/{name}/monitor_version", ignore_status=True, verbose=False) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Failed to get monitoring version: %s", details) return None, None, None @@ -389,7 +389,7 @@ def get_grafana_annotations(self, grafana_ip: str) -> str: res = requests.get(f"http://{grafana_ip}:{self.grafana_port}/api/annotations") if res.ok: return res.text - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Unable to get Grafana annotations [%s]", details) return "" @@ -512,7 +512,7 @@ def get_grafana_screenshot(self, node, local_dst): return screenshots - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Error taking monitor screenshot: %s, traceback: %s", details, traceback.format_exc()) return [] @@ -578,7 +578,7 @@ def create_remote_storage_dir(self, node, path=''): 'Remote storing folder not created.\n{}'.format(result)) remote_dir = self.node_remote_dir - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Error during creating remote directory %s", details) remote_dir = self.node_remote_dir @@ -623,7 +623,7 @@ def collect_logs_per_node(node): for log_entity in self.log_entities: try: log_entity.collect(node, local_node_dir, remote_node_dir, local_search_path=local_search_path) - except Exception as details: # pylint: disable=unused-variable, broad-except + except Exception as details: # pylint: disable=unused-variable, broad-except # noqa: BLE001 LOGGER.error("Error occured during collecting on host: %s\n%s", node.name, details) LOGGER.debug("Nodes list %s", [node.name for node in self.nodes]) @@ -636,7 +636,7 @@ def collect_logs_per_node(node): try: ParallelObject(self.nodes, num_workers=workers_number, timeout=self.collect_timeout).run( collect_logs_per_node, ignore_exceptions=True) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error('Error occured during collecting logs %s', details) if not os.listdir(self.local_dir): @@ -686,7 +686,7 @@ def archive_to_tarfile(self, src_path: str, add_test_id_to_archive: bool = False src_name = src_name.replace(extension, f"-{self.test_id.split('-')[0]}{extension}") try: return self._compress_file(src_path, src_name) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Error during archive creation. 
Details: \n%s", details) return None @@ -770,7 +770,7 @@ def save_kallsyms_map(node): try: log_entity.collect(node, node.logdir, remote_node_dir) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Error occurred during collecting kallsyms on host: %s\n%s", node.name, details) @@ -797,7 +797,7 @@ def collect_log_entities(node, log_entities: List[BaseLogEntity]): try: log_entity.collect(node, node.logdir, remote_node_dir) LOGGER.debug("Diagnostic file '%s' collected", log_entity.name) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Error occurred during collecting diagnostics data on host: %s\n%s", node.name, details) @@ -1180,7 +1180,7 @@ def collect_logs(self, local_search_path: Optional[str] = None) -> list[str]: k8s_logdir, '.kube', current_k8s_logdir_sub_file)): KubernetesOps.gather_k8s_logs(k8s_logdir) break - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Got following failure processing the K8S logs: %s", exc) return super().collect_logs(local_search_path=local_search_path) @@ -1377,7 +1377,7 @@ def find_and_append_cloud_manager_instance_to_collecting_nodes(self): instance = get_manager_instance_by_cluster_id(cluster_id=cloud_cluster_id) if not instance: raise ValueError(f"Cloud manager for the cluster {cloud_cluster_id} not found") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Failed to get cloud manager instance. Error: %s", exc) return diff --git a/sdcm/mgmt/cli.py b/sdcm/mgmt/cli.py index 2765fb117a..5553132b6a 100644 --- a/sdcm/mgmt/cli.py +++ b/sdcm/mgmt/cli.py @@ -548,7 +548,7 @@ def create_restore_task(self, restore_schema=False, restore_data=False, location LOGGER.debug("Created task id is: {}".format(task_id)) return RestoreTask(task_id=task_id, cluster_id=self.id, manager_node=self.manager_node) - def create_backup_task(self, dc_list=None, # pylint: disable=too-many-arguments,too-many-locals,too-many-branches + def create_backup_task(self, dc_list=None, # pylint: disable=too-many-arguments,too-many-locals,too-many-branches # noqa: PLR0913 dry_run=None, interval=None, keyspace_list=None, cron=None, location_list=None, num_retries=None, rate_limit_list=None, retention=None, show_tables=None, snapshot_parallel_list=None, start_date=None, upload_parallel_list=None, legacy_args=None): @@ -1326,7 +1326,7 @@ def get(self, path, params=None): raise Exception(err_msg) try: return json.loads(resp.content) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error('Failed load data from json %s, error: %s', resp.content, ex) return resp.content diff --git a/sdcm/mgmt/operator.py b/sdcm/mgmt/operator.py index 05085132ce..8db746b619 100644 --- a/sdcm/mgmt/operator.py +++ b/sdcm/mgmt/operator.py @@ -223,7 +223,7 @@ def wait_for_healthchecks(self): throw_exc=True, ) - def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_list=None, location_list=None, + def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_list=None, location_list=None, # noqa: PLR0913 num_retries=None, rate_limit_list=None, retention=None, cron=None, snapshot_parallel_list=None, start_date=None, 
upload_parallel_list=None, name=None) -> ScyllaOperatorBackupTask:
@@ -254,7 +254,7 @@ def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_lis
         return so_backup_task

     # pylint: disable=too-many-locals
-    def create_backup_task(
+    def create_backup_task(  # noqa: PLR0913
             self,
             dc_list=None,
             dry_run=None,
diff --git a/sdcm/monitorstack/__init__.py b/sdcm/monitorstack/__init__.py
index fb50926b76..3480a84467 100644
--- a/sdcm/monitorstack/__init__.py
+++ b/sdcm/monitorstack/__init__.py
@@ -37,7 +37,7 @@ class ErrorUploadAnnotations(Exception):
     pass


-def restore_monitoring_stack(test_id, date_time=None):  # pylint: disable=too-many-return-statements,too-many-locals
+def restore_monitoring_stack(test_id, date_time=None):  # pylint: disable=too-many-return-statements,too-many-locals  # noqa: PLR0911
     if not is_docker_available():
         return False
@@ -303,7 +303,7 @@ def get_monitoring_stack_scylla_version(monitoring_stack_dir):
             scylla_version = 'master'
         return monitoring_version, scylla_version
-    except Exception:  # pylint: disable=broad-except
+    except Exception:  # pylint: disable=broad-except  # noqa: BLE001
         return 'branch-3.0', 'master'
@@ -313,7 +313,7 @@ def restore_grafana_dashboards_and_annotations(monitoring_dockers_dir, grafana_d
         status.append(restore_sct_dashboards(grafana_docker_port=grafana_docker_port,
                                              sct_dashboard_file=sct_dashboard_file))
         status.append(restore_annotations_data(monitoring_dockers_dir, grafana_docker_port=grafana_docker_port))
-    except Exception as details:  # pylint: disable=broad-except
+    except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.error("Error during uploading sct monitoring data %s", details)
         status.append(False)
@@ -323,7 +323,7 @@ def restore_grafana_dashboards_and_annotations(monitoring_dockers_dir, grafana_d
 def run_monitoring_stack_containers(monitoring_stack_dir, monitoring_data_dir, scylla_version, tenants_number=1):
     try:
         return start_dockers(monitoring_stack_dir, monitoring_data_dir, scylla_version, tenants_number)
-    except Exception as details:  # pylint: disable=broad-except
+    except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.error("Dockers are not started. Error: %s", details)
         return {}
@@ -504,7 +504,7 @@ def verify_grafana_is_available(grafana_docker_port=GRAFANA_DOCKER_PORT):
                                                title=dashboard.title)
             grafana_statuses.append(result)
             LOGGER.info("Dashboard {} is available".format(dashboard.title))
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             LOGGER.error("Dashboard %s is not available. Error: %s", dashboard.title, details)
             grafana_statuses.append(False)
@@ -535,7 +535,7 @@ def verify_prometheus_is_available(prometheus_docker_port=PROMETHEUS_DOCKER_PORT
         prom_client.get_throughput(time_start, time_end)
         LOGGER.info("Prometheus is up")
         return True
-    except Exception as details:  # pylint: disable=broad-except
+    except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.error("Error requesting prometheus %s", details)
         return False
diff --git a/sdcm/ndbench_thread.py b/sdcm/ndbench_thread.py
index 2ed87c8638..1ae653c6ef 100644
--- a/sdcm/ndbench_thread.py
+++ b/sdcm/ndbench_thread.py
@@ -106,7 +106,7 @@ def run(self):
                     operation, name = key.split('_', 1)
                     self.set_metric(operation, name, float(value))
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 LOGGER.warning("Failed to send metric. Failed with exception {exc}".format(exc=exc))
@@ -155,7 +155,7 @@ def _run_stress(self, loader, loader_idx, cpu_idx):
                 retry=0,
             )
             return docker_run_result
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             NdBenchStressEvent.failure(node=str(loader),
                                        stress_cmd=self.stress_cmd,
                                        log_file_name=log_file_name,
diff --git a/sdcm/nemesis.py b/sdcm/nemesis.py
index f05080414f..dc42c0e7bc 100644
--- a/sdcm/nemesis.py
+++ b/sdcm/nemesis.py
@@ -261,10 +261,10 @@ def _init_num_deletions_factor(self):
         if not isinstance(stress_cmds, list):
             stress_cmds = [stress_cmds]
         for stress_cmd in stress_cmds:
-            stress_cmd = stress_cmd.split()
+            stress_cmd_splitted = stress_cmd.split()
             # In case background load has writes, we can delete all available partitions,
             # since they are rewritten. Otherwise, we can only delete some of them.
-            if 'scylla-bench' in stress_cmd and '-mode=write' in stress_cmd:
+            if 'scylla-bench' in stress_cmd_splitted and '-mode=write' in stress_cmd_splitted:
                 self.num_deletions_factor = 1
                 break
@@ -480,7 +480,7 @@ def _is_it_on_kubernetes(self) -> bool:
         return isinstance(getattr(self.tester, "db_cluster", None), PodCluster)

     # pylint: disable=too-many-arguments,unused-argument
-    def get_list_of_methods_by_flags(  # pylint: disable=too-many-locals
+    def get_list_of_methods_by_flags(  # pylint: disable=too-many-locals  # noqa: PLR0913
             self,
             disruptive: Optional[bool] = None,
             run_with_gemini: Optional[bool] = None,
@@ -595,7 +595,7 @@ def _get_subclasses_from_list(
     def __str__(self):
         try:
             return str(self.__class__).split("'")[1]
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             return str(self.__class__)

     def _kill_scylla_daemon(self):
@@ -1023,7 +1023,7 @@ def replace_full_file_name_to_prefix(self, one_file, ks_cf_for_destroy):
         try:
             file_name_template = re.search(r"([^-]+-[^-]+)-", file_name).group(1)
-        except Exception as error:  # pylint: disable=broad-except
+        except Exception as error:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.debug('File name "{file_name}" is not as expected for Scylla data files. '
                            'Search files for "{ks_cf_for_destroy}" table'.format(file_name=file_name,
                                                                                  ks_cf_for_destroy=ks_cf_for_destroy))
@@ -1373,9 +1373,9 @@ def _verify_resharding_on_k8s(self, cpus, dc_idx):

         # Check that liveness probe didn't report any errors
         # https://github.com/scylladb/scylla-operator/issues/894
-        liveness_probe_failures = list(liveness_probe_failures)
-        assert not liveness_probe_failures, (
-            f"There are liveness probe failures: {liveness_probe_failures}")
+        liveness_probe_failures_return = list(liveness_probe_failures)
+        assert not liveness_probe_failures_return, (
+            f"There are liveness probe failures: {liveness_probe_failures_return}")

         self.log.info("Resharding has successfully ended on whole Scylla cluster.")
@@ -2119,7 +2119,7 @@ def _add_drop_column_run_cql_query(self, cmd, ks,
             with self.cluster.cql_connection_patient(self.target_node, keyspace=ks) as session:
                 session.default_consistency_level = consistency_level
                 session.execute(cmd)
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.debug(f"Add/Remove Column Nemesis: CQL query '{cmd}' execution has failed with error '{str(exc)}'")
             return False
         return True
@@ -2228,7 +2228,7 @@ def choose_partitions_for_delete(self, partitions_amount, ks_cf, with_clustering
             cmd = f"select ck from {ks_cf} where pk={partition_key} order by ck desc limit 1"
             try:
                 result = session.execute(SimpleStatement(cmd, fetch_size=1), timeout=300)
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 self.log.error(str(exc))
                 continue
@@ -2739,12 +2739,11 @@ def set_new_twcs_settings(settings: Dict[str, Any]) -> Dict[str, Any]:
                 current_size = 3
             else:
                 current_size += 10
+        elif (current_size // 60) > 10:
+            current_unit = "HOURS"
+            current_size = 11
         else:
-            if (current_size // 60) > 10:
-                current_unit = "HOURS"
-                current_size = 11
-            else:
-                current_size += 35
+            current_size += 35

         settings["gc"] = current_size * multiplier * expected_sstable_number // 2
         settings["dttl"] = current_size * multiplier * expected_sstable_number
@@ -3229,7 +3228,7 @@ def get_ks_with_few_tables(keyspace_table):
             nodetool_cmd = snapshot_option[0]() if len(snapshot_option) == 1 else snapshot_option[0](snapshot_option[1])
             if not nodetool_cmd:
                 raise ValueError("Failed to get nodetool command.")
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             raise ValueError(f"Failed to get nodetool command. Error: {exc}") from exc

         self.log.debug(f'Take snapshot with command: {nodetool_cmd}')
@@ -3496,7 +3495,7 @@ def remove_node():
         for node in up_normal_nodes:
             try:
                 self.repair_nodetool_repair(node=node, publish_event=False)
-            except Exception as details:  # pylint: disable=broad-except
+            except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
                 self.log.error(f"failed to execute repair command "
                                f"on node {node} due to the following error: {str(details)}")
@@ -3718,7 +3717,7 @@ def _run_commands_wait_and_cleanup(  # pylint: disable=too-many-arguments
                 cmd_executed[cmd_num] = True
                 if wait_time:
                     time.sleep(wait_time)
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 cmd_executed[cmd_num] = False
                 self.log.error(
                     f"{name}: failed to execute start command "
@@ -3728,7 +3727,7 @@ def _run_commands_wait_and_cleanup(  # pylint: disable=too-many-arguments
         for cmd_num, cmd in enumerate(cleanup_commands):
             try:
                 node.remoter.run(cmd)
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 self.log.debug(f"{name}: failed to execute cleanup command "
                                f"{cmd} on node {node} due to the following error: {str(exc)}")
@@ -3815,7 +3814,7 @@ def decommission_post_action():
         except Group0MembersNotConsistentWithTokenRingMembersException as exc:
             self.log.error("Cluster state could be not predictable due to ghost members in raft group0: %s", exc)
             raise
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error('Unexpected exception raised in checking decommission status: %s', exc)

         self.log.info('Decommission might complete before stopping it. Re-add a new node')
@@ -3999,7 +3998,7 @@ def decommission_nodes(self, add_nodes_number, rack, is_seed: Optional[Union[boo
                 InfoEvent(f'StartEvent - ShrinkCluster started decommissioning a node {self.target_node}').publish()
                 self.decommission_node(self.target_node)
                 InfoEvent(f'FinishEvent - ShrinkCluster has done decommissioning a node {self.target_node}').publish()
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 InfoEvent(f'FinishEvent - ShrinkCluster failed decommissioning a node {self.target_node} with error '
                           f'{str(exc)}').publish()
@@ -5009,7 +5008,7 @@ def disrupt_disable_binary_gossip_execute_major_compaction(self):
             raise


-def disrupt_method_wrapper(method, is_exclusive=False):  # pylint: disable=too-many-statements
+def disrupt_method_wrapper(method, is_exclusive=False):  # pylint: disable=too-many-statements  # noqa: PLR0915
     """
     Log time elapsed for method to run
@@ -5063,7 +5062,7 @@ def data_validation_prints(args):
                     args[0].tester.data_validator.validate_range_not_expected_to_change(session, during_nemesis=True)
                     args[0].tester.data_validator.validate_range_expected_to_change(session, during_nemesis=True)
                     args[0].tester.data_validator.validate_deleted_rows(session, during_nemesis=True)
-        except Exception as err:  # pylint: disable=broad-except
+        except Exception as err:  # pylint: disable=broad-except  # noqa: BLE001
            args[0].log.debug(f'Data validator error: {err}')

     @wraps(method)
@@ -5133,7 +5132,7 @@ def wrapper(*args, **kwargs):  # pylint: disable=too-many-statements
                 log_info.update({'subtype': 'skipped', 'skip_reason': skip_reason})
                 nemesis_event.skip(skip_reason=skip_reason)
                 raise
-            except Exception as details:  # pylint: disable=broad-except
+            except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
                 nemesis_event.add_error([str(details)])
                 nemesis_event.full_traceback = traceback.format_exc()
                 nemesis_event.severity = Severity.ERROR
@@ -5162,7 +5161,7 @@ def wrapper(*args, **kwargs):  # pylint: disable=too-many-statements
             except ElasticSearchConnectionTimeout as err:
                 args[0].log.warning(f"Connection timed out when attempting to update elasticsearch statistics:\n"
                                     f"{err}")
-            except Exception as err:  # pylint: disable=broad-except
+            except Exception as err:  # pylint: disable=broad-except  # noqa: BLE001
                 args[0].log.warning(f"Unexpected error when attempting to update elasticsearch statistics:\n"
                                     f"{err}")
             args[0].log.info(f"log_info: {log_info}")
@@ -5576,17 +5575,17 @@ def prefixed(pref: str, val: str) -> str:
     weights: List[float] = []
     listed_methods: Set[str] = set()

-    for name, weight in dist.items():
-        name = str(name)
+    for _name, _weight in dist.items():
+        name = str(_name)
         prefixed_name = prefixed('disrupt_', name)
         if prefixed_name not in all_methods:
             raise ValueError(f"'{name}' is not a valid disruption. All methods: {all_methods.keys()}")

-        if not is_nonnegative_number(weight):
+        if not is_nonnegative_number(_weight):
             raise ValueError("Each disruption weight must be a non-negative number."
                              " '{weight}' is not a valid weight.")

-        weight = float(weight)
+        weight = float(_weight)
         if weight > 0:
             population.append(all_methods[prefixed_name])
             weights.append(weight)
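For reference, ruff's PLW2901 rule is behind the loop-variable renames above (stress_cmd -> stress_cmd_splitted, name/weight -> _name/_weight): it flags a loop variable reassigned inside the loop body. A minimal sketch of the pattern with made-up names, not code from this repo:

    def trim_all(lines):
        cleaned = []
        for line in lines:
            stripped = line.strip()  # rename instead of `line = line.strip()` (PLW2901)
            cleaned.append(stripped)
        return cleaned

    assert trim_all([" a ", "b\n"]) == ["a", "b"]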
" '{weight}' is not a valid weight.") - weight = float(weight) + weight = float(_weight) if weight > 0: population.append(all_methods[prefixed_name]) weights.append(weight) diff --git a/sdcm/nosql_thread.py b/sdcm/nosql_thread.py index 939429af6f..be07056a61 100644 --- a/sdcm/nosql_thread.py +++ b/sdcm/nosql_thread.py @@ -111,6 +111,6 @@ def _run_stress(self, loader, loader_idx, cpu_idx): f'{self.docker_image_name} ' f'{stress_cmd} --report-graphite-to graphite-exporter:9109', timeout=self.timeout + self.shutdown_timeout, log_file=log_file_name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.configure_event_on_failure(stress_event=stress_event, exc=exc) return None diff --git a/sdcm/prometheus.py b/sdcm/prometheus.py index 53d1c77c1c..5361540abd 100644 --- a/sdcm/prometheus.py +++ b/sdcm/prometheus.py @@ -61,14 +61,14 @@ def start_metrics_server(): ip = get_my_ip() LOGGER.info('prometheus API server running on port: %s', port) return '{}:{}'.format(ip, port) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error('Cannot start local http metrics server: %s', ex) return None def nemesis_metrics_obj(metric_name_suffix=''): - global NM_OBJ # pylint: disable=global-statement,global-variable-not-assigned + global NM_OBJ # pylint: disable=global-statement,global-variable-not-assigned # noqa: PLW0602 if not NM_OBJ.get(metric_name_suffix): NM_OBJ[metric_name_suffix] = NemesisMetrics(metric_name_suffix) return NM_OBJ[metric_name_suffix] @@ -97,7 +97,7 @@ def __init__(self, metric_name_suffix=''): def create_counter(name, desc, param_list): try: return prometheus_client.Counter(name, desc, param_list) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error('Cannot create metrics counter: %s', ex) return None @@ -105,7 +105,7 @@ def create_counter(name, desc, param_list): def create_gauge(name, desc, param_list): try: return prometheus_client.Gauge(name, desc, param_list) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error('Cannot create metrics gauge: %s', ex) return None @@ -138,7 +138,7 @@ def __init__(self, ip, port=9093, interval=10, stop_flag: threading.Event = None def is_alert_manager_up(self): try: return requests.get(f"{self._alert_manager_url}/status", timeout=3).json()['cluster']['status'] == 'ready' - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 return False @log_run_info @@ -184,7 +184,7 @@ def _publish_end_of_alerts(self, alerts: dict): for alert in alerts.values(): if not alert.get('endsAt', None): alert['endsAt'] = time.strftime("%Y-%m-%dT%H:%M:%S.0Z", time.gmtime()) - alert = updated_dict.get(alert['fingerprint'], alert) + alert = updated_dict.get(alert['fingerprint'], alert) # noqa: PLW2901 labels = alert.get("labels") or {} alert_name = labels.get("alertname", "") node = labels.get("instance", "N/A") diff --git a/sdcm/provision/aws/capacity_reservation.py b/sdcm/provision/aws/capacity_reservation.py index 313e2c4859..e6ddbc5402 100644 --- a/sdcm/provision/aws/capacity_reservation.py +++ b/sdcm/provision/aws/capacity_reservation.py @@ -210,7 +210,7 @@ def _create(ec2, test_id, availability_zone, instance_type, instance_count, dura **additional_params ) return 
response['CapacityReservation']['CapacityReservationId'] - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info("Failed to create capacity reservation for %s. Error: %s", instance_type, exc) return None diff --git a/sdcm/provision/aws/provisioner.py b/sdcm/provision/aws/provisioner.py index b258479bcd..0772f3bba1 100644 --- a/sdcm/provision/aws/provisioner.py +++ b/sdcm/provision/aws/provisioner.py @@ -208,7 +208,7 @@ def _get_provisioned_fleet_instance_ids( try: resp = self._ec2_client(provision_parameters).describe_spot_fleet_requests(SpotFleetRequestIds=request_ids) LOGGER.info("%s: - %s", request_ids, resp) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info("%s: - failed to get status: %s", request_ids, exc) return [] for req in resp['SpotFleetRequestConfigs']: diff --git a/sdcm/provision/aws/utils.py b/sdcm/provision/aws/utils.py index 3adc80c317..1ff5551a0d 100644 --- a/sdcm/provision/aws/utils.py +++ b/sdcm/provision/aws/utils.py @@ -145,7 +145,7 @@ def wait_for_provision_request_done( def get_provisioned_fleet_instance_ids(region_name: str, request_ids: List[str]) -> Optional[List[str]]: try: resp = ec2_clients[region_name].describe_spot_fleet_requests(SpotFleetRequestIds=request_ids) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 return [] for req in resp['SpotFleetRequestConfigs']: if req['SpotFleetRequestState'] == 'active' and req.get('ActivityStatus', None) == STATUS_FULFILLED: @@ -168,7 +168,7 @@ def get_provisioned_fleet_instance_ids(region_name: str, request_ids: List[str]) for request_id in request_ids: try: resp = ec2_clients[region_name].describe_spot_fleet_instances(SpotFleetRequestId=request_id) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 return None provisioned_instances.extend([inst['InstanceId'] for inst in resp['ActiveInstances']]) return provisioned_instances @@ -182,7 +182,7 @@ def get_provisioned_spot_instance_ids(region_name: str, request_ids: List[str]) """ try: resp = ec2_clients[region_name].describe_spot_instance_requests(SpotInstanceRequestIds=request_ids) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 return [] provisioned = [] for req in resp['SpotInstanceRequests']: diff --git a/sdcm/provision/azure/ip_provider.py b/sdcm/provision/azure/ip_provider.py index 98e7b90051..d996b37f88 100644 --- a/sdcm/provision/azure/ip_provider.py +++ b/sdcm/provision/azure/ip_provider.py @@ -37,7 +37,7 @@ def __post_init__(self): try: ips = self._azure_service.network.public_ip_addresses.list(self._resource_group_name) for ip in ips: - ip = self._azure_service.network.public_ip_addresses.get(self._resource_group_name, ip.name) + ip = self._azure_service.network.public_ip_addresses.get(self._resource_group_name, ip.name) # noqa: PLW2901 self._cache[ip.name] = ip except ResourceNotFoundError: pass diff --git a/sdcm/provision/azure/network_interface_provider.py b/sdcm/provision/azure/network_interface_provider.py index 78eef78907..eb257e97e9 100644 --- a/sdcm/provision/azure/network_interface_provider.py +++ b/sdcm/provision/azure/network_interface_provider.py @@ -36,7 +36,7 @@ def __post_init__(self): try: nics = 
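The BLE001 suppressions throughout mark deliberate blanket `except Exception` handlers like the provisioning fallbacks above. A minimal sketch of the two ways to satisfy the rule, with illustrative names only:

    import json

    def parse_or_default(raw: str) -> dict:
        try:
            return json.loads(raw)
        except ValueError:  # narrowed exception: what BLE001 nudges toward
            return {}

    def parse_never_raises(raw: str) -> dict:
        try:
            return json.loads(raw)
        except Exception:  # noqa: BLE001 - blanket handler kept on purpose
            return {}

    assert parse_or_default("not json") == {}
    assert parse_never_raises("not json") == {}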
diff --git a/sdcm/provision/azure/ip_provider.py b/sdcm/provision/azure/ip_provider.py
index 98e7b90051..d996b37f88 100644
--- a/sdcm/provision/azure/ip_provider.py
+++ b/sdcm/provision/azure/ip_provider.py
@@ -37,7 +37,7 @@ def __post_init__(self):
         try:
             ips = self._azure_service.network.public_ip_addresses.list(self._resource_group_name)
             for ip in ips:
-                ip = self._azure_service.network.public_ip_addresses.get(self._resource_group_name, ip.name)
+                ip = self._azure_service.network.public_ip_addresses.get(self._resource_group_name, ip.name)  # noqa: PLW2901
                 self._cache[ip.name] = ip
         except ResourceNotFoundError:
             pass
diff --git a/sdcm/provision/azure/network_interface_provider.py b/sdcm/provision/azure/network_interface_provider.py
index 78eef78907..eb257e97e9 100644
--- a/sdcm/provision/azure/network_interface_provider.py
+++ b/sdcm/provision/azure/network_interface_provider.py
@@ -36,7 +36,7 @@ def __post_init__(self):
         try:
             nics = self._azure_service.network.network_interfaces.list(self._resource_group_name)
             for nic in nics:
-                nic = self._azure_service.network.network_interfaces.get(self._resource_group_name, nic.name)
+                nic = self._azure_service.network.network_interfaces.get(self._resource_group_name, nic.name)  # noqa: PLW2901
                 self._cache[nic.name] = nic
         except ResourceNotFoundError:
             pass
diff --git a/sdcm/provision/azure/virtual_machine_provider.py b/sdcm/provision/azure/virtual_machine_provider.py
index 72d827f06e..77159e797e 100644
--- a/sdcm/provision/azure/virtual_machine_provider.py
+++ b/sdcm/provision/azure/virtual_machine_provider.py
@@ -43,8 +43,8 @@ def __post_init__(self):
         """Discover existing virtual machines for resource group."""
         try:
             v_ms = self._azure_service.compute.virtual_machines.list(self._resource_group_name)
-            for v_m in v_ms:
-                v_m = self._azure_service.compute.virtual_machines.get(self._resource_group_name, v_m.name)
+            for _v_m in v_ms:
+                v_m = self._azure_service.compute.virtual_machines.get(self._resource_group_name, _v_m.name)
                 if v_m.provisioning_state != "Deleting":
                     self._cache[v_m.name] = v_m
         except ResourceNotFoundError:
diff --git a/sdcm/provision/azure/virtual_network_provider.py b/sdcm/provision/azure/virtual_network_provider.py
index a3c959fa44..cb510cd56a 100644
--- a/sdcm/provision/azure/virtual_network_provider.py
+++ b/sdcm/provision/azure/virtual_network_provider.py
@@ -36,8 +36,8 @@ def __post_init__(self):
         """Discover existing virtual networks for resource group."""
         try:
             vnets = self._azure_service.network.virtual_networks.list(self._resource_group_name)
-            for vnet in vnets:
-                vnet = self._azure_service.network.virtual_networks.get(self._resource_group_name, vnet.name)
+            for _vnet in vnets:
+                vnet = self._azure_service.network.virtual_networks.get(self._resource_group_name, _vnet.name)
                 self._cache[vnet.name] = vnet
         except ResourceNotFoundError:
             pass
diff --git a/sdcm/provision/scylla_yaml/scylla_yaml.py b/sdcm/provision/scylla_yaml/scylla_yaml.py
index b1d4b83fbd..9729c4e8af 100644
--- a/sdcm/provision/scylla_yaml/scylla_yaml.py
+++ b/sdcm/provision/scylla_yaml/scylla_yaml.py
@@ -372,7 +372,7 @@ def _update_dict(self, obj: dict, fields_data: dict):
                 if not isinstance(attr_value, dict):
                     raise ValueError("Unexpected data `%s` in attribute `%s`" % (
                         type(attr_value), attr_name))
-                attr_value = attr_info.type(**attr_value)
+                attr_value = attr_info.type(**attr_value)  # noqa: PLW2901
             setattr(self, attr_name, attr_value)

     def update(self, *objects: Union['ScyllaYaml', dict]):
diff --git a/sdcm/remote/libssh2_client/__init__.py b/sdcm/remote/libssh2_client/__init__.py
index 8bc7e8ed5e..f23ef9a14f 100644
--- a/sdcm/remote/libssh2_client/__init__.py
+++ b/sdcm/remote/libssh2_client/__init__.py
@@ -76,7 +76,7 @@ def run(self):
         try:
             self._read_output(self._session, self._channel, self._timeout, self._timeout_read_data,
                               self.stdout, self.stderr)
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             self.raised = exc

     def _read_output(  # pylint: disable=too-many-arguments,too-many-branches
@@ -150,7 +150,7 @@ def run(self):
             try:
                 time_to_wait = self._session.eagain(
                     self._session.keepalive_send, timeout=self._keepalive_timeout)
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 time_to_wait = self._keepalive_timeout
             sleep(time_to_wait)
@@ -356,7 +356,7 @@ def _auth(self):
                 with self.session.lock:
                     self.session.agent_auth(self.user)
                 return
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 pass
         self._password_auth()
@@ -386,7 +386,7 @@ def _password_auth(self):
             self.session.eagain(
                 self.session.userauth_password, args=(self.user, self.password),
                 timeout=self.timings.auth_timeout)
-        except Exception as error:  # pylint: disable=broad-except
+        except Exception as error:  # pylint: disable=broad-except  # noqa: BLE001
             raise AuthenticationException("Password authentication failed") from error

     @staticmethod
@@ -403,7 +403,7 @@ def _init_socket(self, host: str, port: int):
         if self.sock:
             try:
                 self.sock.close()
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 pass
         family = self._get_socket_family(host)
         if family is None:
@@ -447,7 +447,7 @@ def _process_output(  # pylint: disable=too-many-arguments, too-many-branches
                         stdout_stream.write(data)
                     for watcher in watchers:
                         watcher.submit_line(data)
-                except Exception:  # pylint: disable=broad-except
+                except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                     pass
         if stderr_stream is not None:
             if reader.stderr.qsize():
@@ -457,7 +457,7 @@ def _process_output(  # pylint: disable=too-many-arguments, too-many-branches
                         stderr_stream.write(data)
                     for watcher in watchers:
                         watcher.submit_line(data)
-                except Exception:  # pylint: disable=broad-except
+                except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                     pass
         return True
@@ -502,7 +502,7 @@ def connect(self, timeout: NullableTiming = __DEFAULT__):
             try:
                 with self.flood_preventing.get_lock(self):
                     self._connect()
-            except Exception as exc:
+            except Exception as exc:  # noqa: BLE001
                 self.disconnect()
                 raise ConnectError(str(exc)) from exc
             return
@@ -517,7 +517,7 @@ def connect(self, timeout: NullableTiming = __DEFAULT__):
             except AuthenticationError:
                 self.disconnect()
                 raise  # pylint: disable=broad-except
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 self.disconnect()
                 if perf_counter() > end_time:
                     ex_msg = f'Failed to connect in {timeout} seconds, last error: ({type(exc).__name__}){str(exc)}'
@@ -540,14 +540,14 @@ def disconnect(self):
         if self.session is not None:
             try:
                 self.session.eagain(self.session.disconnect)
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 pass
             del self.session
             self.session = None
         if self.sock is not None:
             try:
                 self.sock.close()
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 pass
             self.sock = None
@@ -587,12 +587,12 @@ def run(  # pylint: disable=unused-argument,too-many-arguments,too-many-locals
             if self.session is None:
                 self.connect()
             channel = self.open_channel()
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             return self._complete_run(
                 channel, FailedToRunCommand(result, exc), timeout_reached, timeout, result, warn, stdout, stderr)
         try:
             self._apply_env(channel, env)
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             return self._complete_run(
                 channel, FailedToRunCommand(result, exc), timeout_reached, timeout, result, warn, stdout, stderr)
         if watchers:
@@ -602,7 +602,7 @@ def run(  # pylint: disable=unused-argument,too-many-arguments,too-many-locals
                 self.execute(command, channel=channel, use_pty=False)
                 self._process_output(watchers, encoding, stdout, stderr, reader, timeout,
                                      self.timings.interactive_read_data_chunk_timeout)
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 exception = FailedToReadCommandOutput(result, exc)
             if reader.is_alive():
                 reader.stop()
@@ -615,7 +615,7 @@ def run(  # pylint: disable=unused-argument,too-many-arguments,too-many-locals
                 timeout_reached = not self._process_output_no_watchers(
                     self.session, channel, encoding, stdout, stderr, timeout,
                     self.timings.read_data_chunk_timeout)
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 exception = FailedToReadCommandOutput(result, exc)
         return self._complete_run(channel, exception, timeout_reached, timeout, result, warn, stdout, stderr)
@@ -637,11 +637,11 @@ def _complete_run(self, channel: Channel, exception: Exception,  # pylint: disab
             try:
                 with self.session.lock:
                     channel.close()
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 print(f'Failed to close channel due to the following error: {exc}')
             try:
                 self.session.eagain(channel.wait_closed, timeout=self.timings.channel_close_timeout)
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 print(f'Failed to close channel due to the following error: {exc}')
             exit_status = channel.get_exit_status()
             self.session.drop_channel(channel)
@@ -679,7 +679,7 @@ def open_channel(self) -> Channel:
                 chan = self.session.eagain(self.session.open_session)
                 if chan != LIBSSH2_ERROR_EAGAIN:
                     break
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 pass
             delay = next(delay_iter, delay)
             sleep(delay)
diff --git a/sdcm/remote/libssh2_client/exceptions.py b/sdcm/remote/libssh2_client/exceptions.py
index 5e98a37c84..b6f843cfcd 100644
--- a/sdcm/remote/libssh2_client/exceptions.py
+++ b/sdcm/remote/libssh2_client/exceptions.py
@@ -102,11 +102,10 @@ def streams_for_display(self) -> tuple:
             stdout = self.result.tail("stdout")
         if self.result.pty:
             stderr = " n/a (PTYs have no stderr)"
+        elif "stderr" not in self.result.hide:
+            stderr = already_printed
         else:
-            if "stderr" not in self.result.hide:
-                stderr = already_printed
-            else:
-                stderr = self.result.tail("stderr")
+            stderr = self.result.tail("stderr")
         return stdout, stderr

     def __repr__(self) -> str:
diff --git a/sdcm/remote/libssh2_client/session.py b/sdcm/remote/libssh2_client/session.py
index 20ddf01657..b4ae1302b7 100644
--- a/sdcm/remote/libssh2_client/session.py
+++ b/sdcm/remote/libssh2_client/session.py
@@ -74,11 +74,11 @@ def drop_channel(self, channel: Channel):
         self.channels.remove(channel)
         try:
             channel.close()
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass
         try:
             channel.wait_closed()
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass
         del channel
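The `else:` blocks holding a lone `if` collapsed into `elif` above (streams_for_display in exceptions.py, and earlier the TWCS settings in nemesis.py) correspond to ruff's PLR5501. A before/after sketch with made-up values:

    def bucket(size: int) -> str:
        if size < 10:
            return "small"
        elif size < 100:  # was: `else:` wrapping a nested `if size < 100:`
            return "medium"
        else:
            return "large"

    assert [bucket(n) for n in (5, 50, 500)] == ["small", "medium", "large"]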
diff --git a/sdcm/remote/remote_base.py b/sdcm/remote/remote_base.py
index 10cdea1e48..9e97e78ff6 100644
--- a/sdcm/remote/remote_base.py
+++ b/sdcm/remote/remote_base.py
@@ -606,7 +606,7 @@ def _run():
             except self.exception_retryable as exc:
                 if self._run_on_retryable_exception(exc, new_session):
                     raise
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 if self._run_on_exception(exc, verbose, ignore_status):
                     raise
             return None
diff --git a/sdcm/remote/remote_cmd_runner.py b/sdcm/remote/remote_cmd_runner.py
index 1e114b9012..2d099e66cc 100644
--- a/sdcm/remote/remote_cmd_runner.py
+++ b/sdcm/remote/remote_cmd_runner.py
@@ -65,7 +65,7 @@ def _ssh_ping(self) -> bool:
             self.log.debug("%s: sleeping %s seconds before next retry", auth_exception, self.auth_sleep_time)
             self.ssh_up_thread_termination.wait(self.auth_sleep_time)
             return False
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.debug(details)
             return False
diff --git a/sdcm/remote/remote_libssh_cmd_runner.py b/sdcm/remote/remote_libssh_cmd_runner.py
index 26fd97c4df..60ca8c107c 100644
--- a/sdcm/remote/remote_libssh_cmd_runner.py
+++ b/sdcm/remote/remote_libssh_cmd_runner.py
@@ -54,11 +54,11 @@ def is_up(self, timeout: float = 30) -> bool:
         try:
             if self.connection.check_if_alive(timeout):
                 return True
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             try:
                 self.connection.close()
                 self.connection.open(timeout)
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 pass
         return False
@@ -68,11 +68,11 @@ def _run_on_retryable_exception(self, exc: Exception, new_session: bool) -> bool
         self.log.debug('Reestablish the session...')
         try:
             self.connection.disconnect()
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass
         try:
             self.connection.connect()
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass
         if self._is_error_retryable(str(exc)) or isinstance(exc, self.exception_retryable):
             raise RetryableNetworkException(str(exc), original=exc)
diff --git a/sdcm/results_analyze/__init__.py b/sdcm/results_analyze/__init__.py
index a0d8e0ebea..92407f371d 100644
--- a/sdcm/results_analyze/__init__.py
+++ b/sdcm/results_analyze/__init__.py
@@ -348,7 +348,7 @@ def sort_results_by_versions(results: list[Any]):
                                 key=lambda version: version, reverse=True)}
             return best_results
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             LOGGER.error("Search best results per version failed. Error: %s", exc)
             return {}
@@ -403,7 +403,7 @@ def _calculate_relative_change_magnitude(current_value, best_value):
                     best['average_time_operation_in_sec_diff'] = _calculate_relative_change_magnitude(
                         current_result[nemesis]['average_time_operation_in_sec'], best['average_time_operation_in_sec'])
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             LOGGER.error("Compare results failed: %s", exc)

     def check_regression(self, test_id, data, is_gce=False, node_benchmarks=None, email_subject_postfix=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements, too-many-arguments
@@ -1138,9 +1138,9 @@ def _add_best_for_info(test, subtest, metric_path, tests_info):
     def _mark_best_tests(self, prior_subtests, metrics, tests_info, main_test_id):
         main_tests_by_id = MagicList(tests_info.keys()).group_by('test_id')
-        for _, prior_tests in prior_subtests.items():
+        for _, _prior_tests in prior_subtests.items():
             prior_tests = MagicList(
-                [prior_test for prior_test in prior_tests if prior_test.main_test_id != main_test_id])
+                [prior_test for prior_test in _prior_tests if prior_test.main_test_id != main_test_id])
             if not prior_tests:
                 continue
             for metric_path in metrics:
@@ -1235,7 +1235,7 @@ def _cleanup_not_complete_main_tests(prior_main_tests: list, prior_subtests: dic
             for num in sorted(to_delete, reverse=True):
                 prior_tests.pop(num)

-    def check_regression_multi_baseline(
+    def check_regression_multi_baseline(  # noqa: PLR0912, PLR0915
             self,
             test_id,
             subtests_info: list = None,
diff --git a/sdcm/results_analyze/test.py b/sdcm/results_analyze/test.py
index 99dd408ff2..a7dc8645d6 100644
--- a/sdcm/results_analyze/test.py
+++ b/sdcm/results_analyze/test.py
@@ -437,7 +437,7 @@ def is_gce(self):
     def _get_es_filters(cls, depth=2):
         tmp = []
         for es_filter in cls._get_all_es_data_mapping().values():
-            es_filter = '.'.join(es_filter.split('.')[:depth])
+            es_filter = '.'.join(es_filter.split('.')[:depth])  # noqa: PLW2901
             if es_filter not in tmp:
                 tmp.append(es_filter)
         return ['hits.hits.' + es_filter for es_filter in tmp]
@@ -459,11 +459,11 @@ def _get_es_query_from_instance_data(cls, instance_data: dict):
     def _get_es_query_from_es_data(cls, es_data: dict):
         filters = []
         for es_data_path, data_value in es_data.items():
-            es_data_path = es_data_path.split('.')
+            es_data_path = es_data_path.split('.')  # noqa: PLW2901
             if es_data_path[0] == '_source':
-                es_data_path = es_data_path[1:]
-            es_data_path = '.'.join(es_data_path)
-            es_data_path = cls._escape_filter_key(es_data_path)
+                es_data_path = es_data_path[1:]  # noqa: PLW2901
+            es_data_path = '.'.join(es_data_path)  # noqa: PLW2901
+            es_data_path = cls._escape_filter_key(es_data_path)  # noqa: PLW2901
             if isinstance(data_value, str) and es_data_path not in cls._es_field_indexes and data_value != '*':
                 filters.append(f'{es_data_path}.keyword: \"{data_value}\"')
             elif isinstance(data_value, bool):
@@ -495,7 +495,7 @@ def get_by_params(cls, es_index=es_index, **params):
                 size=10000,
                 filter_path=filter_path,
             )
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             LOGGER.warning("Unable to find ES data: %s", exc)
             es_data = None
@@ -566,7 +566,7 @@ def get_prior_tests(self, filter_path=None) -> typing.List['TestResultClass']:
                 filter_path=filter_path,
             )
             es_result = es_result.get('hits', {}).get('hits', None) if es_result else None
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             LOGGER.warning("Unable to find ES data: %s", exc)
             es_result = None
diff --git a/sdcm/scan_operation_thread.py b/sdcm/scan_operation_thread.py
index 9abae41060..a97adee104 100644
--- a/sdcm/scan_operation_thread.py
+++ b/sdcm/scan_operation_thread.py
@@ -155,7 +155,7 @@ def run_scan_event(self, cmd: str,
                 self.fetch_result_pages(result=result, read_pages=self.fullscan_stats.read_pages)
                 if not scan_op_event.message:
                     scan_op_event.message = f"{type(self).__name__} operation ended successfully"
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error(traceback.format_exc())
             msg = repr(exc)
             self.current_operation_stat.exceptions.append(repr(exc))
@@ -244,7 +244,7 @@ def get_table_clustering_order(self) -> str:
                 session.default_consistency_level = ConsistencyLevel.ONE
                 return get_table_clustering_order(ks_cf=self.fullscan_params.ks_cf,
                                                   ck_name=self.fullscan_params.ck_name, session=session)
-        except Exception as error:  # pylint: disable=broad-except
+        except Exception as error:  # pylint: disable=broad-except  # noqa: BLE001
             self.log.error(traceback.format_exc())
             self.log.error('Failed getting table %s clustering order through node %s : %s',
                            self.fullscan_params.ks_cf, node.name,
diff --git a/sdcm/sct_config.py b/sdcm/sct_config.py
index 9e4a9cbd8e..0275637555 100644
--- a/sdcm/sct_config.py
+++ b/sdcm/sct_config.py
@@ -95,7 +95,7 @@ def str_or_list_or_eval(value: Union[str, List[str]]) -> List[str]:
     if isinstance(value, str):
         try:
             return ast.literal_eval(value)
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass
         return [str(value), ]
@@ -104,7 +104,7 @@ def str_or_list_or_eval(value: Union[str, List[str]]) -> List[str]:
         for val in value:
             try:
                 ret_values += [ast.literal_eval(val)]
-            except Exception:  # pylint: disable=broad-except
+            except Exception:  # pylint: disable=broad-except  # noqa: BLE001
                 ret_values += [str(val)]
         return ret_values
@@ -115,7 +115,7 @@ def int_or_list(value):
     try:
         value = int(value)
         return value
-    except Exception:  # pylint: disable=broad-except
+    except Exception:  # pylint: disable=broad-except  # noqa: BLE001
         pass

     if isinstance(value, str):
@@ -123,11 +123,11 @@ def int_or_list(value):
             values = value.split()
             [int(v) for v in values]  # pylint: disable=expression-not-assigned
             return value
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass
         try:
             return ast.literal_eval(value)
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass

     raise ValueError("{} isn't int or list".format(value))
@@ -137,7 +137,7 @@ def dict_or_str(value):
     if isinstance(value, str):
         try:
             return ast.literal_eval(value)
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass
     if isinstance(value, dict):
         return value
@@ -1669,7 +1669,7 @@ class SCTConfiguration(dict):
     ami_id_params = ['ami_id_db_scylla', 'ami_id_loader', 'ami_id_monitor', 'ami_id_db_cassandra', 'ami_id_db_oracle']
     aws_supported_regions = ['eu-west-1', 'eu-west-2', 'us-west-2', 'us-east-1', 'eu-north-1', 'eu-central-1']

-    def __init__(self):
+    def __init__(self):  # noqa: PLR0912, PLR0915
         # pylint: disable=too-many-locals,too-many-branches,too-many-statements
         super().__init__()
         self.scylla_version = None
@@ -1759,7 +1759,7 @@ def __init__(self):
                         ami = get_branched_ami(scylla_version=scylla_version, region_name=region, arch=aws_arch)[0]
                     else:
                         ami = get_scylla_ami_versions(version=scylla_version, region_name=region, arch=aws_arch)[0]
-                except Exception as ex:
+                except Exception as ex:  # noqa: BLE001
                     raise ValueError(f"AMIs for scylla_version='{scylla_version}' not found in {region} "
                                      f"arch={aws_arch}") from ex
                 self.log.debug("Found AMI %s(%s) for scylla_version='%s' in %s",
@@ -1773,7 +1773,7 @@ def __init__(self):
                     else:
                         # gce_image.name format examples: scylla-4-3-6 or scylla-enterprise-2021-1-2
                         gce_image = get_scylla_gce_images_versions(version=scylla_version)[0]
-                except Exception as ex:
+                except Exception as ex:  # noqa: BLE001
                     raise ValueError(f"GCE image for scylla_version='{scylla_version}' was not found") from ex

                 self.log.debug("Found GCE image %s for scylla_version='%s'", gce_image.name, scylla_version)
@@ -1837,7 +1837,7 @@ def __init__(self):
                     else:
                         ami = get_scylla_ami_versions(version=oracle_scylla_version,
                                                       region_name=region, arch=aws_arch)[0]
-                except Exception as ex:
+                except Exception as ex:  # noqa: BLE001
                     raise ValueError(f"AMIs for oracle_scylla_version='{scylla_version}' not found in {region} "
                                      f"arch={aws_arch}") from ex
@@ -2021,7 +2021,7 @@ def _load_environment_variables(self):
             if opt['env'] in os.environ:
                 try:
                     environment_vars[opt['name']] = opt['type'](os.environ[opt['env']])
-                except Exception as ex:  # pylint: disable=broad-except
+                except Exception as ex:  # pylint: disable=broad-except  # noqa: BLE001
                     raise ValueError(
                         "failed to parse {} from environment variable".format(opt['env'])) from ex
             nested_keys = [key for key in os.environ if key.startswith(opt['env'] + '.')]
@@ -2074,7 +2074,7 @@ def _validate_value(self, opt):
         opt['is_k8s_multitenant_value'] = False
         try:
             opt['type'](self.get(opt['name']))
-        except Exception as ex:  # pylint: disable=broad-except
+        except Exception as ex:  # pylint: disable=broad-except  # noqa: BLE001
             if not (self.get("cluster_backend").startswith("k8s")
                     and self.get("k8s_tenants_num") > 1
                     and opt.get("k8s_multitenancy_supported")
@@ -2086,7 +2086,7 @@ def _validate_value(self, opt):
             for list_element in self.get(opt['name']):
                 try:
                     opt['type'](list_element)
-                except Exception as ex:  # pylint: disable=broad-except
+                except Exception as ex:  # pylint: disable=broad-except  # noqa: BLE001
                     raise ValueError("failed to validate {}".format(opt['name'])) from ex
             opt['is_k8s_multitenant_value'] = True
@@ -2113,7 +2113,7 @@ def list_of_stress_tools(self) -> Set[str]:
             if not stress_cmd:
                 continue
             if not isinstance(stress_cmd, list):
-                stress_cmd = [stress_cmd]
+                stress_cmd = [stress_cmd]  # noqa: PLW2901
             for cmd in stress_cmd:
                 if stress_tool := cmd.split(maxsplit=2)[0]:
                     stress_tools.add(stress_tool)
@@ -2133,9 +2133,9 @@ def check_required_files(self):
             if not stress_cmd:
                 continue
             if not isinstance(stress_cmd, list):
-                stress_cmd = [stress_cmd]
+                stress_cmd = [stress_cmd]  # noqa: PLW2901
             for cmd in stress_cmd:
-                cmd = cmd.strip(' ')
+                cmd = cmd.strip(' ')  # noqa: PLW2901
                 if cmd.startswith('latte'):
                     script_name_regx = re.compile(r'([/\w-]*\.rn)')
                     script_name = script_name_regx.search(cmd).group(1)
@@ -2146,7 +2146,7 @@ def check_required_files(self):
                     continue
                 for option in cmd.split():
                     if option.startswith('profile='):
-                        option = option.split('=', 1)
+                        option = option.split('=', 1)  # noqa: PLW2901
                         if len(option) < 2:
                             continue
                         profile_path = option[1]
@@ -2581,9 +2581,9 @@ def _verify_scylla_bench_mode_and_workload_parameters(self):
             if not stress_cmd:
                 continue
             if not isinstance(stress_cmd, list):
-                stress_cmd = [stress_cmd]
+                stress_cmd = [stress_cmd]  # noqa: PLW2901
             for cmd in stress_cmd:
-                cmd = cmd.strip(' ')
+                cmd = cmd.strip(' ')  # noqa: PLW2901
                 if not cmd.startswith('scylla-bench'):
                     continue
                 if "-mode=" not in cmd:
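PLR0911, PLR0912 and PLR0915 are ruff's too-many-returns/branches/statements checks; as on SCTConfiguration.__init__ above, the suppression sits on the `def` line. An illustrative placement only (the function and its branch count are invented, far below the real thresholds):

    def dispatch(command: str) -> str:  # noqa: PLR0912
        # Imagine many more branches here; the noqa records the complexity
        # instead of refactoring inside a lint-migration change.
        if command == "start":
            return "starting"
        if command == "stop":
            return "stopping"
        return "unknown"

    assert dispatch("stop") == "stopping"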
diff --git a/sdcm/sct_events/base.py b/sdcm/sct_events/base.py
index 8c8548818a..8ee3be03d5 100644
--- a/sdcm/sct_events/base.py
+++ b/sdcm/sct_events/base.py
@@ -323,7 +323,7 @@ def __del__(self):
         warning = f"[SCT internal warning] {self} has not been published or dumped, maybe you missed .publish()"
         try:
             LOGGER.warning(warning)
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             print(f"Exception while printing {warning}. Full exception: {exc}")
diff --git a/sdcm/sct_events/decorators.py b/sdcm/sct_events/decorators.py
index 588a2a29e5..41e4bf4206 100644
--- a/sdcm/sct_events/decorators.py
+++ b/sdcm/sct_events/decorators.py
@@ -24,7 +24,7 @@ def raise_event_on_failure(func):
     def wrapper(*args, **kwargs):
         try:
             return func(*args, **kwargs)
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             ThreadFailedEvent(message=str(exc), traceback=format_exc()).publish()
     return None
diff --git a/sdcm/sct_events/events_processes.py b/sdcm/sct_events/events_processes.py
index 54d89d95f1..9ef0cd3137 100644
--- a/sdcm/sct_events/events_processes.py
+++ b/sdcm/sct_events/events_processes.py
@@ -166,7 +166,7 @@ def __str__(self):

 def create_default_events_process_registry(log_dir: Union[str, Path]):
-    global _EVENTS_PROCESSES  # pylint: disable=global-statement
+    global _EVENTS_PROCESSES  # pylint: disable=global-statement  # noqa: PLW0603

     with _EVENTS_PROCESSES_LOCK:
         if _EVENTS_PROCESSES is None:
diff --git a/sdcm/sct_events/file_logger.py b/sdcm/sct_events/file_logger.py
index 9795ed1c91..69790ba879 100644
--- a/sdcm/sct_events/file_logger.py
+++ b/sdcm/sct_events/file_logger.py
@@ -134,7 +134,7 @@ def get_events_by_category(self, limit: Optional[int] = None) -> Dict[str, List[
                     event.append(line)
                 if event:
                     events_bucket.append("\n".join(event))
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             error_msg = f"{self}: failed to read {log_file}: {exc}"
             LOGGER.error(error_msg)
         if not events_bucket:
diff --git a/sdcm/sct_events/filters.py b/sdcm/sct_events/filters.py
index 719580d9da..777cc46e33 100644
--- a/sdcm/sct_events/filters.py
+++ b/sdcm/sct_events/filters.py
@@ -94,7 +94,7 @@ def __init__(self,
     def _regex(self):
         try:
             return self.regex and re.compile(self.regex, self.regex_flags)
-        except Exception as exc:
+        except Exception as exc:  # noqa: BLE001
             raise ValueError(f'Compilation of the regexp "{self.regex}" failed with error: {exc}') from None

     def cancel_filter(self) -> None:
diff --git a/sdcm/sct_events/handlers/schema_disagreement.py b/sdcm/sct_events/handlers/schema_disagreement.py
index 20defa1a0a..96dec95ccf 100644
--- a/sdcm/sct_events/handlers/schema_disagreement.py
+++ b/sdcm/sct_events/handlers/schema_disagreement.py
@@ -48,7 +48,7 @@ def handle(self, event: CassandraStressLogEvent, tester_obj: "sdcm.tester.Cluste
             try:
                 link = upload_sstables_to_s3(node, keyspace='system_schema', test_id=tester_obj.test_id)
                 event.add_sstable_link(link)
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 LOGGER.error("failed to upload system_schema sstables for node %s: %s", node.name, exc)
         event.add_gossip_info(gossip_info)
         event.add_peers_info(peers_info)
diff --git a/sdcm/sct_events/operator.py b/sdcm/sct_events/operator.py
index b71b592d0e..af231ec123 100644
--- a/sdcm/sct_events/operator.py
+++ b/sdcm/sct_events/operator.py
@@ -54,7 +54,7 @@ def add_info(self: T_log_event, node, line: str, line_number: int) -> T_log_even
             self.source_timestamp = datetime.datetime(
                 year=year, month=month, day=day, hour=int(hour), minute=int(minute),
                 second=int(second), microsecond=int(milliseconds), tzinfo=datetime.timezone.utc).timestamp()
-        except Exception:  # pylint: disable=broad-except
+        except Exception:  # pylint: disable=broad-except  # noqa: BLE001
             pass
         self.event_timestamp = time.time()
         self.node = str(node)
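PLW0603 flags assignment to a module-level name through `global` (as in create_default_events_process_registry above), while PLW0602 flags a `global` declaration that is only read. A minimal lazy-singleton sketch of the flagged pattern, with hypothetical names:

    _REGISTRY = None

    def get_registry() -> dict:
        global _REGISTRY  # noqa: PLW0603 - intentional module-level cache
        if _REGISTRY is None:
            _REGISTRY = {}
        return _REGISTRY

    assert get_registry() is get_registry()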
diff --git a/sdcm/sct_runner.py b/sdcm/sct_runner.py
index b93f6ab070..0fbd98bdb7 100644
--- a/sdcm/sct_runner.py
+++ b/sdcm/sct_runner.py
@@ -375,7 +375,7 @@ def create_image(self) -> None:
                 try:
                     LOGGER.info("Terminating SCT Image Builder instance `%s'...", builder_instance_id)
                     self._terminate_image_builder_instance(instance=instance)
-                except Exception as ex:  # pylint: disable=broad-except
+                except Exception as ex:  # pylint: disable=broad-except  # noqa: BLE001
                     LOGGER.warning("Was not able to terminate `%s': %s\nPlease terminate manually!!!",
                                    builder_instance_id, ex)
         else:
@@ -1244,7 +1244,7 @@ def update_sct_runner_tags(backend: str = None, test_runner_ip: str = None, test
         runner_to_update = runner_to_update[0]
         runner_to_update.sct_runner_class.set_tags(runner_to_update, tags=tags)
         LOGGER.info("Tags on SCT runner updated with: %s", tags)
-    except Exception as exc:  # pylint: disable=broad-except
+    except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.warning("Could not set SCT runner tags to: %s due to exc:\n%s", tags, exc)
@@ -1274,7 +1274,6 @@ def _manage_runner_keep_tag_value(utc_now: datetime,
             return sct_runner_info

     LOGGER.info("No changes to make to runner tags.")
-    return sct_runner_info


 def clean_sct_runners(test_status: str,
@@ -1316,9 +1315,9 @@ def clean_sct_runners(test_status: str,
         LOGGER.info("UTC now: %s", utc_now)

         if not dry_run and test_runner_ip:
-            sct_runner_info = _manage_runner_keep_tag_value(test_status=test_status, utc_now=utc_now,
-                                                            timeout_flag=timeout_flag, sct_runner_info=sct_runner_info,
-                                                            dry_run=dry_run)
+            _manage_runner_keep_tag_value(test_status=test_status, utc_now=utc_now,
+                                          timeout_flag=timeout_flag, sct_runner_info=sct_runner_info,
+                                          dry_run=dry_run)

         if sct_runner_info.keep:
             if "alive" in str(sct_runner_info.keep):
@@ -1344,7 +1343,7 @@ def clean_sct_runners(test_status: str,
                 sct_runner_info.terminate()
                 runners_terminated += 1
                 end_message = f"Number of cleaned runners: {runners_terminated}"
-            except Exception as exc:  # pylint: disable=broad-except
+            except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
                 LOGGER.warning("Exception raised during termination of %s: %s", sct_runner_info, exc)
                 end_message = "No runners have been terminated"
diff --git a/sdcm/scylla_bench_thread.py b/sdcm/scylla_bench_thread.py
index eee527db5a..ee790c364a 100644
--- a/sdcm/scylla_bench_thread.py
+++ b/sdcm/scylla_bench_thread.py
@@ -233,7 +233,7 @@ def _run_stress(self, loader, loader_idx, cpu_idx):  # pylint: disable=too-many-
                 log_file=log_file_name,
                 retry=0,
             )
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             self.configure_event_on_failure(stress_event=scylla_bench_event, exc=exc)

         return loader, result
diff --git a/sdcm/send_email.py b/sdcm/send_email.py
index 56573c6f8b..49750f71c1 100644
--- a/sdcm/send_email.py
+++ b/sdcm/send_email.py
@@ -581,7 +581,7 @@ class PerfSimpleQueryReporter(BaseEmailReporter):
     email_template_file = "results_perf_simple_query.html"


-def build_reporter(name: str,
+def build_reporter(name: str,  # noqa: PLR0911
                    email_recipients: Sequence[str] = (),
                    logdir: Optional[str] = None) -> Optional[BaseEmailReporter]:
     # pylint: disable=too-many-return-statements,too-many-branches
@@ -699,7 +699,7 @@ def send_perf_email(reporter, test_results, logs, email_recipients, testrun_dir,
                                     template=email_content['template'])
     try:
         reporter.send_email(subject=subject, content=html, files=email_content['attachments'])
-    except Exception:  # pylint: disable=broad-except
+    except Exception:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.error("Failed to create email due to the following error:\n%s", traceback.format_exc())
         build_reporter("TestAborted", email_recipients, testrun_dir).send_report({
             "job_url": os.environ.get("BUILD_URL"),
@@ -723,7 +723,7 @@ def read_email_data_from_file(filename):
             with open(filename, encoding="utf-8") as file:
                 data = file.read().strip()
                 email_data = json.loads(data or '{}')
-    except Exception as details:  # pylint: disable=broad-except
+    except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.warning("Error during read email data file %s: %s", filename, details)
     return email_data
@@ -741,5 +741,5 @@ def save_email_data_to_file(email_data, filepath):
         if email_data:
            with open(filepath, "w", encoding="utf-8") as json_file:
                 json.dump(email_data, json_file)
-    except Exception as details:  # pylint: disable=broad-except
+    except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
         LOGGER.warning("Error during collecting data for email %s", details)
diff --git a/sdcm/sla/libs/sla_utils.py b/sdcm/sla/libs/sla_utils.py
index ea34c7bdcf..1b5d477ec5 100644
--- a/sdcm/sla/libs/sla_utils.py
+++ b/sdcm/sla/libs/sla_utils.py
@@ -383,7 +383,7 @@ def clean_auth(entities_list_of_dict):
             if auth:
                 try:
                     auth.drop()
-                except Exception as error:  # pylint: disable=broad-except
+                except Exception as error:  # pylint: disable=broad-except  # noqa: BLE001
                     LOGGER.error("Failed to drop '%s'. Error: %s", auth.name, error)

     @staticmethod
diff --git a/sdcm/sla/sla_tests.py b/sdcm/sla/sla_tests.py
index 909a8146b6..58aba4bd3c 100644
--- a/sdcm/sla/sla_tests.py
+++ b/sdcm/sla/sla_tests.py
@@ -36,7 +36,7 @@ def run_stress_and_validate_scheduler_runtime_during_load(self, tester, read_cmd
                 possible_issue={'less resources': 'scylla-enterprise#2717'}
             )
             return None
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
            wp_event.add_error([str(details)])
            wp_event.full_traceback = traceback.format_exc()
            wp_event.severity = Severity.ERROR
@@ -66,7 +66,7 @@ def alter_sl_and_validate_scheduler_runtime(self, tester, service_level, new_sha
                 db_cluster=tester.db_cluster,
                 possible_issue={'less resources': "scylla-enterprise#949"})
             return None
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             wp_event.add_error([str(details)])
             wp_event.full_traceback = traceback.format_exc()
             wp_event.severity = Severity.ERROR
@@ -82,7 +82,7 @@ def detach_service_level_and_run_load(sl_for_detach, role_with_sl_to_detach, sle
             role_with_sl_to_detach.detach_service_level()
             time.sleep(sleep)
             return None
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             wp_event.add_error([str(details)])
             wp_event.full_traceback = traceback.format_exc()
             wp_event.severity = Severity.ERROR
@@ -98,7 +98,7 @@ def drop_service_level_and_run_load(sl_for_drop, role_with_sl_to_drop, sleep=600
             role_with_sl_to_drop.reset_service_level()
             time.sleep(sleep)
             return None
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             wp_event.add_error([str(details)])
             wp_event.full_traceback = traceback.format_exc()
             wp_event.severity = Severity.ERROR
@@ -158,7 +158,7 @@ def validate_role_service_level_attributes_against_db():
                                     'scylla-enterprise#2572 or scylla-enterprise#2717'}
             )
             return None
-        except Exception as details:  # pylint: disable=broad-except
+        except Exception as details:  # pylint: disable=broad-except  # noqa: BLE001
             wp_event.add_error([str(details)])
             wp_event.full_traceback = traceback.format_exc()
             wp_event.severity = Severity.ERROR
@@ -204,7 +204,7 @@ def verify_stress_threads(tester, stress_queue):
         for stress in stress_queue:
             try:
                 tester.verify_stress_thread(cs_thread_pool=stress)
-            except Exception as error:  # pylint: disable=broad-except
+            except Exception as error:  # pylint: disable=broad-except  # noqa: BLE001
                 LOGGER.error("Stress verifying failed. Error: %s", error)

     @staticmethod
diff --git a/sdcm/stress/latte_thread.py b/sdcm/stress/latte_thread.py
index 3a5b8286f4..3462f9cf07 100644
--- a/sdcm/stress/latte_thread.py
+++ b/sdcm/stress/latte_thread.py
@@ -86,9 +86,9 @@ def run(self):
             try:
                 match = regex.search(line)
                 if match:
-                    for key, value in match.groupdict().items():
-                        value = float(value)
-                        self.set_metric(self.operation, key, float(value))
+                    for key, _value in match.groupdict().items():
+                        value = float(_value)
+                        self.set_metric(self.operation, key, value)
             except Exception:  # pylint: disable=broad-except
                 LOGGER.exception("fail to send metric")
@@ -213,7 +213,7 @@ def _run_stress(self, loader, loader_idx, cpu_idx):
             )

             return self.parse_final_output(result)
-        except Exception as exc:  # pylint: disable=broad-except
+        except Exception as exc:  # pylint: disable=broad-except  # noqa: BLE001
             self.configure_event_on_failure(stress_event=latte_stress_event, exc=exc)
         return {}
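PLR0913 (too many arguments) is suppressed below on CassandraStressThread.__init__ and the run_stress_* helpers. A sketch of the suppression next to the refactor the rule usually suggests; every name here is illustrative:

    from dataclasses import dataclass

    def connect(host, port, user, password, timeout, retries):  # noqa: PLR0913
        return (host, port, user, password, timeout, retries)

    @dataclass
    class ConnectOptions:  # bundling parameters avoids the suppression
        host: str
        port: int = 9042
        timeout: float = 30.0

    def connect_with(options: ConnectOptions):
        return (options.host, options.port, options.timeout)

    assert connect_with(ConnectOptions("db1"))[1] == 9042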
"hdrfile" not in cs_log_option: stress_cmd = stress_cmd.replace("-log", f"-log hdrfile={hdr_log_name}") - else: + else: # noqa: PLR5501 if replacing_hdr_file := re.search(r"hdrfile=(.*?)\s", cs_log_option): stress_cmd = stress_cmd.replace( f"hdrfile={replacing_hdr_file.group(1)}", f"hdrfile={hdr_log_name}") @@ -348,7 +348,7 @@ def _run_cs_stress(self, loader, loader_idx, cpu_idx, keyspace_idx): # pylint: reporter = CassandraStressVersionReporter( cmd_runner, prefix, loader.parent_cluster.test_config.argus_client()) reporter.report() - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info("Failed to collect cassandra-stress version information", exc_info=True) with cleanup_context, \ CassandraStressExporter(instance_name=cmd_runner_name, @@ -370,7 +370,7 @@ def _run_cs_stress(self, loader, loader_idx, cpu_idx, keyspace_idx): # pylint: try: with SoftTimeoutContext(timeout=self.soft_timeout, operation="cassandra-stress"): result = cmd_runner.run(cmd=node_cmd, timeout=self.hard_timeout, log_file=log_file_name, retry=0) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.configure_event_on_failure(stress_event=cs_stress_event, exc=exc) return loader, result, cs_stress_event @@ -403,7 +403,7 @@ def get_results(self) -> list[dict | None]: node_cs_res = BaseLoaderSet._parse_cs_summary(lines) # pylint: disable=protected-access if node_cs_res: ret.append(node_cs_res) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 event.add_error([f"Failed to process stress summary due to {exc}"]) event.severity = Severity.CRITICAL event.event_error() diff --git a/sdcm/teardown_validators/sstables.py b/sdcm/teardown_validators/sstables.py index 62e006bddf..3dd95b1f74 100644 --- a/sdcm/teardown_validators/sstables.py +++ b/sdcm/teardown_validators/sstables.py @@ -62,7 +62,7 @@ def validate(self): parallel_obj = ParallelObject(objects=self.cluster.nodes, timeout=timeout) parallel_obj.run(run_scrub, ignore_exceptions=False, unpack_objects=True) LOGGER.info("Nodetool scrub validation finished") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Error during nodetool scrub validation: %s", exc) ValidatorEvent( message=f'Error during nodetool scrub validation: {exc}', severity=Severity.ERROR).publish() diff --git a/sdcm/tester.py b/sdcm/tester.py index c388adc2d2..146f8325be 100644 --- a/sdcm/tester.py +++ b/sdcm/tester.py @@ -237,7 +237,7 @@ def decor(*args, **kwargs): self.log.debug("Silently running '%s'", name) result = funct(*args, **kwargs) self.log.debug("Finished '%s'. No errors were silenced.", name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.debug("Finished '%s'. 
%s exception was silenced.", name, str(type(exc))) self._store_test_result(args[0], exc, exc.__traceback__, name) return result @@ -273,7 +273,7 @@ def critical_failure_handler(signum, frame): # pylint: disable=unused-argument if TestConfig().tester_obj().teardown_started: TEST_LOG.info("A critical event happened during tearDown") return - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass raise CriticalTestFailure("Critical Error has failed the test") # pylint: disable=raise-missing-from @@ -443,7 +443,7 @@ def send_argus_heartbeat(client: ArgusSCTClient, stop_signal: threading.Event): break try: client.sct_heartbeat() - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 self.log.warning("Failed to submit heartbeat to argus, Try #%s", fail_count + 1) fail_count += 1 time.sleep(30.0) @@ -583,8 +583,8 @@ def argus_collect_logs(self, log_links: dict[str, list[str] | str]): try: logs_to_save = [] for name, link in log_links.items(): - link = LogLink(log_name=name, log_link=link) - logs_to_save.append(link) + argus_link = LogLink(log_name=name, log_link=link) + logs_to_save.append(argus_link) self.test_config.argus_client().submit_sct_logs(logs_to_save) except Exception: # pylint: disable=broad-except self.log.error("Error saving logs to Argus", exc_info=True) @@ -630,7 +630,7 @@ def argus_collect_gemini_results(self): "oracle_node_scylla_version": self.cs_db_cluster.nodes[0].scylla_version if self.cs_db_cluster else "N/A", "oracle_nodes_count": self.params.get("n_test_oracle_db_nodes"), }) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 self.log.warning("Error submitting gemini results to argus", exc_info=True) def _init_data_validation(self): @@ -1124,7 +1124,7 @@ def get_nemesis_class(self): self.log.debug("Nemesis threads %s", nemesis_threads) return nemesis_threads - def get_cluster_gce(self, loader_info, db_info, monitor_info): + def get_cluster_gce(self, loader_info, db_info, monitor_info): # noqa: PLR0912 # pylint: disable=too-many-locals,too-many-statements,too-many-branches if loader_info['n_nodes'] is None: n_loader_nodes = self.params.get('n_loaders') @@ -1902,7 +1902,7 @@ def run_stress(self, stress_cmd, duration=None): self.verify_stress_thread(cs_thread_pool=cs_thread_pool) # pylint: disable=too-many-arguments,too-many-return-statements - def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', # pylint: disable=too-many-arguments + def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', # pylint: disable=too-many-arguments # noqa: PLR0911, PLR0913 round_robin=False, stats_aggregate_cmds=True, keyspace_name=None, compaction_strategy='', use_single_loader=False, stop_test_on_failure=True): @@ -1945,7 +1945,7 @@ def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_nu raise ValueError(f'Unsupported stress command: "{stress_cmd[:50]}..."') # pylint: disable=too-many-arguments - def run_stress_cassandra_thread( + def run_stress_cassandra_thread( # noqa: PLR0913 self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', round_robin=False, stats_aggregate_cmds=True, keyspace_name=None, compaction_strategy='', stop_test_on_failure=True, params=None, **_): # pylint: disable=too-many-locals @@ -1979,7 +1979,7 @@ def 
run_stress_cassandra_thread( return cs_thread # pylint: disable=too-many-arguments - def run_cql_stress_cassandra_thread( + def run_cql_stress_cassandra_thread( # noqa: PLR0913 self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', round_robin=False, stats_aggregate_cmds=True, keyspace_name=None, compaction_strategy='', stop_test_on_failure=True, params=None, **_): # pylint: disable=too-many-locals @@ -2360,7 +2360,7 @@ def create_keyspace(self, keyspace_name, replication_factor): does_keyspace_exist = self.wait_validate_keyspace_existence(session, keyspace_name) return does_keyspace_exist - def create_table(self, name, key_type="varchar", # pylint: disable=too-many-arguments,too-many-branches + def create_table(self, name, key_type="varchar", # pylint: disable=too-many-arguments,too-many-branches # noqa: PLR0913 speculative_retry=None, read_repair=None, compression=None, gc_grace=None, columns=None, compaction=None, compact_storage=False, scylla_encryption_options=None, keyspace_name=None, @@ -2418,10 +2418,10 @@ def truncate_cf(self, ks_name: str, table_name: str, session: Session, truncate_ try: timeout = f" USING TIMEOUT {truncate_timeout_sec}s" if truncate_timeout_sec else "" session.execute('TRUNCATE TABLE {0}.{1}{2}'.format(ks_name, table_name, timeout)) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 self.log.debug('Failed to truncate base table {0}.{1}. Error: {2}'.format(ks_name, table_name, str(ex))) - def create_materialized_view(self, ks_name, base_table_name, mv_name, mv_partition_key, mv_clustering_key, session, + def create_materialized_view(self, ks_name, base_table_name, mv_name, mv_partition_key, mv_clustering_key, session, # noqa: PLR0913 # pylint: disable=too-many-arguments mv_columns='*', speculative_retry=None, read_repair=None, compression=None, gc_grace=None, compact_storage=False): @@ -2532,7 +2532,7 @@ def copy_table(self, node, src_keyspace, src_table, dest_keyspace, # pylint: di try: result = self.copy_data_between_tables(node, src_keyspace, src_table, dest_keyspace, dest_table, columns_list) - except Exception as error: # pylint: disable=broad-except + except Exception as error: # pylint: disable=broad-except # noqa: BLE001 self.log.error('Copying data from %s to %s failed with error: %s', src_table, dest_table, error) return False @@ -2558,7 +2558,7 @@ def copy_view(self, node, src_keyspace, src_view, dest_keyspace, # pylint: disa try: result = self.copy_data_between_tables(node, src_keyspace, src_view, dest_keyspace, dest_table, columns_list) - except Exception as error: # pylint: disable=broad-except + except Exception as error: # pylint: disable=broad-except # noqa: BLE001 self.log.error('Copying data from %s to %s failed with error %s', src_view, dest_table, error) return False @@ -2674,7 +2674,7 @@ def copy_data_between_tables(self, node, src_keyspace, src_table, dest_keyspace, 'Actually inserted rows: %s.', len(source_table_rows), succeeded_rows) return False - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning('Problem during copying data: %s', exc) return False @@ -3134,7 +3134,7 @@ def get_data_set_size(self, cs_cmd): # pylint: disable=inconsistent-return-stat """ try: return int(re.search(r"n=(\d+) ", cs_cmd).group(1)) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 
self.fail("Unable to get data set size from cassandra-stress command: %s" % cs_cmd) return None @@ -3147,7 +3147,7 @@ def get_c_s_column_definition(self, cs_cmd): # pylint: disable=inconsistent-ret if search_res := re.search(r".* -col ('.*') .*", cs_cmd): return search_res.group(1) return None - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 self.fail("Unable to get column definition from cassandra-stress command: %s" % cs_cmd) return None @@ -3505,12 +3505,11 @@ def get_nemesises_stats(self): nemesis_stats = {} if self.create_stats: nemesis_stats = self.get_doc_data(key='nemesis') + elif self.db_cluster: + for nem in self.db_cluster.nemesis: + nemesis_stats.update(nem.stats) else: - if self.db_cluster: - for nem in self.db_cluster.nemesis: - nemesis_stats.update(nem.stats) - else: - self.log.warning("No nemesises as cluster was not created") + self.log.warning("No nemesises as cluster was not created") if nemesis_stats: for detail in nemesis_stats.values(): @@ -3530,7 +3529,7 @@ def save_email_data(self): email_data = self.get_email_data() self._argus_add_relocatable_pkg(email_data) self.argus_collect_screenshots(email_data) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.error("Error while saving email data. Error: %s\nTraceback: %s", exc, traceback.format_exc()) json_file_path = os.path.join(self.logdir, "email_data.json") diff --git a/sdcm/tombstone_gc_verification_thread.py b/sdcm/tombstone_gc_verification_thread.py index df42cde5a3..1aa1191170 100644 --- a/sdcm/tombstone_gc_verification_thread.py +++ b/sdcm/tombstone_gc_verification_thread.py @@ -74,7 +74,7 @@ def _run_tombstone_gc_verification(self): try: self.tombstone_gc_verification() tombstone_event.message = "Tombstone GC verification ended successfully" - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 msg = str(exc) msg = f"{msg} while running Nemesis: {db_node.running_nemesis}" if db_node.running_nemesis else msg tombstone_event.message = msg diff --git a/sdcm/utils/adaptive_timeouts/__init__.py b/sdcm/utils/adaptive_timeouts/__init__.py index 00e6557f03..6ad18e71f2 100644 --- a/sdcm/utils/adaptive_timeouts/__init__.py +++ b/sdcm/utils/adaptive_timeouts/__init__.py @@ -18,7 +18,7 @@ def _get_decommission_timeout(node_info_service: NodeLoadInfoService) -> tuple[i timeout = int(node_info_service.node_data_size_mb * 0.03) timeout = max(timeout, 7200) # 2 hours minimum return timeout, node_info_service.as_dict() - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Failed to calculate decommission timeout: \n%s \nDefaulting to 6 hours", exc) return 6*60*60, {} @@ -27,7 +27,7 @@ def _get_soft_timeout(node_info_service: NodeLoadInfoService, timeout: int | flo # no timeout calculation - just return the timeout passed as argument along with node load info try: return timeout, node_info_service.as_dict() - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Failed to get node info for timeout: \n%s", exc) return timeout, {} @@ -114,5 +114,5 @@ def adaptive_timeout(operation: Operations, node: "BaseNode", stats_storage: Ada if load_metrics: stats_storage.store(metrics=load_metrics, operation=operation.name, 
duration=duration, timeout=timeout, timeout_occurred=timeout_occurred) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Failed to store adaptive timeout stats: \n%s", exc) diff --git a/sdcm/utils/adaptive_timeouts/load_info_store.py b/sdcm/utils/adaptive_timeouts/load_info_store.py index d134fb049c..2d11575a09 100644 --- a/sdcm/utils/adaptive_timeouts/load_info_store.py +++ b/sdcm/utils/adaptive_timeouts/load_info_store.py @@ -81,7 +81,7 @@ def _get_node_load(self) -> tuple[float, float, float]: load_5 = float(metrics['node_load5']) load_15 = float(metrics['node_load15']) return load_1, load_5, load_15 - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Couldn't get node load from prometheus metrics. Error: %s", exc) # fallback to uptime load_1, load_5, load_15 = self.remoter.run('uptime').stdout.split("load average: ")[1].split(",") diff --git a/sdcm/utils/aws_utils.py b/sdcm/utils/aws_utils.py index 87c879c43b..5ca9f8fe3f 100644 --- a/sdcm/utils/aws_utils.py +++ b/sdcm/utils/aws_utils.py @@ -123,7 +123,7 @@ def delete_network_interfaces_of_sg(self, sg_id: str): if attachment_id := attachment.get('AttachmentId'): try: self.ec2_client.detach_network_interface(AttachmentId=attachment_id, Force=True) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed to detach network interface (%s) attachment %s:\n%s", network_interface_id, attachment_id, exc) @@ -133,7 +133,7 @@ def delete_network_interfaces_of_sg(self, sg_id: str): network_interface_id = interface_description['NetworkInterfaceId'] try: self.ec2_client.delete_network_interface(NetworkInterfaceId=network_interface_id) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed to delete network interface %s :\n%s", network_interface_id, exc) def destroy_attached_security_groups(self): @@ -141,7 +141,7 @@ def destroy_attached_security_groups(self): # even when cluster is gone try: sg_list = self.attached_security_group_ids - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed to get list of security groups:\n%s", exc) return @@ -151,12 +151,12 @@ def destroy_attached_security_groups(self): # In this case you need to forcefully detach interfaces and delete them to make nodegroup deletion possible. 
try: self.delete_network_interfaces_of_sg(security_group_id) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("destroy_attached_security_groups: %s", exc) try: self.ec2_client.delete_security_group(GroupId=security_group_id) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed to delete security groups %s, due to the following error:\n%s", security_group_id, exc) @@ -166,7 +166,7 @@ def _destroy_attached_nodegroups(): for node_group_name in self._get_attached_nodegroup_names(status=status): try: self.eks_client.delete_nodegroup(clusterName=self.short_cluster_name, nodegroupName=node_group_name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed to delete nodegroup %s/%s, due to the following error:\n%s", self.short_cluster_name, node_group_name, exc) time.sleep(10) @@ -181,7 +181,7 @@ def _destroy_attached_nodegroups(): def destroy_cluster(self): try: self.eks_client.delete_cluster(name=self.short_cluster_name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed to delete cluster %s, due to the following error:\n%s", self.short_cluster_name, exc) @@ -205,7 +205,7 @@ def destroy_oidc_provider(self): LOGGER.warning( "Couldn't find any OIDC provider associated with the '%s' EKS cluster", self.short_cluster_name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning( "Failed to delete OIDC provider for the '%s' cluster due to " "the following error:\n%s", diff --git a/sdcm/utils/benchmarks.py b/sdcm/utils/benchmarks.py index 3a53177223..193b761db5 100644 --- a/sdcm/utils/benchmarks.py +++ b/sdcm/utils/benchmarks.py @@ -202,7 +202,7 @@ def _compare_results(self): margins=Margins(sysbench_eps=0.03, cassandra_fio_read_bw=0.01, cassandra_fio_write_bw=0.01))) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning( "Failed to generate comparable result for the following item:\n%s" "\nException:%s", runner.benchmark_results, exc) @@ -236,7 +236,7 @@ def _get_average_results(es_docs: list): cassandra_fio_read_bw=item["cassandra_fio_lcs_64k_read"]["read"]["bw"], cassandra_fio_write_bw=item["cassandra_fio_lcs_64k_write"]["write"]["bw"] )) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning( "Failed to generate comparable result for the following item:\n%s" "\nException:%s", item, exc) @@ -296,7 +296,7 @@ def _install_ubuntu_prerequisites(self): for pkg in package_list: self._node.install_package(pkg) LOGGER.info("Ubuntu prerequisites for the node benchmarks installed.") - except Exception as exc: + except Exception as exc: # noqa: BLE001 LOGGER.warning("Failed to install Ubuntu prerequisites for the node benchmarking tools. 
" "Exception:\n%s", exc) @@ -342,7 +342,7 @@ def _get_fio_results(self): for job in cassandra_fio_jobs: jsoned_output.update({f'cassandra_fio_{job["jobname"]}': job}) self.benchmark_results.update(jsoned_output) - except Exception as exc: + except Exception as exc: # noqa: BLE001 LOGGER.warning("Failed to get cassandra-fio result for node %s with exception:\n%s", self.node_name, exc) def run_benchmarks(self): diff --git a/sdcm/utils/bisect_test.py b/sdcm/utils/bisect_test.py index bdf94a384e..dec2c40d0a 100644 --- a/sdcm/utils/bisect_test.py +++ b/sdcm/utils/bisect_test.py @@ -111,7 +111,7 @@ def update_binaries(node): raise ValueError('failed to get version from node: ', node.name) logger.info('successfully updated binaries to version: %s', version) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 logger.warning('error during upgrade: %s \n verifying next closest version.', exc) del repo_urls[mid] high -= 1 diff --git a/sdcm/utils/cdc/options.py b/sdcm/utils/cdc/options.py index 9a9a4f21a6..a57907f4c3 100644 --- a/sdcm/utils/cdc/options.py +++ b/sdcm/utils/cdc/options.py @@ -60,12 +60,13 @@ def parse_cdc_blob_settings(blob: bytes) -> Dict[str, Union[bool, str]]: for regexp in CDC_SETTINGS_REGEXP: res = re.search(regexp, blob.decode()) if res: - for key, value in res.groupdict().items(): - if value in ("false", "off"): + for key, _value in res.groupdict().items(): + if _value in ("false", "off"): value = False - elif value == 'true': + elif _value == 'true': value = True - + else: + value = _value cdc_settings[key] = value return cdc_settings diff --git a/sdcm/utils/cloud_monitor/resources/__init__.py b/sdcm/utils/cloud_monitor/resources/__init__.py index 5e46b1e76c..ed444753d9 100644 --- a/sdcm/utils/cloud_monitor/resources/__init__.py +++ b/sdcm/utils/cloud_monitor/resources/__init__.py @@ -23,7 +23,7 @@ def __init__(self, cloud, name, instance_id, region_az, state, lifecycle, instan try: self.price = self.pricing.get_instance_price(region=self.region, instance_type=self.instance_type, state=self.state, lifecycle=self.lifecycle) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 self.price = -0.0 # to indicate in the report that we were unable to get the price. 
@property diff --git a/sdcm/utils/cloud_monitor/resources/instances.py b/sdcm/utils/cloud_monitor/resources/instances.py index 82348b5e29..269b96e3ac 100644 --- a/sdcm/utils/cloud_monitor/resources/instances.py +++ b/sdcm/utils/cloud_monitor/resources/instances.py @@ -47,7 +47,7 @@ def get_owner_from_cloud_trail(self): for event in result["Events"]: if event['EventName'] == 'RunInstances': return event["Username"] - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Error occurred when trying to find an owner for '%s' in CloudTrail: %s", self._instance['InstanceId'], exc) return None diff --git a/sdcm/utils/common.py b/sdcm/utils/common.py index 4bd8c9fb28..bd5c2d9690 100644 --- a/sdcm/utils/common.py +++ b/sdcm/utils/common.py @@ -120,7 +120,7 @@ def _remote_get_hash(remoter, file_path): try: result = remoter.run('md5sum {}'.format(file_path), verbose=True) return result.stdout.strip().split()[0] - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error(str(details)) return None @@ -287,7 +287,7 @@ def upload_file(self, file_path, dest_dir=''): LOGGER.info("Set public read access") self.set_public_access(key=s3_obj) return s3_url - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Unable to upload to S3: %s", details) return "" @@ -316,7 +316,7 @@ def download_file(self, link, dst_dir): LOGGER.info("Downloaded finished") return os.path.join(os.path.abspath(dst_dir), file_name) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("File {} is not downloaded by reason: {}".format(key_name, details)) return "" @@ -484,7 +484,7 @@ def inner(*args, **kwargs): except FuturesTimeoutError as exception: results.append(ParallelObjectResult(obj=target_obj, exc=exception, result=None)) time_out = 0.001 # if there was a timeout on one of the futures there is no need to wait for all - except Exception as exception: # pylint: disable=broad-except + except Exception as exception: # pylint: disable=broad-except # noqa: BLE001 results.append(ParallelObjectResult(obj=target_obj, exc=exception, result=None)) else: results.append(ParallelObjectResult(obj=target_obj, exc=None, result=result)) @@ -782,13 +782,13 @@ def delete_image(image): for container in containers: try: delete_container(container) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Failed to delete container %s on host `%s'", container, container.client.info()["Name"]) for image in images: try: delete_image(image) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Failed to delete image tag(s) %s on host `%s'", image.tags, image.client.info()["Name"]) @@ -1042,7 +1042,7 @@ def clean_test_security_groups(tags_dict, regions=None, dry_run=False): try: response = client.delete_security_group(GroupId=group_id) LOGGER.debug("Done. 
Result: %s\n", response) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed with: %s", str(ex)) @@ -1125,7 +1125,7 @@ def clean_load_balancers_aws(tags_dict, regions=None, dry_run=False): try: response = client.delete_load_balancer(LoadBalancerName=arn.split('/')[1]) LOGGER.debug("Done. Result: %s\n", response) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed with: %s", str(ex)) @@ -1202,7 +1202,7 @@ def clean_cloudformation_stacks_aws(tags_dict, regions=None, dry_run=False): try: response = client.delete_stack(StackName=arn.split('/')[1]) LOGGER.debug("Done. Result: %s\n", response) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug("Failed with: %s", str(ex)) @@ -1276,7 +1276,7 @@ def clean_launch_templates_aws(tags_dict, regions=None, dry_run=False): LOGGER.info( "Successfully deleted '%s' LaunchTemplate. Response: %s\n", (current_lt_name or current_lt_id), response) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info("Failed to delete the '%s' LaunchTemplate: %s", deletion_args, str(ex)) @@ -1406,7 +1406,7 @@ def __init__(self): def list_gke_clusters(self) -> list: try: output = self.gcloud.run("container clusters list --format json") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("`gcloud container clusters list --format json' failed to run: %s", exc) else: try: @@ -1422,7 +1422,7 @@ def list_orphaned_gke_disks(self) -> dict: try: disks = json.loads(self.gcloud.run( 'compute disks list --format="json(name,zone)" --filter="name~^gke-.*-pvc-.* AND -users:*"')) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("`gcloud compute disks list' failed to run: %s", exc) else: for disk in disks: @@ -1481,13 +1481,13 @@ def list_clusters(self) -> list: # pylint: disable=no-self-use for aws_region in regions or all_aws_regions(): try: cluster_names = boto3.client('eks', region_name=aws_region).list_clusters()['clusters'] - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Failed to get list of EKS clusters in the '%s' region: %s", aws_region, exc) return [] for cluster_name in cluster_names: try: eks_clusters.append(EksCluster(cluster_name, aws_region)) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Failed to get body of cluster on EKS: %s", exc) return eks_clusters @@ -1604,7 +1604,7 @@ def delete_cluster(cluster): try: res = cluster.destroy() LOGGER.info("%s deleted=%s", cluster.name, res) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error(exc) ParallelObject(gke_clusters_to_clean, timeout=180).run(delete_cluster, ignore_exceptions=True) @@ -1623,7 +1623,7 @@ def clean_orphaned_gke_disks(tags_dict: dict, dry_run: bool = False) -> None: gke_cleaner.clean_disks(disk_names=disk_names, zone=zone) LOGGER.info("Deleted following orphaned GKE disks in the '%s' 
zone (%s project): %s", zone, os.environ.get("SCT_GCE_PROJECT"), disk_names) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error(exc) @@ -1645,7 +1645,7 @@ def delete_cluster(cluster): res = cluster.destroy() LOGGER.info("'%s' EKS cluster in the '%s' region has been deleted. Response=%s", cluster.name, cluster.region_name, res) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error(exc) ParallelObject(eks_clusters_to_clean, timeout=180).run(delete_cluster, ignore_exceptions=True) @@ -1757,7 +1757,7 @@ def safe_kill(pid, signal): try: os.kill(pid, signal) return True - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 return False @@ -2145,7 +2145,7 @@ def remove_files(path): shutil.rmtree(path=path, ignore_errors=True) if os.path.isfile(path): os.remove(path) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Error during remove archived logs %s", details) LOGGER.info("Remove temporary data manually: \"%s\"", path) @@ -2163,7 +2163,7 @@ def create_remote_storage_dir(node, path='') -> Optional[str, None]: 'Remote storing folder not created.\n %s', result) remote_dir = node_remote_dir - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Error during creating remote directory %s", details) return None @@ -2277,15 +2277,14 @@ def download_dir_from_cloud(url): LOGGER.info("Downloading [%s] to [%s]", url, tmp_dir) if os.path.isdir(tmp_dir) and os.listdir(tmp_dir): LOGGER.warning("[{}] already exists, skipping download".format(tmp_dir)) + elif url.startswith('s3://'): + s3_download_dir(parsed.hostname, parsed.path, tmp_dir) + elif url.startswith('gs://'): + gce_download_dir(parsed.hostname, parsed.path, tmp_dir) + elif os.path.isdir(url): + tmp_dir = url else: - if url.startswith('s3://'): - s3_download_dir(parsed.hostname, parsed.path, tmp_dir) - elif url.startswith('gs://'): - gce_download_dir(parsed.hostname, parsed.path, tmp_dir) - elif os.path.isdir(url): - tmp_dir = url - else: - raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url)) + raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url)) if not tmp_dir.endswith('/'): tmp_dir += '/' LOGGER.info("Finished downloading [%s]", url) @@ -2744,7 +2743,7 @@ def approach_enospc(): LOGGER.debug('Cost 90% free space on /var/lib/scylla/ by {}'.format(occupy_space_cmd)) try: target_node.remoter.sudo(occupy_space_cmd, verbose=True) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning(str(details)) return bool(list(no_space_log_reader)) @@ -2941,11 +2940,11 @@ def walk_thru_data(data, path: str, separator: str = '/') -> Any: if not name: continue if name[0] == '[' and name[-1] == ']': - name = name[1:-1] + name = name[1:-1] # noqa: PLW2901 if name.isalnum() and isinstance(current_value, (list, tuple, set)): try: current_value = current_value[int(name)] - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 current_value = None continue current_value = current_value.get(name, None) diff --git 
a/sdcm/utils/data_validator.py b/sdcm/utils/data_validator.py index 37bfc1a07f..2936d0b272 100644 --- a/sdcm/utils/data_validator.py +++ b/sdcm/utils/data_validator.py @@ -462,33 +462,31 @@ def validate_range_not_expected_to_change(self, session, during_nemesis=False): message=f"Actual dataset length more then expected ({len(actual_result)} > {len(expected_result)}). " f"Issue #6181" ).publish() - else: - if not during_nemesis: - assert len(actual_result) == len(expected_result), \ - 'One or more rows are not as expected, suspected LWT wrong update. ' \ - 'Actual dataset length: {}, Expected dataset length: {}'.format(len(actual_result), - len(expected_result)) + elif not during_nemesis: + assert len(actual_result) == len(expected_result), \ + 'One or more rows are not as expected, suspected LWT wrong update. ' \ + 'Actual dataset length: {}, Expected dataset length: {}'.format(len(actual_result), + len(expected_result)) - assert actual_result == expected_result, \ - 'One or more rows are not as expected, suspected LWT wrong update' + assert actual_result == expected_result, \ + 'One or more rows are not as expected, suspected LWT wrong update' - # Raise info event at the end of the test only. - DataValidatorEvent.ImmutableRowsValidator( - severity=Severity.NORMAL, - message="Validation immutable rows finished successfully" - ).publish() - else: - if len(actual_result) < len(expected_result): - DataValidatorEvent.ImmutableRowsValidator( - severity=Severity.ERROR, - error=f"Verify immutable rows. " - f"One or more rows not found as expected, suspected LWT wrong update. " - f"Actual dataset length: {len(actual_result)}, " - f"Expected dataset length: {len(expected_result)}" - ).publish() - else: - LOGGER.debug('Verify immutable rows. Actual dataset length: %s, Expected dataset length: %s', - len(actual_result), len(expected_result)) + # Raise info event at the end of the test only. + DataValidatorEvent.ImmutableRowsValidator( + severity=Severity.NORMAL, + message="Validation immutable rows finished successfully" + ).publish() + elif len(actual_result) < len(expected_result): + DataValidatorEvent.ImmutableRowsValidator( + severity=Severity.ERROR, + error=f"Verify immutable rows. " + f"One or more rows not found as expected, suspected LWT wrong update. " + f"Actual dataset length: {len(actual_result)}, " + f"Expected dataset length: {len(expected_result)}" + ).publish() + else: + LOGGER.debug('Verify immutable rows. Actual dataset length: %s, Expected dataset length: %s', + len(actual_result), len(expected_result)) def list_of_view_names_for_update_test(self): # List of tuples of correlated view names for validation: before update, after update, expected data @@ -621,7 +619,7 @@ def analyze_updated_data_and_save_in_file(self, data_for_validation: DataForVali try: row_data.update({key: list(session.execute(query % (select_columns, table)))}) - except Exception as error: # pylint: disable=broad-except + except Exception as error: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Query %s failed. 
Error: %s", query % table, error) row_data.update({'source_all_columns': list(session.execute(query % (columns_for_validation, diff --git a/sdcm/utils/database_query_utils.py b/sdcm/utils/database_query_utils.py index 12f747c674..5911541625 100644 --- a/sdcm/utils/database_query_utils.py +++ b/sdcm/utils/database_query_utils.py @@ -93,7 +93,7 @@ def collect_partitions_info(self, ignore_limit_rows_number: bool = False) -> dic session.default_consistency_level = ConsistencyLevel.QUORUM pk_list = sorted(get_partition_keys(ks_cf=self.table_name, session=session, pk_name=self.primary_key_column)) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 TestFrameworkEvent(source=self.__class__.__name__, message=error_message.format(exc), severity=Severity.ERROR).publish() return None @@ -120,7 +120,7 @@ def collect_partitions_info(self, ignore_limit_rows_number: bool = False) -> dic statement=count_pk_rows_cmd, retries=1, timeout=600, raise_on_exceeded=True, verbose=False) pk_rows_num_result = pk_rows_num_query_result[0].count - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 TestFrameworkEvent(source=self.__class__.__name__, message=error_message.format(exc), severity=Severity.ERROR).publish() return None diff --git a/sdcm/utils/docker_utils.py b/sdcm/utils/docker_utils.py index cc5e7194ba..ec3578f1f2 100644 --- a/sdcm/utils/docker_utils.py +++ b/sdcm/utils/docker_utils.py @@ -176,8 +176,8 @@ def _get_attr_for_name(instance: INodeWithContainerManager, if not name_only_lookup: attr_candidate_list.append((attr, ())) - for attr_candidate, args in attr_candidate_list: - attr_candidate = getattr(instance, attr_candidate, None) + for _attr_candidate, args in attr_candidate_list: + attr_candidate = getattr(instance, _attr_candidate, None) if callable(attr_candidate): attr_candidate = attr_candidate(*args) if attr_candidate is not None: @@ -251,7 +251,7 @@ def destroy_container(cls, instance: INodeWithContainerManager, name: str, ignor try: with open(logfile, "ab") as log: log.write(container.logs()) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Unable to write container logs to %s", logfile, exc_info=exc) else: LOGGER.debug("Container %s logs written to %s", container, logfile) @@ -268,7 +268,7 @@ def destroy_all_containers(cls, instance: INodeWithContainerManager, ignore_keep for name in tuple(instance._containers.keys()): try: cls.destroy_container(instance, name, ignore_keepalive=ignore_keepalive) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("%s: some exception raised during container `%s' destroying", instance, name, exc_info=exc) @classmethod diff --git a/sdcm/utils/gce_utils.py b/sdcm/utils/gce_utils.py index 85f2d2e378..7fe5ef91d5 100644 --- a/sdcm/utils/gce_utils.py +++ b/sdcm/utils/gce_utils.py @@ -144,10 +144,10 @@ def _span_container(self): return try: self._container = self._instance._get_gcloud_container() # pylint: disable=protected-access - except Exception as exc: + except Exception as exc: # noqa: BLE001 try: ContainerManager.destroy_container(self._instance, self._name) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass raise exc from None @@ -157,7 +157,7 @@ def 
_destroy_container(self): return try: ContainerManager.destroy_container(self._instance, self._name) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass self._container = None @@ -370,7 +370,7 @@ def disk_from_image( # pylint: disable=too-many-arguments return boot_disk -def create_instance( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements +def create_instance( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements # noqa: PLR0913 project_id: str, zone: str, instance_name: str, diff --git a/sdcm/utils/get_username.py b/sdcm/utils/get_username.py index e2914a587d..c78386fab8 100644 --- a/sdcm/utils/get_username.py +++ b/sdcm/utils/get_username.py @@ -24,7 +24,7 @@ def get_email_user(email_addr: str) -> str: return email_addr.strip().split("@")[0] -def get_username() -> str: # pylint: disable=too-many-return-statements +def get_username() -> str: # pylint: disable=too-many-return-statements # noqa: PLR0911 # First we check if user is being impersonated by an api call actual_user_from_request = os.environ.get('BUILD_USER_REQUESTED_BY') if actual_user_from_request: diff --git a/sdcm/utils/git.py b/sdcm/utils/git.py index 82b24baa3e..b22ccf7857 100644 --- a/sdcm/utils/git.py +++ b/sdcm/utils/git.py @@ -103,5 +103,5 @@ def clone_repo(remoter, repo_url: str, destination_dir_name: str = "", clone_as_ remoter.run(clone_cmd) LOGGER.debug("Finished cloning from %s.", repo_url) - except Exception as exc: + except Exception as exc: # noqa: BLE001 LOGGER.warning("Failed to clone from %s. Failed with: %s", repo_url, exc) diff --git a/sdcm/utils/issues.py b/sdcm/utils/issues.py index 4ea40e75b6..d7301aa20f 100644 --- a/sdcm/utils/issues.py +++ b/sdcm/utils/issues.py @@ -96,7 +96,7 @@ def get_issue_details(self, issue): # this would mean that we would assume issue is open, and enable the skips needed, without having the # actual data of the issue return github.Issue.Issue(requester=None, headers={}, attributes=dict(state='open'), completed=True) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 logging.warning("failed to get issue: %s", issue) TestFrameworkEvent(source=self.__class__.__name__, message=f"failed to get issue {issue}", diff --git a/sdcm/utils/k8s/__init__.py b/sdcm/utils/k8s/__init__.py index fb9fd137a5..a494ef2d0c 100644 --- a/sdcm/utils/k8s/__init__.py +++ b/sdcm/utils/k8s/__init__.py @@ -180,7 +180,7 @@ def check_if_api_not_operational(self, kluster, num_requests=20): try: self._api_test(kluster) passed += 1 - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 time.sleep(1 / self.rate_limit) return passed < num_requests * 0.8 @@ -553,7 +553,7 @@ def gather_k8s_logs_by_operator(cls, kluster, logdir_path=None): # Error: destination directory "%logdir_path%/must-gather" is not empty LOCALRUNNER.run(f"mkdir -p {logdir_path}/must-gather && rm -rf {logdir_path}/must-gather/*") LOCALRUNNER.run(gather_logs_cmd) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning( "Failed to run scylla-operator's 'must gather' command: %s", exc, extra={'prefix': kluster.region_name}) @@ -563,7 +563,7 @@ def gather_k8s_logs_by_operator(cls, kluster, logdir_path=None): extra={'prefix': kluster.region_name}) try: LOCALRUNNER.run(f"rm 
{operator_bin_path}") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning( "Failed to delete the the scylla-operator binary located at '%s': %s", operator_bin_path, exc, extra={'prefix': kluster.region_name}) @@ -625,10 +625,10 @@ def gather_k8s_logs(cls, logdir_path, kubectl=None, namespaces=None) -> None: # resource_type, namespace) resource_dir = logdir / namespace_scope_dir / namespace / resource_type os.makedirs(resource_dir, exist_ok=True) - for res in resources_wide.split("\n"): - if not re.match(f"{namespace} ", res): + for _res in resources_wide.split("\n"): + if not re.match(f"{namespace} ", _res): continue - res = res.split()[1] + res = _res.split()[1] logfile = resource_dir / f"{res}.yaml" res_stdout = kubectl( f"get {resource_type}/{res} -o yaml 2>&1 | tee {logfile}", @@ -784,7 +784,7 @@ def run(self): self._check_token_validity_in_temporary_location() self._replace_active_token_by_token_from_temporary_location() LOGGER.debug('Cloud token has been updated and stored at %s', self._kubectl_token_path) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug('Failed to update cloud token: %s', exc) wait_time = 5 else: @@ -800,7 +800,7 @@ def _clean_up_token_in_temporary_location(self): try: if os.path.exists(self._temporary_token_path): os.unlink(self._temporary_token_path) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug('Failed to cleanup temporary token: %s', exc) def _check_token_validity_in_temporary_location(self): @@ -909,7 +909,7 @@ def run(self) -> None: try: for line in self._read_stream(): self._process_line(line) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 if not self._termination_event.wait(0.01): raise self.log.info("Scylla pods IP change tracker thread has been stopped") @@ -983,7 +983,7 @@ def _process_line(self, line: str) -> None: # pylint: disable=too-many-branches break else: break - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning( "Failed to parse following line: %s\nerr: %s", line, exc) @@ -1001,7 +1001,7 @@ def process_callback(callback, namespace, pod_name=None, add_pod_name_as_kwarg=F self.log.debug("Calling '%s' callback %s", func.__name__, suffix) try: func(*args, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.warning("Callback call failed %s: %s", suffix, str(exc)) data = self.mapper_dict.get(namespace, {}) @@ -1068,7 +1068,7 @@ def register_callbacks(self, callbacks: Union[Callable, list[Callable]], for callback in callbacks: if callable(callback): - callback = [callback, [], {}] + callback = [callback, [], {}] # noqa: PLW2901 if (isinstance(callback, (tuple, list)) and len(callback) == 3 and callable(callback[0]) @@ -1258,7 +1258,7 @@ def delete(self, path): last = int(last[1:-1]) try: del parent[last] - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass def as_dict(self): diff --git a/sdcm/utils/k8s/chaos_mesh.py b/sdcm/utils/k8s/chaos_mesh.py index 790eada1aa..160e84b28b 100644 --- a/sdcm/utils/k8s/chaos_mesh.py +++ 
b/sdcm/utils/k8s/chaos_mesh.py @@ -165,7 +165,7 @@ def start(self): self._end_time = time.time() + self._timeout # pylint: disable=too-many-return-statements - def get_status(self) -> ExperimentStatus: + def get_status(self) -> ExperimentStatus: # noqa: PLR0911 """Gets status of chaos-mesh experiment.""" result = self._k8s_cluster.kubectl( f"get {self.CHAOS_KIND} {self._name} -n {self._namespace} -o jsonpath='{{.status.conditions}}'", verbose=False) diff --git a/sdcm/utils/latency.py b/sdcm/utils/latency.py index 2b1013572b..e9b06c5b52 100644 --- a/sdcm/utils/latency.py +++ b/sdcm/utils/latency.py @@ -32,7 +32,7 @@ def collect_latency(monitor_node, start, end, load_type, cluster, nodes_list): for precision in cassandra_stress_precision: metric = f'c-s {precision}' if precision == 'max' else f'c-s P{precision}' if not precision == 'max': - precision = f'perc_{precision}' + precision = f'perc_{precision}' # noqa: PLW2901 query = f'sct_cassandra_stress_{load_type}_gauge{{type="lat_{precision}"}}' query_res = prometheus.query(query, start, end) latency_values_lst = [] diff --git a/sdcm/utils/ldap.py b/sdcm/utils/ldap.py index b9944b0c9b..54f01b43e2 100644 --- a/sdcm/utils/ldap.py +++ b/sdcm/utils/ldap.py @@ -34,7 +34,7 @@ LDAP_PASSWORD = 'scylla-0' LDAP_ROLE = 'scylla_ldap' LDAP_USERS = ['scylla-qa', 'dummy-user'] -LDAP_BASE_OBJECT = (lambda l: ','.join([f'dc={part}' for part in l.split('.')]))(LDAP_DOMAIN) +LDAP_BASE_OBJECT = (lambda l: ','.join([f'dc={part}' for part in l.split('.')]))(LDAP_DOMAIN) # noqa: PLC3002 SASLAUTHD_AUTHENTICATOR = 'com.scylladb.auth.SaslauthdAuthenticator' diff --git a/sdcm/utils/log.py b/sdcm/utils/log.py index 24fc799937..3f05f6afc6 100644 --- a/sdcm/utils/log.py +++ b/sdcm/utils/log.py @@ -79,14 +79,14 @@ def replace_vars(obj, variables, obj_type=None): if issubclass(obj_type, dict): output = {} for attr_name, attr_value in obj.items(): - attr_name = replace_vars(attr_name, variables) - attr_value = replace_vars(attr_value, variables) + attr_name = replace_vars(attr_name, variables) # noqa: PLW2901 + attr_value = replace_vars(attr_value, variables) # noqa: PLW2901 output[attr_name] = attr_value # deepcode ignore UnhashableKey: you get same keys type as source return output if issubclass(obj_type, list): output = [] for element in obj: - element = replace_vars(element, variables) + element = replace_vars(element, variables) # noqa: PLW2901 output.append(element) # deepcode ignore InfiniteLoopByCollectionModification: Not even close return output if issubclass(obj_type, tuple): diff --git a/sdcm/utils/log_time_consistency.py b/sdcm/utils/log_time_consistency.py index 239576bbe3..69bbacfce4 100644 --- a/sdcm/utils/log_time_consistency.py +++ b/sdcm/utils/log_time_consistency.py @@ -96,7 +96,7 @@ def _analyze_file(cls, log_file: Path) -> tuple[dict[str, list[str]], dict[str, continue try: current_time = datetime.datetime.fromisoformat(line.split()[0]).timestamp() - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 continue current_time_shift = prior_time - current_time if bucket_name := cls._get_timeshift_bucket_name(current_time_shift): @@ -132,7 +132,7 @@ def _analyze_file(cls, log_file: Path) -> tuple[dict[str, list[str]], dict[str, sct_time, event_time = match.groups() sct_time = datetime.datetime.fromisoformat(sct_time).timestamp() event_time = datetime.datetime.fromisoformat(event_time).timestamp() - except Exception: # pylint: disable=broad-except + except Exception: # pylint: 
disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 continue current_time_shift = sct_time - event_time if bucket_name := cls._get_timeshift_bucket_name(time_shift=current_time_shift): diff --git a/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py b/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py index e303ec09bf..280a52742f 100644 --- a/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py +++ b/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py @@ -117,9 +117,8 @@ def make_table_line_for_render(data): if "_per_op" in key: if diff < 1 + regression_limit: table_line["is_" + key + "_within_limits"] = True - else: - if diff > 1 - regression_limit: - table_line["is_" + key + "_within_limits"] = True + elif diff > 1 - regression_limit: + table_line["is_" + key + "_within_limits"] = True table_line[key + "_diff"] = round((diff - 1) * 100, 2) table_line[key] = round(table_line[key], 2) table_line["mad tps"] = round(table_line["mad tps"], 2) diff --git a/sdcm/utils/nemesis_utils/indexes.py b/sdcm/utils/nemesis_utils/indexes.py index 4f9933828d..4e203123d1 100644 --- a/sdcm/utils/nemesis_utils/indexes.py +++ b/sdcm/utils/nemesis_utils/indexes.py @@ -36,7 +36,7 @@ def is_cf_a_view(node: BaseNode, ks, cf) -> bool: f" WHERE keyspace_name = '{ks}'" f" AND view_name = '{cf}'") return result and bool(len(result.one())) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.debug('Got no result from system_schema.views for %s.%s table. Error: %s', ks, cf, exc) return False @@ -103,7 +103,7 @@ def verify_query_by_index_works(session, ks, cf, column) -> None: query = SimpleStatement(f'SELECT * FROM {ks}.{cf} WHERE "{column}" = %s LIMIT 100', fetch_size=100) LOGGER.debug("Verifying query by index works: %s", query) result = session.execute(query, parameters=(value,)) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 InfoEvent(message=f"Index {ks}.{cf}({column}) does not work in query: {query}.
Reason: {exc}", severity=Severity.ERROR).publish() if len(list(result)) == 0: diff --git a/sdcm/utils/operations_thread.py b/sdcm/utils/operations_thread.py index 62b17223e7..25c8dd0353 100644 --- a/sdcm/utils/operations_thread.py +++ b/sdcm/utils/operations_thread.py @@ -149,7 +149,7 @@ def _run_next_operation(self): self.log.debug("Thread operations queue depleted.") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self.log.error(traceback.format_exc()) self.log.error("Encountered exception while performing a operation:\n%s", exc) diff --git a/sdcm/utils/properties.py b/sdcm/utils/properties.py index e0ffebafaf..d759f6f0c2 100644 --- a/sdcm/utils/properties.py +++ b/sdcm/utils/properties.py @@ -49,11 +49,11 @@ def deserialize(data: Union[str, TextIO]) -> PropertiesDict: if not line.strip() or line.lstrip()[0] == '#': output[line] = None continue - line = line.split('=', 1) - if len(line) == 2: - value = line[1] + line_splitted = line.split('=', 1) + if len(line_splitted) == 2: + value = line_splitted[1] comment_pos = value.find('#') if comment_pos >= 0: value = value[0:value] - output[line[0].strip()] = value.strip().strip('"').strip("'") + output[line_splitted[0].strip()] = value.strip().strip('"').strip("'") return output diff --git a/sdcm/utils/quota.py b/sdcm/utils/quota.py index 7e11f31072..7cbf6fdb49 100644 --- a/sdcm/utils/quota.py +++ b/sdcm/utils/quota.py @@ -104,7 +104,7 @@ def approach_end_of_quota(): try: LOGGER.debug('Cost 90% free space on /var/lib/scylla/ by {}'.format(occupy_space_cmd)) node.remoter.sudo(occupy_space_cmd, Verbose=True) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("We should have reached the expected I/O error and quota has exceeded\n" "Message: {}".format(str(exc))) return bool(list(quota_exceeded_appearances)) diff --git a/sdcm/utils/raft/__init__.py b/sdcm/utils/raft/__init__.py index e17cecb38f..30937cbf22 100644 --- a/sdcm/utils/raft/__init__.py +++ b/sdcm/utils/raft/__init__.py @@ -165,7 +165,7 @@ def get_group0_members(self) -> list[dict[str, str]]: for row in rows: group0_members.append({"host_id": str(row.server_id), "voter": row.can_vote}) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 err_msg = f"Get group0 members failed with error: {exc}" LOGGER.error(err_msg) diff --git a/sdcm/utils/raft/common.py b/sdcm/utils/raft/common.py index 102245b582..3af7529ce0 100644 --- a/sdcm/utils/raft/common.py +++ b/sdcm/utils/raft/common.py @@ -66,7 +66,7 @@ def _start_bootstrap(self): self.bootstrap_node.parent_cluster.node_setup(self.bootstrap_node, verbose=True) self.bootstrap_node.parent_cluster.node_startup(self.bootstrap_node, verbose=True) LOGGER.debug("Node %s was bootstrapped", self.bootstrap_node.name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Setup failed for node %s with err %s", self.bootstrap_node.name, exc) finally: self._set_wait_stop_event() @@ -82,7 +82,7 @@ def _abort_bootstrap(self, abort_action: Callable, log_message: str, timeout: in stop_event=self.bootstrap_node.stop_wait_db_up_event) abort_action() LOGGER.info("Scylla was stopped successfully on node %s", self.bootstrap_node.name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # 
pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Abort was failed on node %s with error %s", self.bootstrap_node.name, exc) finally: self._set_wait_stop_event() @@ -149,7 +149,7 @@ def clean_and_restart_bootstrap_after_abort(self): self.bootstrap_node.start_scylla_jmx() self.db_cluster.check_nodes_up_and_normal( nodes=[self.bootstrap_node], verification_node=self.verification_node) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("Scylla service restart failed: %s", exc) self.clean_unbootstrapped_node() raise BootstrapStreamErrorFailure(f"Rebootstrap failed with error: {exc}") from exc diff --git a/sdcm/utils/remote_logger.py b/sdcm/utils/remote_logger.py index 026d87af7a..4eb5d1fac9 100644 --- a/sdcm/utils/remote_logger.py +++ b/sdcm/utils/remote_logger.py @@ -107,7 +107,7 @@ def _retrieve(self, since: str) -> None: ignore_status=True, log_file=self._target_log_file, ) - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self._log.error("Error retrieving remote node DB service log: %s", details) @cached_property @@ -198,7 +198,7 @@ def _is_ready_to_retrieve(self) -> bool: def _is_file_exist(self, file_path: str) -> bool: try: return self._remoter.run(cmd=f"sudo test -e {file_path}", ignore_status=True).ok - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self._log.error("Error checking if file %s exists: %s", file_path, details) return False @@ -245,7 +245,7 @@ def _thread_body(self): if started: # Update last time only if command successfully started self._last_time_completed = time.time() - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass def start(self): @@ -460,7 +460,7 @@ def _log_loop(self): self._log.debug( "'_read_log_line()': failed to read from pod %s log stream:%s", self._pod_name, exc) self._open_stream() - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 self._log.error( "'_read_log_line()': failed to read from pod %s log stream:%s", self._pod_name, exc) self._open_stream() @@ -562,7 +562,7 @@ def _logger_cmd(self) -> str: else: wrong_scheduled_pods_on_scylla_node.append( f"{pod.metadata.name} ({pod.spec.node_name} node)") - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 self._log.warning("Failed to get pods list: %s", str(details)) if not wrong_scheduled_pods_on_scylla_node: @@ -573,7 +573,7 @@ def _logger_cmd(self) -> str: return f"echo \"I`date -u +\"%m%d %H:%M:%S\"` {message}\" >> {self._target_log_file} 2>&1" -def get_system_logging_thread(logs_transport, node, target_log_file): # pylint: disable=too-many-return-statements +def get_system_logging_thread(logs_transport, node, target_log_file): # pylint: disable=too-many-return-statements # noqa: PLR0911 if logs_transport == 'docker': return DockerGeneralLogger(node, target_log_file) if logs_transport == 'kubectl': diff --git a/sdcm/utils/sstable/s3_uploader.py b/sdcm/utils/sstable/s3_uploader.py index 7171a022e7..88921b7880 100644 --- a/sdcm/utils/sstable/s3_uploader.py +++ b/sdcm/utils/sstable/s3_uploader.py @@ -31,7 +31,7 @@ def upload_sstables_to_s3(node: CollectingNode | BaseNode, keyspace: str, test_i if 
s3_link: LOGGER.info("Successfully uploaded sstables on node %s for keyspace %s", node.name, keyspace) node.remoter.run(f"nodetool clearsnapshot -t {snapshot_tag} {keyspace}") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Error while getting and uploading sstables: %s", exc, exc_info=exc) s3_link = "" return s3_link diff --git a/sdcm/utils/sstable/sstable_utils.py b/sdcm/utils/sstable/sstable_utils.py index 3635c0e13c..c25e76004c 100644 --- a/sdcm/utils/sstable/sstable_utils.py +++ b/sdcm/utils/sstable/sstable_utils.py @@ -57,6 +57,7 @@ def get_sstables(self, from_minutes_ago: int = 0): def check_that_sstables_are_encrypted(self, sstables=None, # pylint: disable=too-many-branches expected_bool_value: bool = True) -> list: + if not sstables: sstables = self.get_sstables() if isinstance(sstables, str): diff --git a/sdcm/utils/threads_and_processes_alive.py b/sdcm/utils/threads_and_processes_alive.py index 3721ef54bd..e939b76312 100644 --- a/sdcm/utils/threads_and_processes_alive.py +++ b/sdcm/utils/threads_and_processes_alive.py @@ -20,7 +20,7 @@ def get_thread_stacktrace(thread): # pylint: disable=no-self-use if line: output.append(" %s" % (line.strip())) return '\n'.join(output) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error('Failed to get stack trace due to: %s', exc) return 'FAILED TO GET STACKTRACE' @@ -28,7 +28,7 @@ def get_source(source: Any): try: return inspect.getsource(source) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error('Failed to get source due to: %s', exc) return 'NO SOURCE AVAILABLE' diff --git a/sdcm/utils/toppartition_util.py b/sdcm/utils/toppartition_util.py index 7e1d1ff872..2f5cd36fb1 100644 --- a/sdcm/utils/toppartition_util.py +++ b/sdcm/utils/toppartition_util.py @@ -83,8 +83,8 @@ def _parse_toppartitions_output(output: str) -> dict: def verify_output(self, output: str): toppartition_result = self._parse_toppartitions_output(output) - for sampler in self._built_args['samplers'].split(','): - sampler = sampler.upper() + for _sampler in self._built_args['samplers'].split(','): + sampler = _sampler.upper() assert sampler in toppartition_result, "{} sampler not found in result".format(sampler) assert toppartition_result[sampler]['toppartitions'] == self._built_args['toppartition'], \ "Wrong expected and actual top partitions number for {} sampler".format(sampler) diff --git a/sdcm/utils/version_utils.py b/sdcm/utils/version_utils.py index 184176a34e..3f6df7f090 100644 --- a/sdcm/utils/version_utils.py +++ b/sdcm/utils/version_utils.py @@ -556,7 +556,7 @@ def resolve_latest_repo_symlink(url: str) -> str: continuation_token = "BEGIN" while continuation_token: for build in s3_objects.get("CommonPrefixes", []): - build = build.get("Prefix", "").rstrip("/").rsplit("/", 1)[-1] + build = build.get("Prefix", "").rstrip("/").rsplit("/", 1)[-1] # noqa: PLW2901 if build == LATEST_SYMLINK_NAME: continue timestamp = NO_TIMESTAMP @@ -704,13 +704,13 @@ def __call__(self, func): self.VERSIONS[(func.__name__, func.__code__.co_filename)] = {} for min_v, max_v in self.min_max_version_pairs: scylla_type = "enterprise" if any((is_enterprise(v) for v in (min_v, max_v) if v)) else "oss" - min_v = min_v or ("3.0.0" if scylla_type ==
"oss" else "2019.1.rc0") - max_v = max_v or ("99.99.99" if scylla_type == "oss" else "2099.99.99") + min_v = min_v or ("3.0.0" if scylla_type == "oss" else "2019.1.rc0") # noqa: PLW2901 + max_v = max_v or ("99.99.99" if scylla_type == "oss" else "2099.99.99") # noqa: PLW2901 if max_v.count(".") == 1: # NOTE: version parse function considers 4.4 as lower than 4.4.1, # but we expect it to be any of the 4.4.x versions. # So, update all such short versions with the patch part and make it to be huge. - max_v = f"{max_v}.999" + max_v = f"{max_v}.999" # noqa: PLW2901 self.VERSIONS[(func.__name__, func.__code__.co_filename)].update({(min_v, max_v): func}) @wraps(func) @@ -747,7 +747,7 @@ def get_relocatable_pkg_url(scylla_version: str) -> str: get_pkgs_cmd = f'curl -s -X POST http://backtrace.scylladb.com/index.html -d "build_id={scylla_build_id}&backtrace="' res = LOCALRUNNER.run(get_pkgs_cmd) relocatable_pkg = re.findall(fr"{scylla_build_id}.+(http:[/\w.:-]*\.tar\.gz)", res.stdout)[0] - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.warning("Couldn't get relocatable_pkg link due to: %s", exc) return relocatable_pkg @@ -828,7 +828,7 @@ def find_scylla_repo(scylla_version, dist_type='centos', dist_version=None): for key in repo_map: if scylla_version.startswith(key): return repo_map[key] - else: + else: # noqa: PLW0120 raise ValueError(f"repo for scylla version {scylla_version} wasn't found") diff --git a/sdcm/wait.py b/sdcm/wait.py index d7bb84f46e..811d9071c3 100644 --- a/sdcm/wait.py +++ b/sdcm/wait.py @@ -69,7 +69,7 @@ def retry_logger(retry_state): ) res = retry(func, **kwargs) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 err = f"Wait for: {text or func.__name__}: timeout - {timeout} seconds - expired" raising_exc = WaitForTimeoutError(err) if stop_event and stop_event.is_set(): diff --git a/sdcm/ycsb_thread.py b/sdcm/ycsb_thread.py index 05995e89a3..db3537a1e6 100644 --- a/sdcm/ycsb_thread.py +++ b/sdcm/ycsb_thread.py @@ -103,9 +103,9 @@ def run(self): for key, value in match.groupdict().items(): if not key == 'count': try: - value = float(value) / 1000.0 + value = float(value) / 1000.0 # noqa: PLW2901 except ValueError: - value = float(0) + value = float(0) # noqa: PLW2901 self.set_metric(operation, key, float(value)) except Exception: # pylint: disable=broad-except @@ -126,7 +126,7 @@ def copy_template(self, cmd_runner, loader_name, memo={}): # pylint: disable=da web_protocol = "http" + ("s" if self.params.get("alternator_port") == 8043 else "") elif self.params.get('alternator_use_dns_routing'): target_address = 'alternator' - else: + else: # noqa: PLR5501 if hasattr(self.node_list[0], 'parent_cluster'): target_address = self.node_list[0].parent_cluster.get_node().cql_address else: diff --git a/sla_per_user_system_test.py b/sla_per_user_system_test.py index f0fc88f9fb..2224548a6f 100644 --- a/sla_per_user_system_test.py +++ b/sla_per_user_system_test.py @@ -836,7 +836,7 @@ def _compare_workloads_c_s_metrics(self, workloads_queue: list) -> dict: assert len(workloads_results) == 2, \ "Expected workload_results length to be 2, got: %s. 
workload_results: %s" % ( - len(workloads_results), workloads_results) + len(workloads_results), workloads_results) comparison_results = {} try: for item, target_margin in comparison_axis.items(): @@ -884,13 +884,13 @@ def get_email_data(self): try: email_data = self._get_common_email_data() - except Exception as error: # pylint: disable=broad-except + except Exception as error: # pylint: disable=broad-except # noqa: BLE001 self.log.error("Error in gathering common email data: Error:\n%s", error) try: grafana_dataset = self.monitors.get_grafana_screenshot_and_snapshot( self.start_time) if self.monitors else {} - except Exception as error: # pylint: disable=broad-except + except Exception as error: # pylint: disable=broad-except # noqa: BLE001 self.log.error("Error in gathering Grafana screenshots and snapshots. Error:\n%s", error) email_data.update({"grafana_screenshots": grafana_dataset.get("screenshots", []), diff --git a/stop_compaction_test.py b/stop_compaction_test.py index 8d2d0c8769..6774c2c073 100644 --- a/stop_compaction_test.py +++ b/stop_compaction_test.py @@ -38,7 +38,7 @@ def wrapper(*args, **kwargs): try: func(*args, **kwargs) return {test_name: ["SUCCESS", []]} - except Exception as exc: # pylint:disable=broad-except + except Exception as exc: # pylint:disable=broad-except # noqa: BLE001 LOGGER.error(exc) return {test_name: ["FAILURE", [exc]]} return wrapper @@ -375,7 +375,7 @@ def get_email_data(self): try: email_data = self._get_common_email_data() - except Exception as error: # pylint: disable=broad-except + except Exception as error: # pylint: disable=broad-except # noqa: BLE001 self.log.error("Error in gathering common email data: Error:\n%s", error) email_data.update({"test_statuses": self.test_statuses, diff --git a/unit_tests/conftest.py b/unit_tests/conftest.py index eb7950241e..edc38b95e1 100644 --- a/unit_tests/conftest.py +++ b/unit_tests/conftest.py @@ -100,14 +100,14 @@ def fixture_docker_scylla(request: pytest.FixtureRequest): # pylint: disable=to def db_up(): try: return scylla.is_port_used(port=BaseNode.CQL_PORT, service_name="scylla-server") - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 logging.error("Error checking for scylla up normal: %s", details) return False def db_alternator_up(): try: return scylla.is_port_used(port=8000, service_name="scylla-server") - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 logging.error("Error checking for scylla up normal: %s", details) return False diff --git a/unit_tests/lib/fake_provisioner.py b/unit_tests/lib/fake_provisioner.py index 55fdeba40e..028a40085c 100644 --- a/unit_tests/lib/fake_provisioner.py +++ b/unit_tests/lib/fake_provisioner.py @@ -76,7 +76,7 @@ def reboot_instance(self, name: str, wait: bool, hard: bool = False) -> None: def run_command(self, name: str, command: str) -> Result: """Runs command on instance.""" - return subprocess.run(command, shell=True, capture_output=True, text=True) # pylint: disable=subprocess-run-check + return subprocess.run(command, shell=True, capture_output=True, text=True, check=False) # pylint: disable=subprocess-run-check @classmethod def discover_regions(cls, test_id: str, **kwargs) -> List[Provisioner]: # pylint: disable=unused-argument diff --git a/unit_tests/provisioner/fake_azure_service.py b/unit_tests/provisioner/fake_azure_service.py index e866967f59..4fdf13de06 100644 --- 
a/unit_tests/provisioner/fake_azure_service.py +++ b/unit_tests/provisioner/fake_azure_service.py @@ -73,8 +73,8 @@ def create_or_update(self, resource_group_name: str, parameters: Dict[str, Any]) } res_group.update(**parameters) (self.path / resource_group_name).mkdir(exist_ok=True) - with open(self.path / resource_group_name / "resource_group.json", "w", encoding="utf-8") as file: - json.dump(res_group, fp=file, indent=2) + with open(self.path / resource_group_name / "resource_group.json", "w", encoding="utf-8") as file_obj: + json.dump(res_group, fp=file_obj, indent=2) return ResourceGroup.deserialize(res_group) def get(self, name) -> ResourceGroup: @@ -109,9 +109,9 @@ def list(self, resource_group_name: str) -> List[NetworkSecurityGroup]: except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(NetworkSecurityGroup.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(NetworkSecurityGroup.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, network_security_group_name: str, @@ -179,9 +179,9 @@ def list(self, resource_group_name: str) -> List[VirtualNetwork]: except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(VirtualNetwork.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(VirtualNetwork.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, virtual_network_name: str, parameters: Dict[str, Any] @@ -230,9 +230,9 @@ def list(self, resource_group_name: str, virtual_network_name: str) -> List[Subn except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(Subnet.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(Subnet.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, virtual_network_name: str, subnet_name: str, @@ -280,9 +280,9 @@ def list(self, resource_group_name: str) -> List[PublicIPAddress]: except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(PublicIPAddress.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(PublicIPAddress.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, public_ip_address_name: str, parameters: Dict[str, Any] @@ -310,8 +310,8 @@ def begin_create_or_update(self, resource_group_name: str, public_ip_address_nam "provisioningState": "Succeeded" } } - with open(self.path / resource_group_name / f"ip-{public_ip_address_name}.json", "w", 
encoding="utf-8") as file: - json.dump(base, fp=file, indent=2) + with open(self.path / resource_group_name / f"ip-{public_ip_address_name}.json", "w", encoding="utf-8") as file_obj: + json.dump(base, fp=file_obj, indent=2) return WaitableObject() def get(self, resource_group_name: str, public_ip_address_name: str) -> PublicIPAddress: @@ -337,9 +337,9 @@ def list(self, resource_group_name: str) -> List[NetworkInterface]: except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(NetworkInterface.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(NetworkInterface.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, network_interface_name: str, parameters: Dict[str, Any] @@ -584,7 +584,7 @@ def begin_restart(self, resource_group_name, vm_name # pylint: disable=unused-a # pylint: disable=unused-argument,no-self-use def begin_run_command(self, resource_group_name, vm_name, parameters) -> ResultableObject: result = subprocess.run(parameters.script[0], shell=True, capture_output=True, # pylint: disable=subprocess-run-check - text=True) + text=True, check=False) return ResultableObject(result.stdout, result.stderr) diff --git a/unit_tests/test_coredump.py b/unit_tests/test_coredump.py index 7d4705e27f..85aec47de9 100644 --- a/unit_tests/test_coredump.py +++ b/unit_tests/test_coredump.py @@ -108,7 +108,7 @@ def _run_coredump_with_fake_remoter(self, test_name: str): result_coredump_list = results[coredump_status] try: self.assertEqual(expected_coredump_list, result_coredump_list) - except Exception as exc: + except Exception as exc: # noqa: BLE001 raise AssertionError( f'Got unexpected results for {coredump_status}: {str(result_coredump_list)}\n{str(exc)}') from exc diff --git a/unit_tests/test_events.py b/unit_tests/test_events.py index f4a399c1a5..1b79fee9ea 100644 --- a/unit_tests/test_events.py +++ b/unit_tests/test_events.py @@ -785,7 +785,7 @@ def test_kill_nemesis_during_con_event(self): nemesis_event.duration = 15 raise - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pytest.fail("we shouldn't reach this code path") assert nemesis_event.errors_formatted == '' diff --git a/unit_tests/test_remoter.py b/unit_tests/test_remoter.py index d5ff351a67..f4db11bcdb 100644 --- a/unit_tests/test_remoter.py +++ b/unit_tests/test_remoter.py @@ -98,12 +98,12 @@ def _create_and_run_twice_in_same_thread(remoter_type, key_file, stmt, kwargs, p pod_name='sct-cluster-dc-1-kind-0', container="scylla", namespace="scylla") try: result = remoter.run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 result = exc paramiko_thread_results.append(result) try: result = remoter.run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 result = exc paramiko_thread_results.append(result) remoter.stop() @@ -120,13 +120,13 @@ def _create_and_run_in_same_thread(remoter_type, host, key_file, stmt, kwargs, p pod_name='sct-cluster-dc-1-kind-0', container="scylla", namespace="scylla") try: result = remoter.run(stmt, **kwargs) - except Exception as 
exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 result = exc paramiko_thread_results.append(result) remoter._reconnect() # pylint: disable=protected-access try: result = remoter.run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 result = exc paramiko_thread_results.append(result) remoter.stop() @@ -135,13 +135,13 @@ def _create_and_run_in_same_thread(remoter_type, host, key_file, stmt, kwargs, p def _create_and_run_in_separate_thread(remoter, stmt, kwargs, paramiko_thread_results): try: result = remoter.run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 result = exc paramiko_thread_results.append(result) remoter._reconnect() # pylint: disable=protected-access try: result = remoter.run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 result = exc paramiko_thread_results.append(result) remoter.stop() @@ -211,7 +211,7 @@ def test_run_in_mainthread( # pylint: disable=too-many-arguments 'timeout': timeout} try: expected = LocalCmdRunner().run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 expected = exc if issubclass(remoter_type, (RemoteCmdRunner, RemoteLibSSH2CmdRunner)): @@ -223,12 +223,12 @@ def test_run_in_mainthread( # pylint: disable=too-many-arguments pod_name='sct-cluster-dc-1-kind-0', container="scylla", namespace="scylla") try: result = remoter.run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 result = exc remoter._reconnect() # pylint: disable=protected-access try: result2 = remoter.run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 result2 = exc remoter.stop() @@ -249,7 +249,7 @@ def test_create_and_run_in_same_thread( # pylint: disable=too-many-arguments,to self.log.info(repr({stmt: stmt, **kwargs})) try: expected = LocalCmdRunner().run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 expected = exc paramiko_thread_results = [] @@ -276,7 +276,7 @@ def test_create_and_run_in_separate_thread( # pylint: disable=too-many-argument self.log.info(repr({stmt: stmt, **kwargs})) try: expected = LocalCmdRunner().run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 expected = exc # Paramiko fails too often when it is invoked like that, that is why it is not in the test @@ -332,7 +332,7 @@ def test_load_1000_threads(self, remoter_type, stmt: str): self.log.info(repr({stmt: stmt, **kwargs})) try: expected = LocalCmdRunner().run(stmt, **kwargs) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 expected = exc libssh2_thread_results = [] diff --git a/unit_tests/test_sct_events_base.py b/unit_tests/test_sct_events_base.py index f418b2299b..c7045006af 100644 --- a/unit_tests/test_sct_events_base.py +++ b/unit_tests/test_sct_events_base.py @@ -137,7 +137,7 @@ class Z(SctEvent): 
self.assertNotEqual(z, y) def test_equal_pickle_unpickle(self): - global Y # pylint: disable=global-variable-not-assigned; assigned by class definition + global Y # pylint: disable=global-variable-not-assigned; assigned by class definition # noqa: PLW0603 class Y(SctEvent): pass @@ -372,7 +372,7 @@ class Mixin: self.assertEqual(yt.attr1, "value1") def test_add_subevent_type_pickle(self): - global Y # pylint: disable=global-variable-not-assigned; assigned by class definition + global Y # pylint: disable=global-variable-not-assigned; assigned by class definition # noqa: PLW0603 class Y(SctEvent): T: Type[SctEvent] @@ -493,7 +493,7 @@ class Y(LogEvent): self.assertTrue(y._ready_to_publish) def test_clone_fresh(self): - global Y # pylint: disable=global-variable-not-assigned; assigned by class definition + global Y # pylint: disable=global-variable-not-assigned; assigned by class definition # noqa: PLW0603 class Y(LogEvent): pass @@ -516,7 +516,7 @@ class Y(LogEvent): self.assertIsInstance(y, SctEventProtocol) def test_clone_with_info(self): - global Y # pylint: disable=global-variable-not-assigned; assigned by class definition + global Y # pylint: disable=global-variable-not-assigned; assigned by class definition # noqa: PLW0603 class Y(LogEvent): pass diff --git a/unit_tests/test_sct_events_loaders.py b/unit_tests/test_sct_events_loaders.py index e795758506..14df1b2ee7 100644 --- a/unit_tests/test_sct_events_loaders.py +++ b/unit_tests/test_sct_events_loaders.py @@ -187,7 +187,7 @@ def test_continuous_event_with_error(self): try: raise ValueError('Stress command completed with bad status 1') - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 scylla_bench_event.severity = Severity.ERROR scylla_bench_event.add_error([str(exc)]) diff --git a/unit_tests/test_tester.py b/unit_tests/test_tester.py index c2dd9539d2..7f8cee32da 100644 --- a/unit_tests/test_tester.py +++ b/unit_tests/test_tester.py @@ -215,7 +215,7 @@ def test(): end_time = time.time() + 2 while time.time() < end_time: time.sleep(0.1) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass def _validate_results(self): diff --git a/unit_tests/test_version_utils.py b/unit_tests/test_version_utils.py index 363ed33d15..b9d85f1432 100644 --- a/unit_tests/test_version_utils.py +++ b/unit_tests/test_version_utils.py @@ -217,14 +217,13 @@ def __init__(self, scylla_version, nemesis_like_class): node_scylla_version = "2023.1.dev" elif scylla_version.startswith('master:') or scylla_version == "": node_scylla_version = "4.7.dev" + elif ":" in scylla_version: + node_scylla_version = scylla_version.split(":")[0] + if node_scylla_version.count(".") < 1: + node_scylla_version += ".0" + node_scylla_version += ".dev" else: - if ":" in scylla_version: - node_scylla_version = scylla_version.split(":")[0] - if node_scylla_version.count(".") < 1: - node_scylla_version += ".0" - node_scylla_version += ".dev" - else: - node_scylla_version = scylla_version + node_scylla_version = scylla_version nodes = [type("Node", (object,), {"scylla_version": node_scylla_version})] if nemesis_like_class: self.cluster = type("Cluster", (object,), { diff --git a/upgrade_schema_test.py b/upgrade_schema_test.py index d361f3d706..9954b245d5 100644 --- a/upgrade_schema_test.py +++ b/upgrade_schema_test.py @@ -258,8 +258,8 @@ def _get_thrift_client(self, host, port=9160): # 9160 def test_upgrade_schema(self): - global thrift_client - 
global cql_client + global thrift_client # noqa: PLW0603 + global cql_client # noqa: PLW0603 ips = [] for node in self.db_cluster.nodes: ips.append(node.public_ip_address) diff --git a/upgrade_test.py b/upgrade_test.py index c5e1f6740f..e72746cd6e 100644 --- a/upgrade_test.py +++ b/upgrade_test.py @@ -24,6 +24,8 @@ from typing import List import contextlib +import cassandra +import tenacity from argus.client.sct.types import Package from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement # pylint: disable=no-name-in-module @@ -66,7 +68,7 @@ def inner(self, *args, **kwargs): try: self.cql_truncate_simple_tables(session=session, rows=NUMBER_OF_ROWS_FOR_TRUNCATE_TEST) InfoEvent(message="Finish truncate simple tables").publish() - except Exception as details: # pylint: disable=broad-except + except cassandra.DriverException as details: InfoEvent(message=f"Failed truncate simple tables. Error: {str(details)}. Traceback: {traceback.format_exc()}", severity=Severity.ERROR).publish() self.validate_truncated_entries_for_table(session=session, system_truncated=True) @@ -166,7 +168,7 @@ def read_data_from_truncated_tables(self, session): msg='Expected that there is no data in the table truncate_ks.{}, but found {} rows' .format(table_name, count[0][0])) InfoEvent(message=f"Finish read data from {table_name} tables").publish() - except Exception as details: # pylint: disable=broad-except + except Exception as details: # pylint: disable=broad-except # noqa: BLE001 InfoEvent(message=f"Failed read data from {table_name} tables. Error: {str(details)}. Traceback: {traceback.format_exc()}", severity=Severity.ERROR).publish() @@ -195,7 +197,7 @@ def upgrade_node(self, node, upgrade_sstables=True): @decorate_with_context(ignore_abort_requested_errors) # https://github.com/scylladb/scylla/issues/10447#issuecomment-1194155163 - def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_version=None): + def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_version=None): # noqa: PLR0915 # pylint: disable=too-many-branches,too-many-statements new_scylla_repo = new_scylla_repo or self.params.get('new_scylla_repo') new_version = new_version or self.params.get('new_version') @@ -310,7 +312,7 @@ def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_v InfoEvent(message='upgrade_node - starting to "daemon-reload"').publish() node.remoter.run('sudo systemctl daemon-reload') InfoEvent(message='upgrade_node - ended to "daemon-reload"').publish() - else: + else: # noqa: PLR5501 if node.distro.is_rhel_like: InfoEvent(message='upgrade_node - starting to "yum update"').publish() node.remoter.run(r'sudo yum update {}\* -y'.format(scylla_pkg_ver)) @@ -497,7 +499,7 @@ def wait_for_node_to_finish(): assert list(sstable_versions)[0] == self.expected_sstable_format_version, ( "expected to format version to be '{}', found '{}'".format( self.expected_sstable_format_version, list(sstable_versions)[0])) - except Exception as ex: # pylint: disable=broad-except + except Exception as ex: # pylint: disable=broad-except # noqa: BLE001 self.log.warning(ex) return False else: @@ -507,7 +509,7 @@ def wait_for_node_to_finish(): InfoEvent(message="Start waiting for upgradesstables to finish").publish() wait.wait_for(func=wait_for_node_to_finish, step=30, timeout=900, throw_exc=True, text="Waiting until upgradesstables is finished") - except Exception: # pylint: disable=broad-except + except tenacity.RetryError: all_tables_upgraded = False finally: if
queue: @@ -605,7 +607,7 @@ def _update_scylla_yaml_on_node(node_to_update: BaseNode, updates: dict): with node_to_update.remote_scylla_yaml() as scylla_yaml: scylla_yaml.update(updates) - def test_rolling_upgrade(self): # pylint: disable=too-many-locals,too-many-statements + def test_rolling_upgrade(self): # pylint: disable=too-many-locals,too-many-statements # noqa: PLR0915 """ Upgrade half of nodes in the cluster, and start special read workload during the stage. Checksum method is changed to xxhash from Scylla 2.2, @@ -1404,7 +1406,7 @@ def _custom_profile_rolling_upgrade(self, cs_user_profiles, new_scylla_repo=None try: self.metric_has_data( metric_query='sct_cassandra_stress_user_gauge{type="ops", keyspace="%s"}' % keyspace_name, n=10) - except Exception as err: # pylint: disable=broad-except + except Exception as err: # pylint: disable=broad-except # noqa: BLE001 InfoEvent( f"Get metrics data for keyspace {keyspace_name} failed with error: {err}", severity=Severity.ERROR).publish() diff --git a/utils/build_system/create_test_release_jobs.py b/utils/build_system/create_test_release_jobs.py index a37cfaf867..995d2e622b 100644 --- a/utils/build_system/create_test_release_jobs.py +++ b/utils/build_system/create_test_release_jobs.py @@ -178,7 +178,7 @@ def create_job_tree(self, local_path: str | Path, # pylint: disable=too-many-ar self.create_directory(jenkins_path, display_name=display_name) for job_file in job_files: - job_file = Path(root) / job_file + job_file = Path(root) / job_file # noqa: PLW2901 if (job_file.suffix == '.jenkinsfile') and create_pipelines_jobs: self.create_pipeline_job(job_file, group_name=jenkins_path, job_name_suffix=job_name_suffix) if (job_file.suffix == '.xml') and create_freestyle_jobs: diff --git a/utils/cloud_cleanup/aws/clean_aws.py b/utils/cloud_cleanup/aws/clean_aws.py index e7f2c41b7b..77999f6330 100644 --- a/utils/cloud_cleanup/aws/clean_aws.py +++ b/utils/cloud_cleanup/aws/clean_aws.py @@ -100,7 +100,7 @@ def stop_instance(instance): } ]) instance.stop() - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 eprint("stop instance %s error: %s" % (instance.id, str(exc))) @@ -113,7 +113,7 @@ def remove_protection(instance): 'Value': False }) print_instance(instance, "Disabling API Termination protection") - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 eprint("DisableApiTermination protection %s error: %s" % (instance.id, str(exc))) @@ -127,7 +127,7 @@ def terminate_instance(instance): } ]) instance.terminate() - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 eprint("terminate instance %s error: %s" % (instance.id, str(exc))) @@ -230,7 +230,7 @@ def delete_volume(volume): print_volume(volume, "deleting") if not DRY_RUN: volume.delete() - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: BLE001 pass @@ -281,7 +281,7 @@ def release_address(eip_dict, client): client.release_address(AllocationId=eip_dict['AllocationId']) elif "PublicIp" in eip_dict: client.release_address(PublicIp=eip_dict['PublicIp']) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 print(exc) diff --git a/utils/cloud_cleanup/azure/clean_azure.py b/utils/cloud_cleanup/azure/clean_azure.py index 92fc04b9c5..dbefde251f
100755 --- a/utils/cloud_cleanup/azure/clean_azure.py +++ b/utils/cloud_cleanup/azure/clean_azure.py @@ -53,7 +53,7 @@ def get_vm_creation_time(v_m, resource_group_name): compute_client.virtual_machines.begin_update(resource_group_name, v_m.name, parameters={ "tags": tags, }) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info( "Failed to update VM tags: %s in resource group: %s with exception: %s", v_m.name, resource_group_name, exc) @@ -72,7 +72,7 @@ def get_rg_creation_time(resource_group): resource_group.tags = tags try: resource_client.resource_groups.create_or_update(resource_group.name, resource_group) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info("Failed to update RG tags: %s with exception: %s", resource_group.name, exc) return creation_time @@ -102,7 +102,7 @@ def delete_virtual_machine(resource_group_name, vm_name, dry_run=False): LOGGER.info("Deleting VM: %s in resource group: %s", vm_name, resource_group_name) try: compute_client.virtual_machines.begin_delete(resource_group_name, vm_name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info( "Failed to delete VM: %s in resource group: %s with exception: %s", vm_name, resource_group_name, exc) @@ -114,7 +114,7 @@ def stop_virtual_machine(resource_group_name, vm_name, dry_run=False): LOGGER.info("Stopping VM: %s in resource group: %s", vm_name, resource_group_name) try: compute_client.virtual_machines.begin_deallocate(resource_group_name, vm_name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info("Failed to stop VM: %s in resource group: %s with exception: %s", vm_name, resource_group_name, exc) @@ -126,7 +126,7 @@ def delete_resource_group(resource_group_name, dry_run=False): LOGGER.info("Deleting resource group: %s", resource_group_name) try: resource_client.resource_groups.begin_delete(resource_group_name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.info("Failed to delete resource group: %s with exception: %s", resource_group_name, exc) @@ -146,12 +146,11 @@ def clean_azure_instances(dry_run=False): if should_keep(creation_time=get_vm_creation_time(v_m, resource_group.name), keep_hours=get_keep_hours(v_m)): LOGGER.info("Keeping VM: %s in resource group: %s", v_m.name, resource_group.name) clean_group = False # skip cleaning group if there's at least one VM to keep + elif get_keep_action(v_m) == "terminate": + vms_to_process.append((delete_virtual_machine, v_m.name)) else: - if get_keep_action(v_m) == "terminate": - vms_to_process.append((delete_virtual_machine, v_m.name)) - else: - vms_to_process.append((stop_virtual_machine, v_m.name)) - clean_group = False # skip cleaning group if there's at least one VM to stop + vms_to_process.append((stop_virtual_machine, v_m.name)) + clean_group = False # skip cleaning group if there's at least one VM to stop if clean_group: delete_resource_group(resource_group.name, dry_run=dry_run) diff --git a/utils/cloud_cleanup/gce/clean_gce.py b/utils/cloud_cleanup/gce/clean_gce.py index ebaee492fc..4690226c1b 100644 --- a/utils/cloud_cleanup/gce/clean_gce.py +++ b/utils/cloud_cleanup/gce/clean_gce.py @@ -57,7 +57,7 @@ def 
clean_gce_instances(instances_client, project_id, dry_run): zone=instance.zone.split('/')[-1]) res.done() LOGGER.info("%s terminated", instance.name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("error while terminating instance %s: %s", instance.name, exc) else: LOGGER.info("dry run: would terminate instance %s, creation time: %s", @@ -72,7 +72,7 @@ def clean_gce_instances(instances_client, project_id, dry_run): zone=instance.zone.split('/')[-1]) res.done() LOGGER.info("%s stopped", instance.name) - except Exception as exc: # pylint: disable=broad-except + except Exception as exc: # pylint: disable=broad-except # noqa: BLE001 LOGGER.error("error while stopping instance %s: %s", instance.name, exc) else: LOGGER.info("dry run: would stop instance %s, creation time: %s", instance.name, vm_creation_time)
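Notes on the ruff rules exercised by this change.

BLE001 is ruff's blind-except rule, the counterpart of pylint's broad-except, and accounts for most of the inline suppressions above. The pattern is deliberate: where a broad except Exception guards a best-effort, log-and-continue path, the rule is silenced with "# noqa: BLE001" (the old pylint pragma is kept alongside until pylint is fully retired); where a narrower type is known, the diff substitutes it instead, as with cassandra.DriverException on the truncate path and tenacity.RetryError around wait.wait_for in upgrade_test.py. A minimal sketch of both styles, using a hypothetical fetch_nodetool_status helper that is not part of this change:

    import logging

    LOGGER = logging.getLogger(__name__)

    def fetch_nodetool_status(node) -> str:
        # hypothetical helper, stands in for any remote call that may fail
        return node.remoter.run("nodetool status").stdout

    def best_effort_status(node) -> str:
        # deliberately broad: any failure should degrade to "UNKNOWN", not abort the test
        try:
            return fetch_nodetool_status(node)
        except Exception as exc:  # noqa: BLE001
            LOGGER.warning("could not fetch nodetool status: %s", exc)
            return "UNKNOWN"

    def strict_status(node) -> str:
        # preferred where the failure mode is known: no suppression needed
        try:
            return fetch_nodetool_status(node)
        except OSError as exc:
            LOGGER.error("transport error while fetching status: %s", exc)
            raise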
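PLW2901 (redefined-loop-name) fires when a for-loop target or with-statement variable is rebound inside the body. The diff settles it both ways: by renaming, as in toppartition_util.py and the file_name/file_obj handles in fake_azure_service.py, or by inline suppression where the rebinding is the clearest spelling, as in version_utils.py, ycsb_thread.py and create_test_release_jobs.py. A sketch of the rename pattern, assuming a comma-separated samplers string like the one the toppartitions code parses (the strip() is an addition of this sketch, not of the diff):

    def parse_samplers(samplers: str) -> list[str]:
        parsed = []
        for raw_sampler in samplers.split(","):
            # bind the transformed value to a fresh name instead of rebinding
            # the loop variable, which is exactly what PLW2901 flags
            sampler = raw_sampler.strip().upper()
            parsed.append(sampler)
        return parsed

    assert parse_samplers("reads,writes") == ["READS", "WRITES"]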
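PLW0603 (global-statement) covers functions that assign to a module-level name, which is why the global declarations in upgrade_schema_test.py and the pickling tests in test_sct_events_base.py carry a noqa: those globals are intentional shared state, and a class definition counts as an assignment, hence the rule firing on "global Y" there. Its sibling PLW0602 flags a global declared but never assigned. A toy illustration of which code fires when (all names invented):

    counter = 0
    config = {}

    def bump() -> None:
        global counter  # noqa: PLW0603 - assigned below, so the 0603 rule fires
        counter += 1

    def read_config() -> dict:
        global config  # noqa: PLW0602 - declared but never assigned: the 0602 rule
        return config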
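PLR5501 (collapsible-else-if) asks for "else: if ...:" to become elif, dropping one indentation level. test_version_utils.py and clean_azure.py take the refactor, while ycsb_thread.py and the yum/apt branch of upgrade_test.py keep the nested form for symmetry and suppress the rule instead. The transformation, sketched on a made-up version check:

    def classify(version: str) -> str:
        if version.startswith("master:"):
            kind = "dev"
        # before the refactor this read: else: / if ":" in version: - one level deeper
        elif ":" in version:
            kind = "branched"
        else:
            kind = "release"
        return kind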
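PLW0120 (useless-else-on-loop) points out that an else clause on a loop containing no break always runs, so it could simply be dedented; find_scylla_repo in version_utils.py keeps its for/else anyway, because the early return inside the loop plays the role of the break and the else reads naturally as "no key matched". A sketch of the shape being preserved, with a hypothetical find_repo standing in:

    def find_repo(version: str, repo_map: dict[str, str]) -> str:
        for prefix, url in repo_map.items():
            if version.startswith(prefix):
                return url
        else:  # noqa: PLW0120 - kept for readability; the return above acts as the break
            raise ValueError(f"repo for scylla version {version} wasn't found")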
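Lastly, fake_provisioner.py and fake_azure_service.py now pass an explicit check=False to subprocess.run. Behavior is unchanged, since check already defaults to False; spelling it out satisfies pylint's subprocess-run-check (mirrored by ruff as PLW1510) and documents the intent: a non-zero exit should come back to the caller inside the CompletedProcess rather than raise CalledProcessError. A quick sketch (assumes a POSIX shell):

    import subprocess

    # check=False: a failing command yields a CompletedProcess with a non-zero
    # returncode instead of raising subprocess.CalledProcessError
    result = subprocess.run("exit 3", shell=True, capture_output=True, text=True, check=False)
    assert result.returncode == 3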